def test_random_restart_optimization(self, cuda=False):
     for double in (True, False):
         self._setUp(double=double, cuda=cuda)
         with gpt_settings.debug(False):
             best_f = self.model(self.train_x).mean.max().item()
         qEI = qExpectedImprovement(self.model, best_f=best_f)
         bounds = torch.tensor([[0.0], [1.0]]).type_as(self.train_x)
         batch_ics = torch.rand(2, 1).type_as(self.train_x)
         batch_candidates, batch_acq_values = gen_candidates_scipy(
             initial_conditions=batch_ics,
             acquisition_function=qEI,
             lower_bounds=bounds[0],
             upper_bounds=bounds[1],
             options={"maxiter": 3},
         )
         candidates = get_best_candidates(batch_candidates=batch_candidates,
                                          batch_values=batch_acq_values)
         self.assertTrue(-EPS <= candidates <= 1 + EPS)
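# A minimal standalone sketch of the same random-restart pattern outside a
# test harness. Assumptions: `model` is an already-fitted BoTorch model over
# inputs in [0, 1]^d, and `maximize_qei` is an illustrative helper, not part
# of the test suite. (In older BoTorch versions these functions live in
# botorch.gen instead of botorch.generation.gen.)
import torch
from botorch.acquisition import qExpectedImprovement
from botorch.generation.gen import gen_candidates_scipy, get_best_candidates

def maximize_qei(model, train_x, num_restarts=8):
    best_f = model(train_x).mean.max().item()
    qei = qExpectedImprovement(model, best_f=best_f)
    # One q=1 starting point per restart, optimized in a single batched call.
    ics = torch.rand(num_restarts, 1, train_x.shape[-1]).to(train_x)
    cands, vals = gen_candidates_scipy(
        initial_conditions=ics,
        acquisition_function=qei,
        lower_bounds=0.0,
        upper_bounds=1.0,
        options={"maxiter": 100},
    )
    return get_best_candidates(batch_candidates=cands, batch_values=vals)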
    def optimize_acqui_use_restarts_individually(self, options):

        # Get initial random restart points:
        self.my_print("[get_next_point()] Generating random restarts ...")
        initial_conditions = gen_batch_initial_conditions(
            acq_function=self,
            bounds=self.bounds,
            q=1,
            num_restarts=self.Nrestarts,
            raw_samples=500,
            options=options)
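        # initial_conditions has shape (Nrestarts, q=1, dim): one starting
        # point per restart, heuristically selected from 500 raw samples.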

        self.my_print(
            "[get_next_point()] Optimizing acquisition function with {0:d} restarts ..."
            .format(self.Nrestarts))
        x_next_many = torch.zeros(size=(self.Nrestarts, 1, self.dim))
        alpha_next_many = torch.zeros(size=(self.Nrestarts, ))
        for k in range(self.Nrestarts):

            if (k + 1) % 5 == 0:
                self.my_print(
                    "[get_next_point()] restart {0:d} / {1:d}".format(
                        k + 1, self.Nrestarts))

            x_next_many[k, :], alpha_next_many[k] = gen_candidates_scipy(
                initial_conditions=initial_conditions[k, :].view(
                    (1, 1, self.dim)),
                acquisition_function=self,
                lower_bounds=0.0,
                upper_bounds=1.0,
                options=options)

        # Get the best:
        self.my_print("[get_next_point()] Getting best candidates ...")
        x_next = get_best_candidates(x_next_many, alpha_next_many).detach()
        alpha_next = self.forward(x_next).detach()

        return x_next, alpha_next
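# A sketch of the batched alternative to the per-restart loop above:
# gen_candidates_scipy accepts all restarts in one call, so the explicit
# Python loop is only needed when restarts must be isolated (e.g. so one
# failed restart cannot abort the rest). Names mirror the method above and
# are illustrative only:
#
#     x_next_many, alpha_next_many = gen_candidates_scipy(
#         initial_conditions=initial_conditions,  # (Nrestarts, 1, dim)
#         acquisition_function=self,
#         lower_bounds=0.0,
#         upper_bounds=1.0,
#         options=options)
#     x_next = get_best_candidates(x_next_many, alpha_next_many).detach()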
def optimize_UCB(acq_function, x_bounds, unscale_y_fxn, seed):
    """Optimize UCB using random restarts"""
    # The alternative would be to call botorch.optim.joint_optimize().
    # However, that function is rather specialized for EI.
    # Hence a more custom option for ensuring good performance for UCB.
    x_init_batch_all = None
    y_init_batch_all = None
    for rnd in range(GPConstants.N_RESTART_ROUNDS):
        # Vary the seed across rounds, otherwise draw_sobol_samples()
        # returns the identical Sobol batch every round:
        x_rnd = draw_sobol_samples(bounds=x_bounds,
                                   seed=None if seed is None else seed + rnd,
                                   n=GPConstants.N_RESTART_CANDIDATES,
                                   q=1)
        # The code below is like initialize_q_batch(), but with stability checks.
        # botorch.optim.initialize_q_batch() also does a few more hacks like:
        # max_val, max_idx = torch.max(y_rnd, dim=0)
        # if max_idx not in idcs: idcs[-1] = max_idx # make sure we get the maximum
        # These hacks don't seem to help the worst cases, so we don't include them.
        x_init_batch = None
        y_init_batch = None
        try:
            with torch.no_grad():
                y_rnd = acq_function(x_rnd)
            finite_ids = torch.isfinite(y_rnd)
            x_rnd_ok = x_rnd[finite_ids]
            y_rnd_ok = y_rnd[finite_ids]
            y_rnd_std = y_rnd_ok.std()
            if torch.isfinite(y_rnd_std) and y_rnd_std > GPConstants.MIN_STD:
                # Standardize over the finite values only, so the weights line
                # up with x_rnd_ok/y_rnd_ok below (note the parentheses:
                # subtract the mean first, then divide by the std):
                z = (y_rnd_ok - y_rnd_ok.mean()) / y_rnd_std
                weights = torch.exp(1.0 * z)
                bad_weights = (torch.isnan(weights).any()
                               or torch.isinf(weights).any()
                               or (weights < 0).any() or weights.sum() <= 0)
                if not bad_weights:
                    idcs = torch.multinomial(weights,
                                             GPConstants.N_RESTARTS_PER_ROUND)
                    x_init_batch = x_rnd_ok[idcs]
                    y_init_batch = y_rnd_ok[idcs]
            if x_init_batch is None and x_rnd_ok.size(0) > 0:
                idcs = torch.randperm(n=x_rnd_ok.size(0))
                idcs = idcs[:GPConstants.N_RESTARTS_PER_ROUND]
                x_init_batch = x_rnd_ok[idcs]
                y_init_batch = y_rnd_ok[idcs]
        except RuntimeError as e:
            logging.info('WARNING: acq_function threw RuntimeError:')
            logging.info(e)
        if x_init_batch is None: continue  # GP-based queries failed
        if x_init_batch_all is None:
            x_init_batch_all = x_init_batch
            y_init_batch_all = y_init_batch
        else:
            x_init_batch_all = torch.cat([x_init_batch_all, x_init_batch],
                                         dim=0)
            y_init_batch_all = torch.cat([y_init_batch_all, y_init_batch],
                                         dim=0)
    # If GP-based queries failed (e.g. Matern Cholesky failed) use random pts.
    if x_init_batch_all is None:
        logging.info('WARNING: all acq_function tries failed; sample randomly')
        nrnd = GPConstants.N_RESTARTS_PER_ROUND * GPConstants.N_RESTART_ROUNDS
        x_init_batch_all = draw_sobol_samples(bounds=x_bounds, n=nrnd, q=1)
    else:
        # Print logs about predicted y of the points.
        y_init_batch_all_sorted, _ = y_init_batch_all.sort(descending=True)
        logging.info('optimize_UCB y_init_batch_all scaled')
        logging.info(y_init_batch_all.size())
        logging.info(y_init_batch_all_sorted)
        y_init_batch_all_unscaled, _ = unscale_y_fxn(y_init_batch_all).sort(
            descending=True)
        logging.info('optimize_UCB y_init_batch_all unscaled')
        logging.info(y_init_batch_all_unscaled.size())
        logging.info(y_init_batch_all_unscaled)
    # ATTENTION: gen_candidates_scipy does not clean up GPU tensors, memory
    # usage sometimes grows, and gc.collect() does not help.
    # TODO: Need to file a botorch bug.
    try:
        batch_candidates, batch_acq_values = gen_candidates_scipy(
            initial_conditions=x_init_batch_all,
            acquisition_function=acq_function,
            lower_bounds=x_bounds[0],
            upper_bounds=x_bounds[1],
            options={
                'maxiter': GPConstants.MAX_ACQ_ITER,
                'method': 'L-BFGS-B'
            })  # select L-BFGS-B for reasonable speed
        assert (torch.isfinite(batch_candidates).all())
        #remove_too_close(gp_model, batch_candidates, batch_acq_values)
        # same code as in botorch.gen.get_best_candidates()
        best_acq_y, best_id = torch.max(batch_acq_values.view(-1), dim=0)
        next_x = batch_candidates[best_id].squeeze(0).detach()
    except RuntimeError as e:
        logging.info('WARNING: gen_candidates_scipy threw RuntimeError:')
        logging.info(e)
        next_x = x_init_batch_all[0].squeeze()
        best_acq_y = torch.tensor(-1000000.0).to(device=next_x.device)
    # Check x is within the [0,1] boundaries.
    if (next_x < 0).any() or (next_x > 1).any() or (next_x != next_x).any():
        print('WARNING: GP optimization returned next_x', next_x)
        next_x = torch.zeros_like(next_x)
    return next_x, best_acq_y
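# A compact sketch of the softmax-style restart selection used above. It
# mirrors botorch.optim.initialize_q_batch under the assumption that all
# acquisition values are finite; `select_restarts` and `temperature` are
# illustrative names (the code above effectively uses temperature 1.0):
import torch

def select_restarts(x_cand, y_cand, n_restarts, temperature=1.0):
    # Standardize, exponentiate, and sample without replacement so that
    # high-value candidates are preferred while diversity is retained.
    z = (y_cand - y_cand.mean()) / y_cand.std()
    weights = torch.exp(temperature * z)
    idcs = torch.multinomial(weights, n_restarts)
    return x_cand[idcs], y_cand[idcs]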
    def get_safe_evaluation(self, rho_t):

        # Gather fmin samples, using the Frechet distribution:
        fmin_samples = get_fmin_samples_from_gp(
            model=self.model_list[0],
            Nsamples=self.Nsamples_fmin,
            eta=self.eta_c)  # This assumes self.eta has been updated
        self.update_u_vec(fmin_samples)

        self.which_mode = "safe"

        self.my_print(
            "[get_safe_evaluation()] Computing next candidate by maximizing the acquisition function ..."
        )
        options = {
            "batch_limit": 50,
            "maxiter": 300,
            "ftol": 1e-6,
            "method": self.method_safe,
            "iprint": 2,
            "maxls": 20,
            "disp": self.disp_info_scipy_opti
        }
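        # "batch_limit" chunks the raw-sample evaluation inside
        # gen_batch_initial_conditions(); "method" selects the scipy solver,
        # and the remaining keys ("maxiter", "ftol", "iprint", "maxls",
        # "disp") are forwarded to scipy.optimize.minimize.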

        # Get initial random restart points:
        self.my_print("[get_safe_evaluation()] Generating random restarts ...")
        initial_conditions = gen_batch_initial_conditions(
            acq_function=self,
            bounds=self.bounds,
            q=1,
            num_restarts=self.Nrestarts_safe,
            raw_samples=500,
            options=options)
        # print("initial_conditions.shape:",initial_conditions.shape)

        # BoTorch does not support constrained optimization with non-linear constraints. As a work-around,
        # it optimizes with a sigmoid that pushes the acquisition function to zero in regions where the
        # probabilistic constraint is not satisfied (i.e., areas where Pr(g(x) <= 0) < rho_t).
        self.my_print(
            "[get_safe_evaluation()] Optimizing acquisition function ...")
        x_next_many, alpha_next_many = gen_candidates_scipy(
            initial_conditions=initial_conditions,
            acquisition_function=self,
            lower_bounds=0.0,
            upper_bounds=1.0,
            options=options)
        # Get the best:
        self.my_print("[get_safe_evaluation()] Getting best candidates ...")
        x_next = get_best_candidates(x_next_many, alpha_next_many)


        # However, the above optimization does not guarantee that the constraint is satisfied: the sigmoid
        # may leave a small but non-zero mass in unsafe regions, so a maximum can be found there whenever
        # the acquisition function is even closer to zero across the safe areas. In that case we trigger a
        # proper non-linear optimizer able to handle the constraints explicitly.
        # If the constraint is violated above a tolerance, fall back to nlopt:
        if self.probabilistic_constraint(x_next) > 1e-6:
            self.my_print(
                "[get_safe_evaluation()] scipy optimization recommended an unfeasible point. Re-run using nlopt ..."
            )
            self.use_nlopt = True
            x_next, alpha_next = self.constrained_opt.run_constrained_minimization(
                initial_conditions.view((self.Nrestarts_safe, self.dim)))
            self.use_nlopt = False
        else:
            self.my_print(
                "[get_safe_evaluation()] scipy optimization finished successfully!"
            )
            alpha_next = self.forward(x_next)

        self.my_print("Pr(g(x_next) <= 0): {0:2.8f}".format(
            self.get_probability_of_safe_evaluation(x_next).item()))

        # Using botorch optimizer:
        # x_next, alpha_next = optimize_acqf(acq_function=self,bounds=self.bounds,q=1,num_restarts=self.Nrestarts_safe,raw_samples=500,return_best_only=True,options=options)

        # # The code below spits out "Unknown solver options: constraints", so we use nlopt instead:
        # constraints = [dict(type="ineq", fun=self.probabilistic_constraint)]
        # options = {"batch_limit": 1, "maxiter": 200, "ftol": 1e-6, "method": self.method_risky, "constraints": constraints}
        # x_next, alpha_next = optimize_acqf(acq_function=self, bounds=self.bounds, q=1, num_restarts=self.Nrestarts,
        #                                    raw_samples=500, return_best_only=True, options=options)

        self.my_print("Done!")

        return x_next, alpha_next
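# A minimal sketch of the "optimize first, verify feasibility, then fall
# back" pattern used above. `constraint` and `constrained_solver` stand in
# for the class members (probabilistic_constraint and the nlopt-based
# minimizer) and are not a BoTorch API:
def optimize_with_feasibility_fallback(acq, constraint, constrained_solver,
                                       ics, tol=1e-6):
    cands, vals = gen_candidates_scipy(
        initial_conditions=ics,
        acquisition_function=acq,
        lower_bounds=0.0,
        upper_bounds=1.0)
    x = get_best_candidates(cands, vals)
    # The sigmoid work-around only pushes the acquisition toward zero in
    # unsafe regions, so the optimum can still land there; re-solve with a
    # constraint-aware optimizer when the tolerance is exceeded.
    if constraint(x) > tol:
        x = constrained_solver(ics)
    return x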