Example #1
def optimize_acqf_and_get_observation(acq_func, x0, y0):
    """Optimize the acquisition function and return the new candidate `new_x`
    together with its objective function value `new_obj`."""

    # optimize
    if args.optimize_acq == 'scipy':
        candidates = joint_optimize(
            acq_function=acq_func,
            bounds=bounds,
            q=args.q,
            num_restarts=args.num_restarts,
            raw_samples=200,
        )
    else:
        Xinit = gen_batch_initial_conditions(acq_func,
                                             bounds,
                                             q=args.q,
                                             num_restarts=args.num_restarts,
                                             raw_samples=500)
        batch_candidates, batch_acq_values = gen_candidates_torch(
            initial_conditions=Xinit,
            acquisition_function=acq_func,
            lower_bounds=bounds[0],
            upper_bounds=bounds[1],
            verbose=False)
        candidates = get_best_candidates(batch_candidates, batch_acq_values)

    # observe new values
    new_x = candidates.detach()
    if not args.inf_norm:
        new_x = latent_proj(new_x, args.eps)
    new_obj = obj_func(new_x, x0, y0)
    return new_x, new_obj
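A minimal driver sketch for the helper above (hypothetical: it assumes `model`, `train_x`, `train_obj`, `x0`, `y0`, `args`, and `bounds` are defined elsewhere in the script, and that `args.iter` is an iteration budget; GP refitting is omitted):

from botorch.acquisition import qExpectedImprovement

# Sketch of the outer Bayesian optimization loop: propose a candidate with qEI,
# observe its objective value, and append both to the training data.
for _ in range(args.iter):  # args.iter: hypothetical iteration budget
    qEI = qExpectedImprovement(model=model, best_f=train_obj.max())
    new_x, new_obj = optimize_acqf_and_get_observation(qEI, x0, y0)
    train_x = torch.cat([train_x, new_x])
    train_obj = torch.cat([train_obj, new_obj])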
    def test_random_restart_optimization(self, cuda=False):
        for double in (True, False):
            self._setUp(double=double, cuda=cuda)
            with gpt_settings.debug(False):
                best_f = self.model(self.train_x).mean.max().item()
            qEI = qExpectedImprovement(self.model, best_f=best_f)
            bounds = torch.tensor([[0.0], [1.0]]).type_as(self.train_x)
            batch_ics = torch.rand(2, 1).type_as(self.train_x)
            batch_candidates, batch_acq_values = gen_candidates_scipy(
                initial_conditions=batch_ics,
                acquisition_function=qEI,
                lower_bounds=bounds[0],
                upper_bounds=bounds[1],
                options={"maxiter": 3},
            )
            candidates = get_best_candidates(
                batch_candidates=batch_candidates, batch_values=batch_acq_values
            )
            self.assertTrue(-EPS <= candidates <= 1 + EPS)
    def optimize_acqui_use_restarts_individually(self, options):

        # Get initial random restart points:
        self.my_print("[get_next_point()] Generating random restarts ...")
        initial_conditions = gen_batch_initial_conditions(
            acq_function=self,
            bounds=self.bounds,
            q=1,
            num_restarts=self.Nrestarts,
            raw_samples=500,
            options=options)

        self.my_print(
            "[get_next_point()] Optimizing acquisition function with {0:d} restarts ..."
            .format(self.Nrestarts))
        x_next_many = torch.zeros(size=(self.Nrestarts, 1, self.dim))
        alpha_next_many = torch.zeros(size=(self.Nrestarts, ))
        for k in range(self.Nrestarts):

            if (k + 1) % 5 == 0:
                self.my_print(
                    "[get_next_point()] restart {0:d} / {1:d}".format(
                        k + 1, self.Nrestarts))

            x_next_many[k, :], alpha_next_many[k] = gen_candidates_scipy(
                initial_conditions=initial_conditions[k, :].view(
                    (1, 1, self.dim)),
                acquisition_function=self,
                lower_bounds=0.0,
                upper_bounds=1.0,
                options=options)

        # Get the best:
        self.my_print("[get_next_point()] Getting best candidates ...")
        x_next = get_best_candidates(x_next_many, alpha_next_many).detach()
        alpha_next = self.forward(x_next).detach()

        return x_next, alpha_next
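Since `gen_candidates_scipy` accepts a whole batch of initial conditions (as `joint_optimize` below shows), the per-restart loop in the method above could be replaced by a single batched call. A sketch of the replacement body, under that assumption:

        # Batched variant (sketch): optimize all restarts in one call instead of looping.
        x_next_many, alpha_next_many = gen_candidates_scipy(
            initial_conditions=initial_conditions,  # shape (Nrestarts, 1, dim)
            acquisition_function=self,
            lower_bounds=0.0,
            upper_bounds=1.0,
            options=options)
        x_next = get_best_candidates(x_next_many, alpha_next_many).detach()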
def joint_optimize(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    q: int,
    num_restarts: int,
    raw_samples: int,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
    constraints=(),
    fixed_features: Optional[Dict[int, float]] = None,
    post_processing_init: Optional[Callable[[Tensor], Tensor]] = None,
) -> Tensor:
    """
    This function generates a set of candidates via joint multi-start optimization

    Parameters
    ----------
    :param acq_function: the acquisition function
    :param bounds: a `2 x d` tensor of lower and upper bounds for each column of `X`
    :param q: number of candidates
    :param num_restarts: number of starting points for multi-start acquisition function optimization
    :param raw_samples: number of samples for initialization


    Optional parameters
    -------------------
    :param options: options for candidate generation
    :param constraints: constraints in scipy format
    :param fixed_features: A map {feature_index: value} for features that should be fixed to a particular value
        during generation.
    :param post_processing_init: A function that post processes the generated initial samples
        (e.g. so that they fulfill some constraints).

    Returns
    -------
    :return: a `q x d` tensor of generated candidates.
    """

    options = options or {}
    batch_initial_conditions = gen_batch_initial_conditions(
        acq_function=acq_function,
        bounds=bounds,
        q=None if isinstance(acq_function, AnalyticAcquisitionFunction) else q,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options=options,
        post_processing_init=post_processing_init,
    )
    batch_limit = options.get("batch_limit", num_restarts)
    batch_candidates_list = []
    batch_acq_values_list = []
    start_idx = 0
    while start_idx < num_restarts:
        end_idx = min(start_idx + batch_limit, num_restarts)
        # optimize using random restart optimization
        batch_candidates_curr, batch_acq_values_curr = gen_candidates_scipy(
            initial_conditions=batch_initial_conditions[start_idx:end_idx],
            acquisition_function=acq_function,
            lower_bounds=bounds[0],
            upper_bounds=bounds[1],
            options={k: v for k, v in options.items()
                     if k not in ("batch_limit", "nonnegative")},
            constraints=constraints,
            fixed_features=fixed_features,
        )
        batch_candidates_list.append(batch_candidates_curr)
        batch_acq_values_list.append(batch_acq_values_curr)
        start_idx += batch_limit

    batch_candidates = torch.cat(batch_candidates_list)
    batch_acq_values = torch.cat(batch_acq_values_list)
    return get_best_candidates(batch_candidates=batch_candidates,
                               batch_values=batch_acq_values)
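A usage sketch for `joint_optimize` (hypothetical setup: it assumes a fitted BoTorch `model` and its training targets `train_obj`):

from botorch.acquisition import qExpectedImprovement

# Optimize qEI over the 2-D unit box with 10 random restarts,
# processing at most 5 restarts per gen_candidates_scipy call.
qEI = qExpectedImprovement(model=model, best_f=train_obj.max())
bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
candidates = joint_optimize(
    acq_function=qEI,
    bounds=bounds,
    q=2,
    num_restarts=10,
    raw_samples=256,
    options={"batch_limit": 5, "maxiter": 200},
)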
    def get_safe_evaluation(self, rho_t):

        # Gather fmin samples, using the Frechet distribution:
        fmin_samples = get_fmin_samples_from_gp(
            model=self.model_list[0],
            Nsamples=self.Nsamples_fmin,
            eta=self.eta_c)  # This assumes self.eta has been updated
        self.update_u_vec(fmin_samples)

        self.which_mode = "safe"

        self.my_print(
            "[get_safe_evaluation()] Computing next candidate by maximizing the acquisition function ..."
        )
        options = {
            "batch_limit": 50,
            "maxiter": 300,
            "ftol": 1e-6,
            "method": self.method_safe,
            "iprint": 2,
            "maxls": 20,
            "disp": self.disp_info_scipy_opti
        }
        # x_next, alpha_next = optimize_acqf(acq_function=self,bounds=self.bounds,q=1,num_restarts=self.Nrestarts_safe,raw_samples=500,return_best_only=True,options=options)
        # pdb.set_trace()

        # Get initial random restart points:
        self.my_print("[get_safe_evaluation()] Generating random restarts ...")
        initial_conditions = gen_batch_initial_conditions(
            acq_function=self,
            bounds=self.bounds,
            q=1,
            num_restarts=self.Nrestarts_safe,
            raw_samples=500,
            options=options)
        # print("initial_conditions.shape:",initial_conditions.shape)

        # BoTorch does not support constrained optimization with non-linear constraints.
        # As a work-around, it uses a sigmoid to push the acquisition function to zero in
        # regions where the probabilistic constraint is not satisfied, i.e., areas where
        # Pr(g(x) <= 0) < rho_t.
        self.my_print(
            "[get_safe_evaluation()] Optimizing acquisition function ...")
        x_next_many, alpha_next_many = gen_candidates_scipy(
            initial_conditions=initial_conditions,
            acquisition_function=self,
            lower_bounds=0.0,
            upper_bounds=1.0,
            options=options)
        # Get the best:
        self.my_print("[get_safe_evaluation()] Getting best candidates ...")
        x_next = get_best_candidates(x_next_many, alpha_next_many)

        # pdb.set_trace()

        # However, the above optimization does not guarantee that the constraint is
        # satisfied: the sigmoid leaves a small but non-zero mass in unsafe regions, so a
        # maximum can be found there whenever the acquisition function is even closer to
        # zero throughout the safe areas. In that case, we fall back to a proper
        # non-linear optimizer that handles the constraints explicitly.
        if self.probabilistic_constraint(
                x_next
        ) > 1e-6:  # If the constraint is violated above a tolerance, use nlopt
            self.my_print(
                "[get_safe_evaluation()] scipy optimization recommended an unfeasible point. Re-run using nlopt ..."
            )
            self.use_nlopt = True
            x_next, alpha_next = self.constrained_opt.run_constrained_minimization(
                initial_conditions.view((self.Nrestarts_safe, self.dim)))
            self.use_nlopt = False
        else:
            self.my_print(
                "[get_safe_evaluation()] scipy optimization finished successfully!"
            )
            alpha_next = self.forward(x_next)

        self.my_print("Pr(g(x_next) <= 0): {0:2.8f}".format(
            self.get_probability_of_safe_evaluation(x_next).item()))

        # Using botorch optimizer:
        # x_next, alpha_next = optimize_acqf(acq_function=self,bounds=self.bounds,q=1,num_restarts=self.Nrestarts_safe,raw_samples=500,return_best_only=True,options=options)

        # # The code below spits out: Unknown solver options: constraints. Using nlopt instead
        # constraints = [dict(type="ineq", fun=self.probabilistic_constraint)]
        # options = {"batch_limit": 1, "maxiter": 200, "ftol": 1e-6, "method": self.method_risky, "constraints": constraints}
        # x_next, alpha_next = optimize_acqf(acq_function=self, bounds=self.bounds, q=1, num_restarts=self.Nrestarts,
        #                                    raw_samples=500, return_best_only=True, options=options)

        self.my_print("Done!")

        return x_next, alpha_next
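The sigmoid work-around referenced in the comments above corresponds to BoTorch's soft constraint handling for MC acquisition functions. A minimal sketch of how such an objective can be assembled, assuming a fitted two-output `model` (objective and constraint) and a feasible incumbent value `best_f`:

from botorch.acquisition import qExpectedImprovement
from botorch.acquisition.objective import ConstrainedMCObjective

# Soft constraint handling (sketch): posterior samples whose constraint value is
# positive are weighted toward zero objective via a sigmoid, mirroring the
# work-around described above.
constrained_obj = ConstrainedMCObjective(
    objective=lambda Z, X=None: Z[..., 0],      # first output: the objective
    constraints=[lambda Z, X=None: Z[..., 1]],  # second output: feasible if <= 0
)
qEI = qExpectedImprovement(model=model, best_f=best_f, objective=constrained_obj)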
Example #7
def joint_optimize_manifold(
    acq_function: AcquisitionFunction,
    manifold: Manifold,
    solver: Solver,
    q: int,
    num_restarts: int,
    raw_samples: int,
    bounds: Tensor,
    sample_type: torch.dtype = torch.float64,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
    inequality_constraints: Optional[List[Callable]] = None,
    equality_constraints: Optional[List[Callable]] = None,
    pre_processing_manifold: Optional[Callable[[Tensor], Tensor]] = None,
    post_processing_manifold: Optional[Callable[[Tensor], Tensor]] = None,
    approx_hessian: bool = False,
    solver_init_conds: bool = False,
) -> Tensor:
    """
    This function generates a set of candidates via joint multi-start optimization

    Parameters
    ----------
    :param acq_function: the acquisition function
    :param manifold: the manifold on which the optimization takes place (a pymanopt manifold)
    :param solver: the solver used to perform the optimization on the manifold (a pymanopt solver)
    :param q: number of candidates
    :param num_restarts: number of starting points for multi-start acquisition function optimization
    :param raw_samples: number of samples for initialization
    :param bounds: a `2 x d` tensor of lower and upper bounds for each column of `X`
    :param sample_type: type of the generated samples for initialization

    Optional parameters
    -------------------
    :param options: options for candidate generation
    :param inequality_constraints: inequality constraint or list of inequality constraints, satisfied if >= 0
    :param equality_constraints: equality constraint or list of equality constraints, satisfied if = 0
    :param pre_processing_manifold: a function that pre-processes the data on the manifold before the optimization.
        Typically, this can be used to transform vectors (required by the GP) into the corresponding matrices
        (required for matrix-manifold optimization).
    :param post_processing_manifold: a function that post-processes the data on the manifold after the optimization.
        Typically, this can be used to transform matrices (required for matrix-manifold optimization) into vectors
        (required by the GP).
    :param approx_hessian: if True, the Hessian of the cost is approximated with finite differences of the gradient
    :param solver_init_conds: if True, the initialization is performed inside the solver. This must be True for
        population-based methods, e.g., PSO or Nelder-Mead.

    Returns
    -------
    :return: a `q x d` tensor of generated candidates.
    """

    options = options or {}
    batch_initial_conditions = gen_batch_initial_conditions_manifold(
        acq_function=acq_function,
        manifold=manifold,
        bounds=bounds,
        q=None if isinstance(acq_function, AnalyticAcquisitionFunction) else q,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        sample_type=sample_type,
        options=options,
        post_processing_manifold=post_processing_manifold,
    )

    batch_limit = options.get("batch_limit", num_restarts)
    batch_candidates_list = []
    batch_acq_values_list = []
    start_idx = 0
    while start_idx < num_restarts:
        end_idx = min(start_idx + batch_limit, num_restarts)
        # optimize using random restart optimization
        batch_candidates_curr, batch_acq_values_curr = gen_candidates_manifold(
            initial_conditions=batch_initial_conditions[start_idx:end_idx],
            acquisition_function=acq_function,
            manifold=manifold,
            solver=solver,
            pre_processing_manifold=pre_processing_manifold,
            post_processing_manifold=post_processing_manifold,
            lower_bounds=bounds[0],
            upper_bounds=bounds[1],
            options={k: v for k, v in options.items()
                     if k not in ("batch_limit", "nonnegative")},
            inequality_constraints=inequality_constraints,
            equality_constraints=equality_constraints,
            approx_hessian=approx_hessian,
            solver_init_conds=solver_init_conds,
        )

        batch_candidates_list.append(batch_candidates_curr)
        batch_acq_values_list.append(batch_acq_values_curr)
        start_idx += batch_limit

    batch_candidates = torch.cat(batch_candidates_list)
    batch_acq_values = torch.cat(batch_acq_values_list)
    return get_best_candidates(batch_candidates=batch_candidates, batch_values=batch_acq_values)
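A usage sketch for `joint_optimize_manifold` (hypothetical setup: it assumes a fitted BoTorch `model` with training targets `train_obj`, and a pymanopt version that exposes `Sphere` and `SteepestDescent` under the module paths below):

from pymanopt.manifolds import Sphere
from pymanopt.solvers import SteepestDescent
from botorch.acquisition import ExpectedImprovement

# Optimize analytic EI on the unit sphere in R^3 with 5 random restarts;
# the bounds only steer the initial samples, the manifold enforces feasibility.
EI = ExpectedImprovement(model=model, best_f=train_obj.max())
candidates = joint_optimize_manifold(
    acq_function=EI,
    manifold=Sphere(3),
    solver=SteepestDescent(),
    q=1,
    num_restarts=5,
    raw_samples=100,
    bounds=torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]),
)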