def get_initial_evaluations(dim,eval_type=1):

	if dim == 1:
		if eval_type == 1:
			train_x = torch.tensor([[0.1],[0.3],[0.65],[0.7],[0.9]],device=device, dtype=torch.float32, requires_grad=False)
			train_y = torch.tensor([-0.5,2.0,1.0,INF,INF],device=device, dtype=torch.float32, requires_grad=False) # We place inf to emphasize the absence of measurement
			train_l = torch.tensor([+1, +1, +1, -1, -1],device=device, requires_grad=False, dtype=torch.float32)
		elif eval_type == 2:
			train_x = torch.tensor([[0.7],[0.9]],device=device, dtype=torch.float32, requires_grad=False)
			train_y = torch.tensor([INF,INF],device=device, dtype=torch.float32, requires_grad=False) # We place inf to emphasize the absence of measurement
			train_l = torch.tensor([-1, -1],device=device, requires_grad=False, dtype=torch.float32)
		else:
			train_x = torch.tensor([[0.7],[0.9]],device=device, dtype=torch.float32, requires_grad=False)
			train_y = torch.tensor([-0.5,2.0],device=device, dtype=torch.float32, requires_grad=False) # Both measurements are available here (no INF placeholders)
			train_l = torch.tensor([+1, +1],device=device, requires_grad=False, dtype=torch.float32)

		# Put them together:
		train_yl = torch.cat([train_y[:,None], train_l[:,None]],dim=1)
	elif dim == 2:
		branin_fun = Branin(noise_std=None, negate=False)
		train_x = draw_sobol_samples(bounds=torch.tensor(([0.0]*dim,[1.0]*dim)),n=4,q=1).squeeze(1)
		train_y = branin_fun(train_x)
		train_l = torch.ones(train_x.shape[0]) # Assume all initial evaluations succeeded (label +1); train_yl was otherwise undefined for this branch
		train_yl = torch.cat([train_y[:,None], train_l[:,None]],dim=1)
	elif dim == 6:
		hartman_fun = Hartmann()
		train_x = draw_sobol_samples(bounds=torch.tensor(([0.0]*dim,[1.0]*dim)),n=4,q=1).squeeze(1)
		train_y = hartman_fun(train_x)
		train_l = torch.ones(train_x.shape[0]) # Assume all initial evaluations succeeded (label +1)
		train_yl = torch.cat([train_y[:,None], train_l[:,None]],dim=1)
	return train_x, train_yl
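# A minimal, self-contained sketch (an editor's illustration, assuming only
# torch) of the [value, label] layout returned above; INF marks points where
# no measurement was obtained, which carry label -1.
import torch

INF = float("inf")
train_y = torch.tensor([-0.5, 2.0, INF])
train_l = torch.tensor([+1.0, +1.0, -1.0])
train_yl = torch.cat([train_y[:, None], train_l[:, None]], dim=1)
print(train_yl)  # each row is [y, label]; the inf row carries label -1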
def get_initial_evaluations(dim):

    if dim == 1:
        # Initial points used for Figure 1 in the paper.
        train_x = torch.Tensor([[0.93452506], [0.18872502], [0.89790337],
                                [0.95841797], [0.82335255], [0.45000000],
                                [0.50000000]])
        train_y = torch.Tensor([
            -0.4532849, -0.66614552, -0.92803395, 0.08880341, -0.27683621,
            1.000000, 1.500000
        ])
    elif dim == 2:
        branin_fun = Branin(noise_std=None, negate=False)
        train_x = draw_sobol_samples(bounds=torch.Tensor(
            ([0.0] * dim, [1.0] * dim)),
                                     n=4,
                                     q=1).squeeze(1)
        train_y = branin_fun(train_x)
    elif dim == 6:
        hartman_fun = Hartmann()
        train_x = draw_sobol_samples(bounds=torch.Tensor(
            ([0.0] * dim, [1.0] * dim)),
                                     n=4,
                                     q=1).squeeze(1)
        train_y = hartman_fun(train_x)

    return train_x, train_y
Example #3
def get_initial_evaluations(which_objective, function_obj, function_cons):

    assert which_objective in [
        "hart6D", "micha10D", "simple1D"
    ], "Objective function <which_objective> must be {'hart6D','micha10D','simple1D'}"

    # Get initial evaluation:
    if which_objective == "hart6D":
        train_x = torch.Tensor([[
            0.32124528, 0.00573107, 0.07254258, 0.90988337, 0.00164314,
            0.41116992
        ]])  # Randomly computed

    if which_objective == "micha10D":
        train_x = torch.Tensor([[
            0.65456088, 0.22632844, 0.50252072, 0.80747863, 0.11509346,
            0.73440179, 0.06093292, 0.464906, 0.01544494, 0.90179168
        ]])  # Randomly computed

    # Get initial evaluation:
    if which_objective == "simple1D":
        train_x = draw_sobol_samples(bounds=torch.Tensor(([0.0], [1.0])),
                                     n=1,
                                     q=1).squeeze(1)

    # Get initial evaluation in f(x):
    train_y_obj = function_obj(train_x)

    # Get initial evaluation in g(x):
    train_y_cons = function_cons(train_x)
    return train_x, train_y_obj, train_y_cons
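# Hedged usage sketch for the function above: function_obj and function_cons
# are arbitrary callables mapping an `n x d` tensor to per-point values. The
# toy objectives below are assumptions for illustration, and the surrounding
# torch / draw_sobol_samples imports are assumed to be in scope.
f_obj = lambda x: (x ** 2).sum(dim=-1)
g_cons = lambda x: x.sum(dim=-1) - 0.5
x0, y0_obj, y0_cons = get_initial_evaluations("simple1D", f_obj, g_cons)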
Example #4
    def test_draw_sobol_samples(self):
        batch_shapes = [None, [3, 5], torch.Size([2]), (5, 3, 2, 3), []]
        for d, q, n, batch_shape, seed, dtype in itertools.product(
            (1, 3),
            (1, 2),
            (2, 5),
            batch_shapes,
            (None, 1234),
            (torch.float, torch.double),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            bounds = torch.stack([torch.rand(d), 1 + torch.rand(d)]).to(**tkwargs)
            samples = draw_sobol_samples(
                bounds=bounds, n=n, q=q, batch_shape=batch_shape, seed=seed
            )
            batch_shape = batch_shape or torch.Size()
            self.assertEqual(samples.shape, torch.Size([n, *batch_shape, q, d]))
            self.assertTrue(torch.all(samples >= bounds[0]))
            self.assertTrue(torch.all(samples <= bounds[1]))
            self.assertEqual(samples.device.type, self.device.type)
            self.assertEqual(samples.dtype, dtype)
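# The shape contract exercised by the test above, as a minimal sketch:
# draw_sobol_samples returns an `n x batch_shape x q x d` tensor inside bounds.
import torch
from botorch.utils.sampling import draw_sobol_samples

bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])  # 2 x d
samples = draw_sobol_samples(bounds=bounds, n=5, q=3, seed=0)
assert samples.shape == torch.Size([5, 3, 2])  # n x q x d (no batch_shape given)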
    def get_next_point(self) -> Tuple[Tensor, Tensor]:

        if self.model.models[
                idxm["obj"]].train_targets is None:  # No safe evaluations case
            self.eta_c = torch.zeros(1, device=device, dtype=dtype)
            self.x_eta_c = torch.zeros((1, self.dim),
                                       device=device,
                                       dtype=dtype)
            self.only_prob = True
        else:

            # The following functions need to be called in the given order:
            try:
                self.update_eta_c(
                    rho_t=self.rho_conserv
                )  # Update min_x mu(x|D) s.t. Pr(g(x) <= 0) > rho_t
            except Exception as inst:
                logger.info("Exception (!) type: {0:s} | args: {1:s}".format(
                    str(type(inst)), str(inst.args)))
                logger.info("Not optimizing eta_c ...")

            # self.best_f = self.eta_c
            self.best_f = self.get_best_constrained_evaluation(
            ) - self.model.models[idxm["obj"]].likelihood.noise.sqrt()[0].view(
                1)
            self.only_prob = False

        self.x_next, self.alpha_next = self.get_acqui_fun_maximizer()

        # Prevent from getting stuck into global minima:
        close_points, _ = self.model_list.models[
            idxm["cons"]]._identify_stable_close_to_unstable(
                X_sta=self.x_next.cpu().numpy(),
                X_uns=self.model_list.models[
                    idxm["cons"]].train_x_sorted.cpu().numpy(),
                top_dist=math.sqrt(self.dim) * 0.005,
                verbosity=False)
        if len(close_points) > 0:
            logger.info(
                "Changed the evaluation to random as it was very close to an existing evaluation, within math.sqrt(self.dim)*0.005 = {0:f}"
                .format(math.sqrt(self.dim) * 0.005))
            self.x_next = draw_sobol_samples(bounds=torch.Tensor(
                [[0.0] * self.dim, [1.0] * self.dim]),
                                             n=1,
                                             q=1).view(-1, self.dim)

        if self.x_next is not None and self.alpha_next is not None:
            logger.info(
                "xnext: " +
                str(self.x_next.view((1, self.dim)).detach().cpu().numpy()))
            logger.info("alpha_next: {0:2.2f}".format(self.alpha_next.item()))
        else:
            logger.info("xnext: None")
            logger.info("alpha_next: None")

        logger.info("self.x_eta_c: " + str(self.x_eta_c))
        logger.info("self.eta_c: " + str(self.eta_c))
        logger.info("self.best_f: " + str(self.best_f))

        return self.x_next, self.alpha_next
    def gen(
        self,
        n: int = 1,
        model_gen_options: Optional[Dict[str, Any]] = None,
        explore_features: Optional[List[int]] = None,
    ) -> Tuple[torch.Tensor, Optional[List[Dict[str, Any]]]]:
        options = model_gen_options or {}
        num_ts_points = options.get("num_ts_points", 1000)  # OK for 2-d

        # Generate the points at which to sample
        X = draw_sobol_samples(bounds=self.bounds_, n=num_ts_points, q=1).squeeze(1)
        # Fix any explore features
        if explore_features is not None:
            for idx in explore_features:
                val = (
                    self.bounds_[0, idx]
                    + torch.rand(1, dtype=self.dtype)
                    * (self.bounds_[1, idx] - self.bounds_[0, idx])
                ).item()
                X[:, idx] = val

        # Draw n samples
        f_samp = self.sample(X, num_samples=n, num_rejection_samples=500)

        # Find the point closest to target
        dist = torch.abs(self.objective(f_samp) - self.target_value)
        best_indx = torch.argmin(dist, dim=1)
        return X[best_indx], {}
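# Shape sketch for the target-matching selection above (a hedged illustration,
# with objective taken as the identity): each of the n posterior draws picks
# the grid point whose sampled value is closest to target_value.
import torch

f_samp = torch.randn(3, 1000)          # n=3 draws over 1000 candidate points
dist = torch.abs(f_samp - 0.75)        # distance to target_value=0.75
best_indx = torch.argmin(dist, dim=1)  # one index per draw -> shape (3,)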
Example #7
    def gen(
        self,
        num_points: int,  # Current implementation only generates 1 point at a time
        model: MonotonicRejectionGP,
    ) -> np.ndarray:
        """Query next point(s) to run by optimizing the acquisition function.

        Args:
            num_points (int): Number of points to query.
            model (MonotonicRejectionGP): Fitted model of the data.

        Returns:
            np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
        """

        # Generate the points at which to sample
        X = draw_sobol_samples(bounds=model.bounds_, n=self.num_ts_points,
                               q=1).squeeze(1)
        # Fix any explore features
        if self.explore_features is not None:
            for idx in self.explore_features:
                val = (model.bounds_[0, idx] + torch.rand(1) *
                       (model.bounds_[1, idx] - model.bounds_[0, idx])).item()
                X[:, idx] = val

        # Draw n samples
        f_samp = model.sample(
            X,
            num_samples=self.n_samples,
            num_rejection_samples=self.n_rejection_samples,
        )

        # Find the point closest to target
        dist = torch.abs(self.objective(f_samp) - self.target_value)
        best_indx = torch.argmin(dist, dim=1)
        return X[best_indx].numpy()
Example #8
def generate_initial_data(n=100):
    # generate training data
    train_x = draw_sobol_samples(bounds=problem.bounds,
                                 n=1,
                                 q=n,
                                 seed=torch.randint(1000000,
                                                    (1, )).item()).squeeze(0)
    train_obj = problem(train_x)
    return train_x, train_obj
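# Hedged usage sketch: `problem` above is a module-level global; with a
# BoTorch test function it could be set up as follows (the Branin choice is
# an assumption for illustration).
from botorch.test_functions import Branin

problem = Branin(negate=True)
train_x, train_obj = generate_initial_data(n=10)  # train_x: 10 x 2, train_obj: 10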
Example #9
def sample_truncated_normal_perturbations(
    X: Tensor,
    n_discrete_points: int,
    sigma: float,
    bounds: Tensor,
    qmc: bool = True,
) -> Tensor:
    r"""Sample points around `X`.

    Sample perturbed points around `X` such that the added perturbations
    are sampled from N(0, sigma^2 I) and truncated to be within [0,1]^d.

    Args:
        X: A `n x d`-dim tensor of starting points.
        n_discrete_points: The number of points to sample.
        sigma: The standard deviation of the additive Gaussian noise for
            perturbing the points.
        bounds: A `2 x d`-dim tensor containing the bounds.
        qmc: A boolean indicating whether to use qmc.

    Returns:
        A `n_discrete_points x d`-dim tensor containing the sampled points.
    """
    X = normalize(X, bounds=bounds)
    d = X.shape[1]
    # sample points from N(X_center, sigma^2 I), truncated to be within
    # [0, 1]^d.
    if X.shape[0] > 1:
        rand_indices = torch.randint(X.shape[0], (n_discrete_points, ),
                                     device=X.device)
        X = X[rand_indices]
    if qmc:
        std_bounds = torch.zeros(2, d, dtype=X.dtype, device=X.device)
        std_bounds[1] = 1
        u = draw_sobol_samples(bounds=std_bounds, n=n_discrete_points,
                               q=1).squeeze(1)
    else:
        u = torch.rand((n_discrete_points, d), dtype=X.dtype, device=X.device)
    # compute bounds to sample from
    a = -X
    b = 1 - X
    # compute z-score of bounds
    alpha = a / sigma
    beta = b / sigma
    normal = Normal(0, 1)
    cdf_alpha = normal.cdf(alpha)
    # use inverse transform
    perturbation = normal.icdf(cdf_alpha + u *
                               (normal.cdf(beta) - cdf_alpha)) * sigma
    # add perturbation and clip points that are still outside
    perturbed_X = (X + perturbation).clamp(0.0, 1.0)
    return unnormalize(perturbed_X, bounds=bounds)
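# Minimal usage sketch for the helper above (it relies on the normalize /
# unnormalize / draw_sobol_samples / Normal imports assumed by the snippet):
import torch

X_start = torch.tensor([[0.2, 0.8], [0.5, 0.5]])
bounds01 = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
pts = sample_truncated_normal_perturbations(
    X_start, n_discrete_points=16, sigma=0.05, bounds=bounds01)
assert pts.shape == (16, 2) and (pts >= 0).all() and (pts <= 1).all()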
Example #10
    def _sample_hyperparameters_within_bounds(self, Nsamples):

        # Get a sample from the prior for initialization:
        # Drawing the seed here makes the result depend on any top-level manual
        # seed, unlike passing seed=None to draw_sobol_samples directly.
        new_seed = torch.randint(low=0, high=100000, size=(1, )).item()
        hyperpars_restarts = draw_sobol_samples(
            bounds=torch.tensor(self.hyperpars_bounds),
            n=Nsamples,
            q=1,
            seed=new_seed)
        hyperpars_restarts = hyperpars_restarts.squeeze(1)  # Remove q dimension: [n, q, dim] -> [n, dim]

        return hyperpars_restarts
def generate_initial_data(init_num, obj_func, time_list, global_start_time):
    # generate training data. caution: train_x in [0, 1]
    train_x = draw_sobol_samples(bounds=standard_bounds,
                                 n=1,
                                 q=init_num,
                                 seed=torch.randint(1000000,
                                                    (1, )).item()).squeeze(0)
    train_obj = []
    for x in train_x:
        y = obj_func(x)
        train_obj.append(y)
        global_time = time.time() - global_start_time
        time_list.append(global_time)
    train_obj = torch.tensor(train_obj, **tkwargs).reshape(init_num, -1)
    return train_x, train_obj
Example #12
    def fit(
        self, train_x: Tensor, train_y: Tensor, bounds: List[Tuple[float, float]]
    ) -> None:
        """
        Fit the model.

        Args:
            train_x: Train X.
            train_y: Train Y. Should be (n x 1).
            bounds: List of (lb, ub) tuples for each column in X.
        """
        self.dtype = train_x.dtype
        self.device = train_x.device
        bounds_ = torch.tensor(bounds, dtype=self.dtype)
        self.bounds_ = bounds_.transpose(0, 1)
        # Select inducing points
        self.inducing_points = draw_sobol_samples(
            bounds=self.bounds_, n=self.num_induc, q=1
        ).squeeze(1)
        self._set_model(train_x, train_y)
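# Brief hedged note on the bounds convention above: `bounds` arrives as a
# list of (lb, ub) pairs, i.e. a `d x 2` tensor, and is transposed into the
# `2 x d` layout that draw_sobol_samples expects.
import torch

bounds_list = [(0.0, 1.0), (-2.0, 2.0)]
bounds_ = torch.tensor(bounds_list).transpose(0, 1)  # shape 2 x d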
Example #13
    def gen(self, num_points: int = 1, noise_scale=0.2, **kwargs):

        # Generate the points at which to sample
        X = draw_sobol_samples(bounds=torch.Tensor(np.c_[self.lb, self.ub]).T,
                               n=self.samps,
                               q=1).squeeze(1)

        # Draw n samples
        f_samp = self.sample(X, num_samples=1000)
        acq = self._get_acquisition_fn()
        acq_vals = acq.acquisition(acq.objective(f_samp))
        # normalize
        acq_vals = acq_vals - acq_vals.min()
        acq_vals = acq_vals / acq_vals.max()
        # add noise
        acq_vals = acq_vals + torch.randn_like(acq_vals) * noise_scale

        # Find the point closest to target
        best_vals, best_indx = torch.topk(acq_vals, k=num_points)
        return X[best_indx]
Example #14
def generate_train_dataset(objective, bounds, task_list, training_size=20):
    # Sample data for each base task
    data_by_task = {}
    for task in task_list:
        # draw points from a sobol sequence
        raw_x = draw_sobol_samples(bounds=bounds,
                                   n=training_size,
                                   q=1,
                                   seed=task + 5397923).squeeze(1)
        # get observed values
        f_x = objective(raw_x, task)
        train_y = f_x + noise_std * torch.randn_like(f_x)
        train_yvar = torch.full_like(train_y, noise_std**2)
        # store training data
        data_by_task[task] = {
            # scale x to [0, 1]
            'train_x': normalize(raw_x, bounds=bounds),
            'train_y': train_y,
            'train_yvar': train_yvar,
        }
    return data_by_task
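# Hedged sketch of the fixed-noise convention above: observations receive
# i.i.d. Gaussian noise, and train_yvar stores the (known) noise variance
# per observation, as fixed-noise GP models expect.
import torch

noise_std = 0.1
f_x = torch.randn(20)
train_y = f_x + noise_std * torch.randn_like(f_x)
train_yvar = torch.full_like(train_y, noise_std ** 2)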
Example #15
        return torch.cat([y_out.view(-1, 1), l_out.view(-1, 1)], dim=1)


if __name__ == "__main__":

    dim = 2

    cbr = ConsCircle(noise_std=0.01)

    from botorch.utils.sampling import draw_sobol_samples

    bounds = torch.Tensor([[0.0] * dim, [1.0] * dim])
    Nsamples = 20
    Nrep = 20
    x0_candidates = draw_sobol_samples(bounds=bounds, n=Nsamples,
                                       q=1).squeeze(1)  # Candidate points; the loop below searches for unstable evaluations

    ind_stable = []
    val_list = []
    for k in range(Nsamples):

        is_stable = True
        ii = 0
        while is_stable and ii < Nrep:

            val = cbr.evaluate(x0_candidates[k, :].view(-1, dim),
                               with_noise=True)

            is_stable = val[0, 1] == +1

            ii += 1
Example #16
def optimize_UCB(acq_function, x_bounds, unscale_y_fxn, seed):
    """Optimize UCB using random restarts"""
    # The alternative would be to call botorch.optim.joint_optimize().
    # However, that function is rather specialized for EI.
    # Hence a more custom option for ensuring good performance for UCB.
    x_init_batch_all = None
    y_init_batch_all = None
    for rnd in range(GPConstants.N_RESTART_ROUNDS):
        # Pass a per-round seed to draw_sobol_samples() for reproducibility;
        # reusing the same seed every round would redraw identical Sobol points.
        x_rnd = draw_sobol_samples(bounds=x_bounds,
                                   seed=None if seed is None else seed + rnd,
                                   n=GPConstants.N_RESTART_CANDIDATES,
                                   q=1)
        # The code below is like initialize_q_batch(), but with stability checks.
        # botorch.optim.initialize_q_batch() also does a few more hacks like:
        # max_val, max_idx = torch.max(y_rnd, dim=0)
        # if max_idx not in idcs: idcs[-1] = max_idx # make sure we get the maximum
        # These hacks don't seem to help the worst cases, so we don't include them.
        x_init_batch = None
        y_init_batch = None
        try:
            with torch.no_grad():
                y_rnd = acq_function(x_rnd)
            finite_ids = torch.isfinite(y_rnd)
            x_rnd_ok = x_rnd[finite_ids]
            y_rnd_ok = y_rnd[finite_ids]
            y_rnd_std = y_rnd_ok.std()
            if torch.isfinite(y_rnd_std) and y_rnd_std > GPConstants.MIN_STD:
                # Standardize the *filtered* values so the sampled indices
                # align with x_rnd_ok (note the parentheses: subtract the
                # mean before dividing by the std).
                z = (y_rnd_ok - y_rnd_ok.mean()) / y_rnd_std
                weights = torch.exp(1.0 * z)
                bad_weights = (torch.isnan(weights).any()
                               or torch.isinf(weights).any()
                               or (weights < 0).any() or weights.sum() <= 0)
                if not bad_weights:
                    idcs = torch.multinomial(weights,
                                             GPConstants.N_RESTARTS_PER_ROUND)
                    x_init_batch = x_rnd_ok[idcs]
                    y_init_batch = y_rnd_ok[idcs]
            if x_init_batch is None and x_rnd_ok.size(0) > 0:
                idcs = torch.randperm(n=x_rnd_ok.size(0))
                x_init_batch = x_rnd_ok[idcs][:GPConstants.N_RESTARTS_PER_ROUND]
                y_init_batch = y_rnd_ok[idcs][:GPConstants.N_RESTARTS_PER_ROUND]
        except RuntimeError as e:
            logging.info('WARNING: acq_function threw RuntimeError:')
            logging.info(e)
        if x_init_batch is None: continue  # GP-based queries failed
        if x_init_batch_all is None:
            x_init_batch_all = x_init_batch
            y_init_batch_all = y_init_batch
        else:
            x_init_batch_all = torch.cat([x_init_batch_all, x_init_batch],
                                         dim=0)
            y_init_batch_all = torch.cat([y_init_batch_all, y_init_batch],
                                         dim=0)
    # If GP-based queries failed (e.g. Matern Cholesky failed) use random pts.
    if x_init_batch_all is None:
        logging.info('WARNING: all acq_function tries failed; sample randomly')
        nrnd = GPConstants.N_RESTARTS_PER_ROUND * GPConstants.N_RESTART_ROUNDS
        x_init_batch_all = draw_sobol_samples(bounds=x_bounds, n=nrnd, q=1)
    else:
        # Print logs about predicted y of the points.
        y_init_batch_all_sorted, _ = y_init_batch_all.sort(descending=True)
        logging.info('optimize_UCB y_init_batch_all scaled')
        logging.info(y_init_batch_all.size())
        logging.info(y_init_batch_all_sorted)
        y_init_batch_all_unscaled, _ = unscale_y_fxn(y_init_batch_all).sort(
            descending=True)
        logging.info('optimize_UCB y_init_batch_all unscaled')
        logging.info(y_init_batch_all_unscaled.size())
        logging.info(y_init_batch_all_unscaled)
    # ATTENTION: gen_candidates_scipy does not clean up GPU tensors, memory
    # usage sometimes grows, and gc.collect() does not help.
    # TODO: Need to file a botorch bug.
    try:
        batch_candidates, batch_acq_values = gen_candidates_scipy(
            initial_conditions=x_init_batch_all,
            acquisition_function=acq_function,
            lower_bounds=x_bounds[0],
            upper_bounds=x_bounds[1],
            options={
                'maxiter': GPConstants.MAX_ACQ_ITER,
                'method': 'L-BFGS-B'
            })  # select L-BFGS-B for reasonable speed
        assert (torch.isfinite(batch_candidates).all())
        #remove_too_close(gp_model, batch_candidates, batch_acq_values)
        # same code as in botorch.gen.get_best_candidates()
        best_acq_y, best_id = torch.max(batch_acq_values.view(-1), dim=0)
        next_x = batch_candidates[best_id].squeeze(0).detach()
    except RuntimeError as e:
        logging.info('WARNING: gen_candidates_scipy threw RuntimeError:')
        logging.info(e)
        next_x = x_init_batch_all[0].squeeze()
        best_acq_y = torch.tensor(-1000000.0).to(device=next_x.device)
    # Check x is within the [0,1] boundaries.
    if (next_x < 0).any() or (next_x > 1).any() or (next_x != next_x).any():
        print('WARNING: GP optimization returned next_x', next_x)
        next_x = torch.zeros_like(next_x)
    return next_x, best_acq_y
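# Standalone sketch of the softmax-weighted restart selection used above
# (a simplified analogue of botorch.optim.initialize_q_batch; the function
# and variable names here are ours, not from the original source):
import torch

def select_restarts(x_cand, y_cand, k, eta=1.0):
    z = (y_cand - y_cand.mean()) / y_cand.std()  # standardize acquisition values
    weights = torch.exp(eta * z)                 # softmax-style weights
    idcs = torch.multinomial(weights, k)         # draw k restarts without replacement
    return x_cand[idcs], y_cand[idcs]

x = torch.rand(100, 1, 2); y = torch.randn(100)
x_restarts, y_restarts = select_restarts(x, y, k=10)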
Example #17
def gen_batch_initial_conditions(acq_function,
                                 bounds,
                                 q,
                                 num_restarts,
                                 raw_samples,
                                 options=None):
    r"""[Copy of original botorch function]
    
    Generate a batch of initial conditions for random-restart optimziation.

    Args:
        acq_function: The acquisition function to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
        q: The number of candidates to consider.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic.
        options: Options for initial condition generation. For valid options see
            `initialize_q_batch` and `initialize_q_batch_nonneg`. If `options`
            contains a `nonnegative=True` entry, then `acq_function` is
            assumed to be non-negative (useful when using custom acquisition
            functions).

    Returns:
        A `num_restarts x q x d` tensor of initial conditions.

    Example:
        >>> qEI = qExpectedImprovement(model, best_f=0.2)
        >>> bounds = torch.tensor([[0.], [1.]])
        >>> Xinit = gen_batch_initial_conditions(
        >>>     qEI, bounds, q=3, num_restarts=25, raw_samples=500
        >>> )
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")
    batch_limit: Optional[int] = options.get("batch_limit")
    batch_initial_arms: Tensor
    factor, max_factor = 1, 5
    init_kwargs = {}
    device = bounds.device
    bounds = bounds.cpu()
    if "eta" in options:
        init_kwargs["eta"] = options.get("eta")
    if options.get("nonnegative") or is_nonnegative(acq_function):
        init_func = initialize_q_batch_nonneg
        if "alpha" in options:
            init_kwargs["alpha"] = options.get("alpha")
    else:
        init_func = initialize_q_batch

    q = 1 if q is None else q
    # the dimension the samples are drawn from
    dim = bounds.shape[-1] * q
    if dim > SobolEngine.MAXDIM and settings.debug.on():
        warnings.warn(
            f"Sample dimension q*d={dim} exceeding Sobol max dimension "
            f"({SobolEngine.MAXDIM}). Using iid samples instead.",
            SamplingWarning,
        )

    while factor < max_factor:
        with warnings.catch_warnings(record=True) as ws:
            n = raw_samples * factor
            if dim <= SobolEngine.MAXDIM:
                X_rnd = draw_sobol_samples(bounds=bounds, n=n, q=q, seed=seed)
            else:
                with manual_seed(seed):
                    # load on cpu
                    X_rnd_nlzd = torch.rand(n * dim, dtype=bounds.dtype).view(
                        n, q, bounds.shape[-1])
                X_rnd = bounds[0] + (bounds[1] - bounds[0]) * X_rnd_nlzd
            with torch.no_grad():
                if batch_limit is None:
                    batch_limit = X_rnd.shape[0]
                Y_rnd_list = []
                start_idx = 0
                while start_idx < X_rnd.shape[0]:
                    end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
                    Y_rnd_curr = acq_function(
                        X_rnd[start_idx:end_idx].to(device=device)).cpu()
                    Y_rnd_list.append(Y_rnd_curr)
                    start_idx += batch_limit
                Y_rnd = torch.cat(Y_rnd_list)
            batch_initial_conditions = init_func(
                X=X_rnd, Y=Y_rnd, n=num_restarts,
                **init_kwargs).to(device=device)
            if not any(
                    issubclass(w.category, BadInitialCandidatesWarning)
                    for w in ws):
                return batch_initial_conditions
            if factor < max_factor:
                factor += 1
                if seed is not None:
                    seed += 1  # make sure to sample different X_rnd
    warnings.warn(
        "Unable to find non-zero acquisition function values - initial conditions "
        "are being selected randomly.",
        BadInitialCandidatesWarning,
    )
    return batch_initial_conditions
def gen_batch_initial_conditions(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    q: int,
    num_restarts: int,
    raw_samples: int,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
    post_processing_init: Optional[Callable[[Tensor], Tensor]] = None,
) -> Tensor:
    """
    This function generates a batch of initial conditions for random-restart optimization

    Parameters
    ----------
    :param acq_function: the acquisition function to be optimized.
    :param bounds: a `2 x d` tensor of lower and upper bounds for each column of `X`
    :param q: number of candidates
    :param num_restarts: number of starting points for multistart acquisition function optimization
    :param raw_samples: number of samples for initialization

    Optional parameters
    -------------------
    :param options: options for candidate generation
    :param post_processing_init: A function that post processes the generated initial samples
        (e.g. so that they fulfill some constraints).

    Returns
    -------
    :return: a `num_restarts x q x d` tensor of initial conditions
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")  # pyre-ignore
    batch_limit: Optional[int] = options.get("batch_limit")  # pyre-ignore
    batch_initial_arms: Tensor
    factor, max_factor = 1, 5
    init_kwargs = {}
    if "eta" in options:
        init_kwargs["eta"] = options.get("eta")
    if options.get("nonnegative") or is_nonnegative(acq_function):
        init_func = initialize_q_batch_nonneg
        if "alpha" in options:
            init_kwargs["alpha"] = options.get("alpha")
    else:
        init_func = initialize_q_batch

    while factor < max_factor:
        with warnings.catch_warnings(record=True) as ws:
            X_rnd = draw_sobol_samples(
                bounds=bounds,
                n=raw_samples * factor,
                q=1 if q is None else q,
                seed=seed,
            )

            # Constrain the samples
            if post_processing_init is not None:
                X_rnd = post_processing_init(X_rnd)

            with torch.no_grad():
                if batch_limit is None:
                    batch_limit = X_rnd.shape[0]

                Y_rnd_list = []
                start_idx = 0
                while start_idx < X_rnd.shape[0]:
                    end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
                    Y_rnd_curr = acq_function(X_rnd[start_idx:end_idx])
                    Y_rnd_list.append(Y_rnd_curr)
                    start_idx += batch_limit
                Y_rnd = torch.cat(Y_rnd_list).to(X_rnd)

            batch_initial_conditions = init_func(X=X_rnd,
                                                 Y=Y_rnd,
                                                 n=num_restarts,
                                                 **init_kwargs)

            if not any(
                    issubclass(w.category, BadInitialCandidatesWarning)
                    for w in ws):
                return batch_initial_conditions
            if factor < max_factor:
                factor += 1
    warnings.warn(
        "Unable to find non-zero acquisition function values - initial conditions "
        "are being selected randomly.",
        BadInitialCandidatesWarning,
    )
    return batch_initial_conditions
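# Hedged sketch of the extra `post_processing_init` hook this variant adds:
# e.g. rounding one dimension so all raw samples satisfy an integrality
# constraint (the rounding rule is an assumption for illustration; qEI and
# bounds follow the docstring example of the previous copy).
def round_first_dim(X):
    X = X.clone()
    X[..., 0] = X[..., 0].round()
    return X

# Xinit = gen_batch_initial_conditions(qEI, bounds, q=3, num_restarts=25,
#                                      raw_samples=500,
#                                      post_processing_init=round_first_dim)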
Example #19
def plot_prior_samps_1d():
    config = Config(
        config_dict={
            "common": {
                "outcome_type": "single_probit",
                "target": 0.75,
                "lb": "[-3]",
                "ub": "[3]",
            },
            "default_mean_covar_factory": {},
            "song_mean_covar_factory": {},
            "monotonic_mean_covar_factory": {"monotonic_idxs": "[0]"},
        }
    )
    lb = torch.Tensor([-3])
    ub = torch.Tensor([3])
    nsamps = 10
    gridsize = 50
    grid = _dim_grid(lower=lb, upper=ub, dim=1, gridsize=gridsize)
    np.random.seed(global_seed)
    torch.random.manual_seed(global_seed)
    with gpytorch.settings.prior_mode(True):
        rbf_mean, rbf_covar = default_mean_covar_factory(config)
        rbf_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=rbf_mean,
            covar_module=rbf_covar,
        )
        # add just two samples at high and low
        rbf_model.set_train_data(
            torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
        )
        rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))

        song_mean, song_covar = song_mean_covar_factory(config)
        song_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=song_mean,
            covar_module=song_covar,
        )
        song_model.set_train_data(
            torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
        )

        song_samps = song_model(grid).sample(torch.Size([nsamps]))

        mono_mean, mono_covar = monotonic_mean_covar_factory(config)
        mono_model = MonotonicRejectionGP(
            likelihood="probit-bernoulli",
            monotonic_idxs=[0],
            mean_module=mono_mean,
            covar_module=mono_covar,
        )

        bounds_ = torch.tensor([-3.0, 3.0])[:, None]
        # Select inducing points
        mono_model.inducing_points = draw_sobol_samples(
            bounds=bounds_, n=mono_model.num_induc, q=1
        ).squeeze(1)

        inducing_points_aug = mono_model._augment_with_deriv_index(
            mono_model.inducing_points, 0
        )
        scales = ub - lb
        dummy_train_x = mono_model._augment_with_deriv_index(
            torch.Tensor([-3, 3])[:, None], 0
        )
        mono_model.model = MixedDerivativeVariationalGP(
            train_x=dummy_train_x,
            train_y=torch.LongTensor([0, 1]),
            inducing_points=inducing_points_aug,
            scales=scales,
            fixed_prior_mean=torch.Tensor([0.75]),
            covar_module=mono_covar,
            mean_module=mono_mean,
        )
        mono_samps = mono_model.sample(grid, nsamps)

    fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
    fig.tight_layout(rect=[0.01, 0.03, 1, 0.9])
    fig.suptitle("GP prior samples (probit-transformed)")
    ax[0].plot(grid.squeeze(), norm.cdf(song_samps.T), "b")
    ax[0].set_ylabel("Response Probability")
    ax[0].set_title("Linear kernel")

    ax[1].plot(grid.squeeze(), norm.cdf(rbf_samps.T), "b")
    ax[1].set_xlabel("Intensity")
    ax[1].set_title("RBF kernel (nonmonotonic)")

    ax[2].plot(grid.squeeze(), norm.cdf(mono_samps.T), "b")
    ax[2].set_title("RBF kernel (monotonic)")
    return fig
Example #20
def plot_prior_samps_2d():
    config = Config(
        config_dict={
            "common": {
                "outcome_type": "single_probit",
                "target": 0.75,
                "lb": "[-3, -3]",
                "ub": "[3, 3]",
            },
            "default_mean_covar_factory": {},
            "song_mean_covar_factory": {},
            "monotonic_mean_covar_factory": {"monotonic_idxs": "[1]"},
        }
    )
    lb = torch.Tensor([-3, -3])
    ub = torch.Tensor([3, 3])
    nsamps = 5
    gridsize = 30
    grid = _dim_grid(lower=lb, upper=ub, dim=2, gridsize=gridsize)
    np.random.seed(global_seed)
    torch.random.manual_seed(global_seed)
    with gpytorch.settings.prior_mode(True):
        rbf_mean, rbf_covar = default_mean_covar_factory(config)
        rbf_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=rbf_mean,
            covar_module=rbf_covar,
        )
        # add just two samples at high and low
        rbf_model.set_train_data(torch.Tensor([-3, -3])[None, :], torch.LongTensor([0]))
        rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))

        song_mean, song_covar = song_mean_covar_factory(config)
        song_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=song_mean,
            covar_module=song_covar,
        )
        song_model.set_train_data(
            torch.Tensor([-3, -3])[None, :], torch.LongTensor([0])
        )

        song_samps = song_model(grid).sample(torch.Size([nsamps]))

        mono_mean, mono_covar = monotonic_mean_covar_factory(config)
        mono_model = MonotonicRejectionGP(
            likelihood="probit-bernoulli",
            monotonic_idxs=[1],
            mean_module=mono_mean,
            covar_module=mono_covar,
            num_induc=1000,
        )

        bounds_ = torch.tensor([-3.0, -3.0, 3.0, 3.0]).reshape(2, -1)
        # Select inducing points
        mono_model.inducing_points = draw_sobol_samples(
            bounds=bounds_, n=mono_model.num_induc, q=1
        ).squeeze(1)

        inducing_points_aug = mono_model._augment_with_deriv_index(
            mono_model.inducing_points, 0
        )
        scales = ub - lb
        dummy_train_x = mono_model._augment_with_deriv_index(
            torch.Tensor([-3, 3])[None, :], 0
        )
        mono_model.model = MixedDerivativeVariationalGP(
            train_x=dummy_train_x,
            train_y=torch.LongTensor([0]),
            inducing_points=inducing_points_aug,
            scales=scales,
            fixed_prior_mean=torch.Tensor([0.75]),
            covar_module=mono_covar,
            mean_module=mono_mean,
        )
        mono_samps = mono_model.sample(grid, nsamps)

    intensity_grid = np.linspace(-3, 3, gridsize)
    fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
    fig.tight_layout(rect=[0, 0.03, 1, 0.9])
    fig.suptitle("Prior samples")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in song_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[0].plot(intensity_grid, plotsamps, "b")
    ax[0].set_title("Linear kernel model")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in rbf_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[1].plot(intensity_grid, plotsamps, "b")
    ax[1].set_title("Nonmonotonic RBF kernel model")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in mono_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[2].plot(intensity_grid, plotsamps, "b")
    ax[2].set_title("Monotonic RBF kernel model")

    return fig
Example #21
        return self.obj_inst.cons_value

    def __call__(self, x_in, with_noise=False):
        return self.evaluate(x_in, with_noise=with_noise)


if __name__ == "__main__":

    # dim = 8
    # dim = 4
    dim = 5
    obj_fun = QuadrupedObj(dim=dim)
    cons_fun = QuadrupedCons(obj_fun)

    train_x = draw_sobol_samples(bounds=torch.tensor([[0.] * dim, [1.] * dim]),
                                 n=1,
                                 q=1).squeeze(1)  # Initial candidate; stability is checked below

    # # 4D points:
    # # train_x = torch.tensor([[3.3298e-01, 1.0000e+00, 4.5455e-01, 1.3440e-02]])
    # train_x = torch.tensor([[0.6223, 0.9955, 0.4433, 0.5836]]) # Max height 2020 Jul 27, after 13:27 train_y_obj_min: tensor(0.1400)
    # # train_x = torch.tensor([[0.7979372, 0.9929017, 0.71484804, 0.03631881]]) # Max height 2020 Jul 27, after 13:27 train_y_obj_min: tensor(0.1600)

    val_cost = obj_fun(train_x)
    val_constraint = cons_fun(train_x)
    is_stable = val_constraint[0, 1] == +1

    logger.info("Entered values:")
    logger.info("    Label:            {0:s}".format("Success!" if is_stable ==
                                                     True else "Failure (!)"))
    logger.info("    Cost value:       {0:5f}".format(val_cost.item()))
Example #22
def gen_value_function_initial_conditions(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    num_restarts: int,
    raw_samples: int,
    current_model: Model,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
) -> Tensor:
    r"""Generate a batch of smart initializations for optimizing
    the value function of qKnowledgeGradient.

    This function generates initial conditions for optimizing the inner problem of
    KG, i.e. its value function, using the maximizer of the posterior objective.
    Intuitively, the maximizer of the fantasized posterior will often be close to a
    maximizer of the current posterior. This function uses that fact to generate the
    initial conditions for the fantasy points. Specifically, a fraction of `1 -
    frac_random` (see options) of raw samples is generated by sampling from the set of
    maximizers of the posterior objective (obtained via random restart optimization)
    according to a softmax transformation of their respective values. This means that
    this initialization strategy internally solves an acquisition function
    maximization problem. The remaining raw samples are generated using
    `draw_sobol_samples`. All raw samples are then evaluated, and the initial
    conditions are selected according to the standard initialization strategy in
    'initialize_q_batch' individually for each inner problem.

    Args:
        acq_function: The value function instance to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of
            task features.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic.
        current_model: The model of the KG acquisition function that was used to
            generate the fantasy model of the value function.
        options: Options for initial condition generation. These contain all
            settings for the standard heuristic initialization from
            `gen_batch_initial_conditions`. In addition, they contain
            `frac_random` (the fraction of fully random fantasy points),
            `num_inner_restarts` and `raw_inner_samples` (the number of random
            restarts and raw samples for solving the posterior objective
            maximization problem, respectively) and `eta` (temperature parameter
            for sampling heuristic from posterior objective maximizers).

    Returns:
        A `num_restarts x batch_shape x q x d` tensor that can be used as initial
        conditions for `optimize_acqf()`. Here `batch_shape` is the batch shape
        of value function model.

    Example:
        >>> fant_X = torch.rand(5, 1, 2)
        >>> fantasy_model = model.fantasize(fant_X, SobolQMCNormalSampler(16))
        >>> value_function = PosteriorMean(fantasy_model)
        >>> bounds = torch.tensor([[0., 0.], [1., 1.]])
        >>> Xinit = gen_value_function_initial_conditions(
        >>>     value_function, bounds, num_restarts=10, raw_samples=512,
        >>>     options={"frac_random": 0.25},
        >>> )
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")
    frac_random: float = options.get("frac_random", 0.6)
    if not 0 < frac_random < 1:
        raise ValueError(
            f"frac_random must take on values in (0,1). Value: {frac_random}")

    # compute maximizer of the current value function
    value_function = _get_value_function(
        model=current_model,
        objective=acq_function.objective,
        sampler=getattr(acq_function, "sampler", None),
        project=getattr(acq_function, "project", None),
    )
    from botorch.optim.optimize import optimize_acqf

    fantasy_cands, fantasy_vals = optimize_acqf(
        acq_function=value_function,
        bounds=bounds,
        q=1,
        num_restarts=options.get("num_inner_restarts", 20),
        raw_samples=options.get("raw_inner_samples", 1024),
        return_best_only=False,
        options={
            k: v
            for k, v in options.items()
            if k not in ("frac_random", "num_inner_restarts",
                         "raw_inner_samples", "eta")
        },
    )

    batch_shape = acq_function.model.batch_shape
    # sampling from the optimizers
    n_value = int((1 - frac_random) * raw_samples)  # number of non-random ICs
    if n_value > 0:
        eta = options.get("eta", 2.0)
        weights = torch.exp(eta * standardize(fantasy_vals))
        idx = batched_multinomial(
            weights=weights.expand(*batch_shape, -1),
            num_samples=n_value,
            replacement=True,
        ).permute(-1, *range(len(batch_shape)))
        resampled = fantasy_cands[idx]
    else:
        resampled = torch.empty(0,
                                *batch_shape,
                                1,
                                bounds.shape[-1],
                                dtype=bounds.dtype)
    # add qMC samples
    randomized = draw_sobol_samples(bounds=bounds,
                                    n=raw_samples - n_value,
                                    q=1,
                                    batch_shape=batch_shape,
                                    seed=seed)
    # full set of raw samples
    X_rnd = torch.cat([resampled, randomized], dim=0)

    # evaluate the raw samples
    with torch.no_grad():
        Y_rnd = acq_function(X_rnd)

    # select the restart points using the heuristic
    return initialize_q_batch(X=X_rnd,
                              Y=Y_rnd,
                              n=num_restarts,
                              eta=options.get("eta", 2.0))
Example #23
def gen_batch_initial_conditions(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    q: int,
    num_restarts: int,
    raw_samples: int,
    fixed_features: Optional[Dict[int, float]] = None,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
    equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> Tensor:
    r"""Generate a batch of initial conditions for random-restart optimziation.

    TODO: Support t-batches of initial conditions.

    Args:
        acq_function: The acquisition function to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
        q: The number of candidates to consider.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic. Note: if `sample_around_best` is True (the default is False),
            then `2 * raw_samples` samples are used.
        fixed_features: A map `{feature_index: value}` for features that
            should be fixed to a particular value during generation.
        options: Options for initial condition generation. For valid options see
            `initialize_q_batch` and `initialize_q_batch_nonneg`. If `options`
            contains a `nonnegative=True` entry, then `acq_function` is
            assumed to be non-negative (useful when using custom acquisition
            functions). In addition, an "init_batch_limit" option can be passed
            to specify the batch limit for the initialization. This is useful
            for avoiding memory limits when computing the batch posterior over
            raw samples.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.

    Returns:
        A `num_restarts x q x d` tensor of initial conditions.

    Example:
        >>> qEI = qExpectedImprovement(model, best_f=0.2)
        >>> bounds = torch.tensor([[0.], [1.]])
        >>> Xinit = gen_batch_initial_conditions(
        >>>     qEI, bounds, q=3, num_restarts=25, raw_samples=500
        >>> )
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")
    batch_limit: Optional[int] = options.get(
        "init_batch_limit", options.get("batch_limit")
    )
    batch_initial_arms: Tensor
    factor, max_factor = 1, 5
    init_kwargs = {}
    device = bounds.device
    bounds_cpu = bounds.cpu()
    if "eta" in options:
        init_kwargs["eta"] = options.get("eta")
    if options.get("nonnegative") or is_nonnegative(acq_function):
        init_func = initialize_q_batch_nonneg
        if "alpha" in options:
            init_kwargs["alpha"] = options.get("alpha")
    else:
        init_func = initialize_q_batch

    q = 1 if q is None else q
    # the dimension the samples are drawn from
    effective_dim = bounds.shape[-1] * q
    if effective_dim > SobolEngine.MAXDIM and settings.debug.on():
        warnings.warn(
            f"Sample dimension q*d={effective_dim} exceeding Sobol max dimension "
            f"({SobolEngine.MAXDIM}). Using iid samples instead.",
            SamplingWarning,
        )

    while factor < max_factor:
        with warnings.catch_warnings(record=True) as ws:
            n = raw_samples * factor
            if inequality_constraints is None and equality_constraints is None:
                if effective_dim <= SobolEngine.MAXDIM:
                    X_rnd = draw_sobol_samples(bounds=bounds_cpu, n=n, q=q, seed=seed)
                else:
                    with manual_seed(seed):
                        # load on cpu
                        X_rnd_nlzd = torch.rand(
                            n, q, bounds_cpu.shape[-1], dtype=bounds.dtype
                        )
                    X_rnd = bounds_cpu[0] + (bounds_cpu[1] - bounds_cpu[0]) * X_rnd_nlzd
            else:
                X_rnd = (
                    get_polytope_samples(
                        n=n * q,
                        bounds=bounds,
                        inequality_constraints=inequality_constraints,
                        equality_constraints=equality_constraints,
                        seed=seed,
                        n_burnin=options.get("n_burnin", 10000),
                        thinning=options.get("thinning", 32),
                    )
                    .view(n, q, -1)
                    .cpu()
                )
            # sample points around best
            if options.get("sample_around_best", False):
                X_best_rnd = sample_points_around_best(
                    acq_function=acq_function,
                    n_discrete_points=n * q,
                    sigma=options.get("sample_around_best_sigma", 1e-3),
                    bounds=bounds,
                    subset_sigma=options.get("sample_around_best_subset_sigma", 1e-1),
                    prob_perturb=options.get("sample_around_best_prob_perturb"),
                )
                if X_best_rnd is not None:
                    X_rnd = torch.cat(
                        [
                            X_rnd,
                            X_best_rnd.view(n, q, bounds.shape[-1]).cpu(),
                        ],
                        dim=0,
                    )
            X_rnd = fix_features(X_rnd, fixed_features=fixed_features)
            with torch.no_grad():
                if batch_limit is None:
                    batch_limit = X_rnd.shape[0]
                Y_rnd_list = []
                start_idx = 0
                while start_idx < X_rnd.shape[0]:
                    end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
                    Y_rnd_curr = acq_function(
                        X_rnd[start_idx:end_idx].to(device=device)
                    ).cpu()
                    Y_rnd_list.append(Y_rnd_curr)
                    start_idx += batch_limit
                Y_rnd = torch.cat(Y_rnd_list)
            batch_initial_conditions = init_func(
                X=X_rnd, Y=Y_rnd, n=num_restarts, **init_kwargs
            ).to(device=device)
            if not any(issubclass(w.category, BadInitialCandidatesWarning) for w in ws):
                return batch_initial_conditions
            if factor < max_factor:
                factor += 1
                if seed is not None:
                    seed += 1  # make sure to sample different X_rnd
    warnings.warn(
        "Unable to find non-zero acquisition function values - initial conditions "
        "are being selected randomly.",
        BadInitialCandidatesWarning,
    )
    return batch_initial_conditions
Example #24
def get_initial_evaluations(which_objective,function_obj,function_cons,cfg_Ninit_points,with_noise):

    assert which_objective in obj_fun_list, "Objective function <which_objective> must be {0:s}".format(str(obj_fun_list))

    # Get initial evaluation:
    if which_objective == "hart6D" or which_objective == "debug6D":
        # train_x = torch.Tensor([[0.32124528, 0.00573107, 0.07254258, 0.90988337, 0.00164314, 0.41116992]]) # Randomly computed  |  initial location 1
        # train_x = torch.Tensor([[0.1859, 0.3065, 0.0886, 0.8393, 0.1175, 0.3123]]) # Randomly computed (safe as well, according to the new constraint, and robust to noise_std = 0.01)  |  initial location 2
        train_x = torch.Tensor([[0.4493, 0.6189, 0.2756, 0.7961, 0.2482, 0.9121]]) # Randomly computed (safe as well, according to the new constraint, and robust to noise_std = 0.01)  |  initial location 3
 
    if which_objective == "micha10D":
        # train_x = torch.Tensor([[0.65456088, 0.22632844, 0.50252072, 0.80747863, 0.11509346, 0.73440179, 0.06093292, 0.464906, 0.01544494, 0.90179168]]) # Randomly computed
        train_x = torch.Tensor([[0.7139, 0.6342, 0.2331, 0.8299, 0.7615, 0.8232, 0.9008, 0.1899, 0.6961, 0.3240]])

    # Get initial evaluation in g(x):
    if which_objective == "simple1D":

        # Safe/unsafe bounds according to classireg.objectives.simple1D.Simple1D.true_minimum()
        safe_area1 = torch.Tensor(([0.0],[0.0834]))
        safe_area2 = torch.Tensor(([0.4167],[1.0]))
        unsafe_area = torch.Tensor(([0.0834],[0.4167]))

        # Sample from within the bounds:
        train_x_unsafe = draw_sobol_samples(bounds=unsafe_area,n=cfg_Ninit_points.unsafe,q=1).squeeze(1) # Get only unstable evaluations
        train_x_area2 = draw_sobol_samples(bounds=safe_area2,n=cfg_Ninit_points.safe,q=1).squeeze(1) # Get only stable evaluations

        # Concatenate:
        train_x = torch.cat([train_x_unsafe,train_x_area2])

    if which_objective == "branin2D":
        # train_x = draw_sobol_samples(bounds=torch.Tensor(([0.0]*2,[1.0]*2)),n=cfg_Ninit_points.total,q=1).squeeze(1)
        train_x = torch.tensor([[0.6255, 0.5784]])

    if which_objective == "camel2D":
        train_x = torch.tensor([[0.9846, 0.0587]])        

    if which_objective == "quadruped8D":
        # train_x = torch.tensor([[0.9846, 0.0587, 0.9846, 0.9846, 0.9846, 0.9846, 0.9846, 0.9846]]) 
        # train_x = torch.tensor([[1,0,0,0,1,1,0.666,0.666]]) # Corresponds to 21.07.2020 Recording: after 13:52 (5 experiments)
        # train_x = torch.tensor([[0.0,0.0,1.0,1.0]]) # Corresponds to 21.07.2020, 23.07.2020, 27.07.2020 Same as above but without kp_joint_min and kd_joint_max
        train_x = torch.tensor([[0.2,0.2,0.6666666666666666,0.6666666666666666,1.0]]) # Corresponds to 29.07.2020

    if which_objective == "eggs2D":
        train_x = torch.Tensor([[0.5578, 0.0558]])

    if which_objective == "walker":
        # train_x = torch.tensor([[0.5  ,0.25, 0.5,  0.25, 0.5,  0.75]]) + 0.12*torch.randn(6) # Found with brute force, x_in \in 10**[-0.5,0.5], multiplicative; Stable 94 / 100
        # train_x = torch.tensor([[0.67110407, 0.24342352, 0.71659071, 0.37363523, 0.52991535, 0.49756885]]) # Stable 65 / 100 | 0.12
        train_x = torch.tensor([[0.26790537, 0.3768979 , 0.49344913, 0.18835246, 0.57790874, 0.7599986 ]]) # Stable 40 / 100 | 0.12 (never actually used)
        # train_x = torch.tensor([[0.38429936, 0.26406187, 0.59825079, 0.24133024, 0.43413793, 0.77263459]]) # Stable 97 / 100 | 0.12 (never actually used)

    if which_objective == "shubert4D":
        train_x = torch.tensor([[0.7162, 0.3331, 0.8390, 0.8885]]) # 11.1146

    # Evaluate objective and constraint(s):
    # NOTE: Do NOT change the order!!

    # Get initial evaluations in f(x):
    train_y_obj = function_obj(train_x,with_noise=with_noise)

    # Get initial evaluations in g(x):
    train_x_cons = train_x
    train_yl_cons = function_cons(train_x_cons,with_noise=False)

    # Check that the initial point is stable in micha10D:
    # pdb.set_trace()

    # Get rid of those train_y_obj for which the constraint is violated:
    train_y_obj = train_y_obj[train_yl_cons[:,1] == +1]
    train_x_obj = train_x[train_yl_cons[:,1] == +1,:]

    logger.info("train_x_obj: {0:s}".format(str(train_x_obj)))
    logger.info("train_y_obj: {0:s}".format(str(train_y_obj)))
    logger.info("train_x_cons: {0:s}".format(str(train_x_cons)))
    logger.info("train_yl_cons: {0:s}".format(str(train_yl_cons)))

    return train_x_obj, train_y_obj, train_x_cons, train_yl_cons