Example #1
    def test_raise_botorch_exceptions(self):
        with self.assertRaises(BotorchError):
            raise BotorchError("message")
        with self.assertRaises(CandidateGenerationError):
            raise CandidateGenerationError("message")
        with self.assertRaises(UnsupportedError):
            raise UnsupportedError("message")
Example #2
    def pareto_Y(self) -> Tensor:
        r"""This returns the non-dominated set.

        Returns:
            A `n_pareto x m`-dim tensor of outcomes.
        """
        try:
            return -self._neg_pareto_Y
        except AttributeError:
            raise BotorchError("pareto_Y has not been initialized")
Example #3
    def pareto_Y(self) -> Tensor:
        r"""This returns the non-dominated set.

        Note: Internally, we store the negative Pareto set (minimization).

        Returns:
            A `n_pareto x m`-dim tensor of outcomes.
        """
        if not hasattr(self, "_pareto_Y"):
            raise BotorchError("pareto_Y has not been initialized")
        return -self._pareto_Y
Example #4
    def pareto_Y(self) -> Tensor:
        r"""This returns the non-dominated set.

        Note: in the batch case, this Pareto set is padded by repeating a
        Pareto point so that all batches have the same size Pareto set.

        Note: Internally, we store the negative Pareto set (minimization).

        Returns:
            A `(batch_shape) x max_n_pareto x m`-dim tensor of outcomes.
        """
        if not hasattr(self, "_pareto_Y"):
            raise BotorchError("pareto_Y has not been initialized")
        return -self._pareto_Y
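The pattern shared by all three variants above is the same: the decomposition is computed internally for minimization, so the stored set is negated and the public property negates it back, raising a BotorchError until it has been populated. Below is a minimal self-contained sketch of that pattern; the _ParetoStore class and its set_Y method are illustrative stand-ins, not BoTorch API.

import torch
from torch import Tensor
from botorch.exceptions.errors import BotorchError


class _ParetoStore:
    """Illustrative holder mirroring the storage convention above."""

    def set_Y(self, Y: Tensor) -> None:
        # store the negated outcomes (internal minimization convention)
        self._pareto_Y = -Y

    @property
    def pareto_Y(self) -> Tensor:
        if not hasattr(self, "_pareto_Y"):
            raise BotorchError("pareto_Y has not been initialized")
        return -self._pareto_Y


store = _ParetoStore()
# store.pareto_Y here would raise BotorchError("pareto_Y has not been initialized")
store.set_Y(torch.tensor([[1.0, 2.0]]))
print(store.pareto_Y)  # tensor([[1., 2.]])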
Example #5
def extract_batch_covar(mt_mvn: MultitaskMultivariateNormal) -> LazyTensor:
    r"""Extract a batched independent covariance matrix from an MTMVN.

    Args:
        mt_mvn: A multi-task multivariate normal with a block diagonal
            covariance matrix.

    Returns:
        A lazy covariance matrix consisting of a batch of the blocks of
            the diagonal of the MultitaskMultivariateNormal.
    """
    lazy_covar = mt_mvn.lazy_covariance_matrix
    if not isinstance(lazy_covar, BlockDiagLazyTensor):
        raise BotorchError(
            f"Expected BlockDiagLazyTensor, but got {type(lazy_covar)}.")
    return lazy_covar.base_lazy_tensor
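A usage sketch, assuming extract_batch_covar above is in scope; it targets the LazyTensor-era GPyTorch API that the snippet itself uses (BlockDiagLazyTensor, lazify):

import torch
from gpytorch.distributions import MultitaskMultivariateNormal
from gpytorch.lazy import BlockDiagLazyTensor, lazify

# a batch of t=2 independent 3 x 3 blocks -> a 6 x 6 block-diagonal covariance
covar = BlockDiagLazyTensor(lazify(torch.eye(3).repeat(2, 1, 1)))
mean = torch.zeros(3, 2)  # n=3 points, t=2 tasks
mt_mvn = MultitaskMultivariateNormal(mean, covar, interleaved=False)
print(extract_batch_covar(mt_mvn).shape)  # torch.Size([2, 3, 3])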
Example #6
def columnwise_clamp(
    X: Tensor,
    lower: Optional[Union[float, Tensor]] = None,
    upper: Optional[Union[float, Tensor]] = None,
    raise_on_violation: bool = False,
) -> Tensor:
    r"""Clamp values of a Tensor in column-wise fashion (with support for t-batches).

    This function is useful in conjunction with optimizers from the torch.optim
    package, which don't natively handle constraints. If you apply this after
    a gradient step you can be fancy and call it "projected gradient descent".
    This function is also useful for post-processing candidates generated by the
    scipy optimizer that satisfy bounds only up to numerical accuracy.

    Args:
        X: The `b x n x d` input tensor. If 2-dimensional, `b` is assumed to be 1.
        lower: The column-wise lower bounds. If scalar, apply bound to all columns.
        upper: The column-wise upper bounds. If scalar, apply bound to all columns.
        raise_on_violation: If `True`, raise an exception when the elements in `X`
            are out of the specified bounds (up to numerical accuracy). This is
            useful for post-processing candidates generated by optimizers that
            satisfy imposed bounds only up to numerical accuracy.

    Returns:
        The clamped tensor.
    """
    min_bounds = _expand_bounds(lower, X)
    max_bounds = _expand_bounds(upper, X)
    if min_bounds is not None and max_bounds is not None:
        if torch.any(min_bounds > max_bounds):
            raise ValueError("Minimum values must be <= maximum values")
    Xout = X
    if min_bounds is not None:
        # elementwise maximum clamps each value from below
        Xout = Xout.max(min_bounds)
    if max_bounds is not None:
        Xout = Xout.min(max_bounds)
    if raise_on_violation and not torch.allclose(Xout, X):
        raise BotorchError("Original value(s) are out of bounds.")
    return Xout
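A usage sketch, assuming the columnwise_clamp defined above is in scope (it also depends on the private _expand_bounds helper from the same module):

import torch

X = torch.tensor([[-0.5, 0.2], [0.7, 1.3]])

# scalar bounds apply to every column
print(columnwise_clamp(X, lower=0.0, upper=1.0))
# tensor([[0.0000, 0.2000],
#         [0.7000, 1.0000]])

# per-column bounds broadcast over the batch dimension
print(columnwise_clamp(X, lower=torch.tensor([0.0, 0.5])))
# tensor([[0.0000, 0.5000],
#         [0.7000, 1.3000]])

# with raise_on_violation=True, any actual clamping raises a BotorchError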
Example #7
def _set_transformed_inputs(mll: MarginalLogLikelihood) -> None:
    r"""Update training inputs with transformed inputs.

    Args:
        mll: The marginal log likelihood.
    """
    models = (
        mll.model.models if isinstance(mll, SumMarginalLogLikelihood) else [mll.model]
    )
    for m in models:
        if hasattr(m, "input_transform"):
            X_tf = m.input_transform.set_train_data_transform(m.train_inputs[0])
            if not hasattr(m, "set_train_data"):
                raise BotorchError(
                    "fit_gpytorch_model requires that a model has a set_train_data "
                    "method when an input_transform is used."
                )
            m.set_train_data(X_tf, strict=False)
            # TODO: override set_train_data in HeteroskedasticSingleTaskGP to do this
            # automatically
            if isinstance(m, HeteroskedasticSingleTaskGP):
                m.likelihood.noise_covar.noise_model.set_train_data(X_tf, strict=False)
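One way to exercise the error path is with stand-in mock objects rather than real BoTorch models; a sketch, assuming _set_transformed_inputs above is in scope:

import torch
from unittest import mock
from botorch.exceptions.errors import BotorchError

# a "model" that has an input_transform but no set_train_data method
model = mock.Mock(spec=["input_transform", "train_inputs"])
model.train_inputs = (torch.rand(4, 2),)

try:
    _set_transformed_inputs(mock.Mock(model=model))
except BotorchError as e:
    print(e)  # fit_gpytorch_model requires that a model has a set_train_data ...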
Example #8
    def __init__(self,
                 outcomes: Optional[List[int]] = None,
                 num_outcomes: Optional[int] = None) -> None:
        r"""Initialize Objective.

        Args:
            outcomes: A list of the `m'` outcome indices that the objective
                should be restricted to.
            num_outcomes: The total number of outcomes `m`.
        """
        super().__init__()
        if outcomes is not None:
            if len(outcomes) < 2:
                raise BotorchTensorDimensionError(
                    "Must specify at least two outcomes for MOO.")
            if any(i < 0 for i in outcomes):
                if num_outcomes is None:
                    raise BotorchError(
                        "num_outcomes is required if any outcomes are less than 0."
                    )
                outcomes = normalize_indices(outcomes, num_outcomes)
            self.register_buffer("outcomes",
                                 torch.tensor(outcomes, dtype=torch.long))
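For reference, normalize_indices (imported from botorch.utils.transforms) maps negative indices onto the range [0, num_outcomes); for example:

from botorch.utils.transforms import normalize_indices

print(normalize_indices([0, -1], d=4))  # [0, 3]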
Example #9
    def test_botorch_exception_hierarchy(self):
        self.assertIsInstance(BotorchError(), Exception)
        self.assertIsInstance(CandidateGenerationError(), BotorchError)
        self.assertIsInstance(InputDataError(), BotorchError)
        self.assertIsInstance(UnsupportedError(), BotorchError)
        self.assertIsInstance(BotorchTensorDimensionError(), BotorchError)
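Because every concrete error subclasses BotorchError, a single except clause catches them all; a minimal illustration:

from botorch.exceptions.errors import BotorchError, UnsupportedError

try:
    raise UnsupportedError("not supported")
except BotorchError as e:
    print(f"caught {type(e).__name__}: {e}")  # caught UnsupportedError: not supported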
Example #10
    def __init__(
        self,
        inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        equality_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        bounds: Optional[Tensor] = None,
        interior_point: Optional[Tensor] = None,
    ) -> None:
        r"""Initialize PolytopeSampler.

        Args:
            inequality_constraints: Tensors `(A, b)` describing inequality
                constraints `A @ x <= b`, where `A` is a `n_ineq_con x d`-dim
                Tensor and `b` is a `n_ineq_con x 1`-dim Tensor, with `n_ineq_con`
                the number of inequalities and `d` the dimension of the sample space.
            equality_constraints: Tensors `(C, d)` describing the equality constraints
                `C @ x = d`, where `C` is a `n_eq_con x d`-dim Tensor and `d` is a
                `n_eq_con x 1`-dim Tensor with `n_eq_con` the number of equalities.
            bounds: A `2 x d`-dim tensor of box bounds, where `inf` (`-inf`) means
                that the respective dimension is unbounded above (below).
            interior_point: A `d x 1`-dim Tensor representing a point in the
                (relative) interior of the polytope. If omitted, determined
                automatically by solving a Linear Program.
        """
        if inequality_constraints is None:
            if bounds is None:
                raise BotorchError(
                    "PolytopeSampler requires either inequality constraints or bounds."
                )
            A = torch.empty(0,
                            bounds.shape[-1],
                            dtype=bounds.dtype,
                            device=bounds.device)
            b = torch.empty(0, 1, dtype=bounds.dtype, device=bounds.device)
        else:
            A, b = inequality_constraints
        if bounds is not None:
            # add inequality constraints for bounds
            # TODO: make sure there are no duplicate constraints
            A2, b2 = _convert_bounds_to_inequality_constraints(bounds=bounds)
            A = torch.cat([A, A2], dim=0)
            b = torch.cat([b, b2], dim=0)
        self.A = A
        self.b = b
        self.equality_constraints = equality_constraints

        if equality_constraints is not None:
            self.C, self.d = equality_constraints
            U, S, Vh = torch.linalg.svd(self.C)
            r = torch.nonzero(S).size(0)  # rank of matrix C
            # orthonormal basis of the null space of C, satisfying
            # C @ nullC = 0 and nullC.T @ nullC = I
            self.nullC = Vh[r:, :].transpose(-1, -2)
            # Using the change of variables x = x0 + nullC @ y, a sample y
            # must satisfy A @ nullC @ y <= b - A @ x0; the equality
            # constraint is then automatically satisfied since x0 satisfies it.
        else:
            self.C = None
            self.d = None
            self.nullC = torch.eye(self.A.size(-1),
                                   dtype=self.A.dtype,
                                   device=self.A.device)

        self.new_A = self.A @ self.nullC  # doesn't depend on the initial point

        # initial point for the original, not transformed, problem
        if interior_point is not None:
            if self.feasible(interior_point):
                self.x0 = interior_point
            else:
                raise ValueError("The given input point is not feasible.")
        else:
            self.x0 = self.find_interior_point()
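PolytopeSampler only sets up the problem; drawing is implemented by subclasses. A usage sketch, assuming the HitAndRunPolytopeSampler subclass from botorch.utils.sampling:

import torch
from botorch.utils.sampling import HitAndRunPolytopeSampler

bounds = torch.tensor([[0.0, 0.0], [1.0, 2.0]])  # 2 x d box bounds
sampler = HitAndRunPolytopeSampler(bounds=bounds)  # interior point found via LP
samples = sampler.draw(n=5)  # a 5 x 2 tensor of points inside the box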
Example #11
def infer_reference_point(
    pareto_Y: Tensor,
    max_ref_point: Optional[Tensor] = None,
    scale: float = 0.1,
    scale_max_ref_point: bool = False,
) -> Tensor:
    r"""Get reference point for hypervolume computations.

    This sets the reference point to be `ref_point = nadir - scale * range`
    (`scale=0.1` by default) when no point in `pareto_Y` is better than
    the `max_ref_point`.

    [Ishibuchi2011]_ find 0.1 to be a robust multiplier for scaling the
    nadir point.

    Note: this assumes maximization of all objectives.

    Args:
        pareto_Y: A `n x m`-dim tensor of Pareto-optimal points.
        max_ref_point: A `m` dim tensor indicating the maximum reference point.
        scale: A multiplier used to scale back the reference point based on the
            range of each objective.
        scale_max_ref_point: A boolean indicating whether to apply scaling to
            the max_ref_point based on the range of each objective.

    Returns:
        A `m`-dim tensor containing the reference point.
    """

    if pareto_Y.shape[0] == 0:
        if max_ref_point is None:
            raise BotorchError(
                "Empty pareto set and no max ref point provided")
        if scale_max_ref_point:
            return max_ref_point - scale * max_ref_point.abs()
        return max_ref_point
    if max_ref_point is not None:
        better_than_ref = (pareto_Y > max_ref_point).all(dim=-1)
    else:
        better_than_ref = torch.ones(
            pareto_Y.shape[:1], dtype=torch.bool, device=pareto_Y.device
        )
    if max_ref_point is not None and better_than_ref.any():
        Y_range = pareto_Y[better_than_ref].max(dim=0).values - max_ref_point
        if scale_max_ref_point:
            return max_ref_point - scale * Y_range
        return max_ref_point
    elif pareto_Y.shape[0] == 1:
        # no points better than max_ref_point and only a single observation
        # subtract MIN_Y_RANGE to handle the case that pareto_Y is a singleton
        # with objective value of 0.
        return (pareto_Y -
                scale * pareto_Y.abs().clamp_min(MIN_Y_RANGE)).view(-1)
    # no points better than max_ref_point and multiple observations
    # make sure that each dimension of the nadir point is no greater than
    # the max_ref_point
    nadir = pareto_Y.min(dim=0).values
    if max_ref_point is not None:
        nadir = torch.min(nadir, max_ref_point)
    ideal = pareto_Y.max(dim=0).values
    # handle case where all values for one objective are the same
    Y_range = (ideal - nadir).clamp_min(MIN_Y_RANGE)
    return nadir - scale * Y_range
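A quick check of the default branch (no max_ref_point, multiple Pareto points), assuming infer_reference_point above is in scope:

import torch

pareto_Y = torch.tensor([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]])
print(infer_reference_point(pareto_Y))
# nadir = [1., 1.], range = [2., 2.] -> tensor([0.8000, 0.8000])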