def __init__(
    self,
    model: Model,
    posterior_transform: Optional[PosteriorTransform] = None,
    **kwargs,
) -> None:
    r"""Base constructor for analytic acquisition functions.

    Args:
        model: A fitted single-outcome model.
        posterior_transform: A PosteriorTransform. If using a multi-output
            model, a PosteriorTransform that transforms the multi-output
            posterior into a single-output posterior is required.
    """
    super().__init__(model=model)
    posterior_transform = self._deprecate_acqf_objective(
        posterior_transform=posterior_transform,
        objective=kwargs.get("objective"),
    )
    if posterior_transform is None:
        if model.num_outputs != 1:
            raise UnsupportedError(
                "Must specify a posterior transform when using a "
                "multi-output model."
            )
    else:
        if not isinstance(posterior_transform, PosteriorTransform):
            raise UnsupportedError(
                "AnalyticAcquisitionFunctions only support PosteriorTransforms."
            )
    self.posterior_transform = posterior_transform
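
# --- Usage sketch (not part of the original source) ---
# A minimal, hypothetical example of why the posterior-transform check above
# matters: a multi-output model must be scalarized before an analytic
# acquisition function will accept it. Assumes the standard BoTorch
# SingleTaskGP, ScalarizedPosteriorTransform, and ExpectedImprovement classes.
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import ExpectedImprovement
from botorch.acquisition.objective import ScalarizedPosteriorTransform

train_X = torch.rand(10, 2)
train_Y = torch.rand(10, 2)  # two outputs -> model.num_outputs == 2
model = SingleTaskGP(train_X, train_Y)

# Without a posterior_transform this constructor would raise
# UnsupportedError, since model.num_outputs != 1.
transform = ScalarizedPosteriorTransform(weights=torch.tensor([1.0, -0.5]))
acqf = ExpectedImprovement(model, best_f=0.5, posterior_transform=transform)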
@classmethod
def _deprecate_acqf_objective(
    cls,
    posterior_transform: Optional[Callable[[Posterior], Posterior]],
    objective: Optional[Module],
) -> Optional[Callable[[Posterior], Posterior]]:
    from botorch.acquisition.objective import (
        ScalarizedObjective,
        ScalarizedPosteriorTransform,
    )

    if objective is None:
        return posterior_transform
    warnings.warn(
        f"{cls.__name__} got a non-MC `objective`. The non-MC "
        "AcquisitionObjectives and the `objective` argument to "
        "AnalyticAcquisitionFunctions are DEPRECATED and will be removed in "
        "the next version. Use `posterior_transform` instead.",
        DeprecationWarning,
    )
    if not isinstance(objective, ScalarizedObjective):
        raise UnsupportedError(
            f"{cls.__name__} only supports ScalarizedObjective "
            "(DEPRECATED) type objectives."
        )
    return ScalarizedPosteriorTransform(
        weights=objective.weights, offset=objective.offset
    )
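
# --- Usage sketch (not part of the original source) ---
# Hypothetical illustration of the deprecation shim above: passing the legacy
# `objective` keyword emits a DeprecationWarning and is converted into an
# equivalent ScalarizedPosteriorTransform. Reuses `model` from the sketch
# above and assumes the deprecated ScalarizedObjective is still importable
# and that the acquisition function forwards `objective` through **kwargs.
import torch
from botorch.acquisition import ExpectedImprovement
from botorch.acquisition.objective import ScalarizedObjective

legacy_objective = ScalarizedObjective(weights=torch.tensor([2.0, 1.0]), offset=0.1)
# Internally, kwargs.get("objective") picks this up and the shim returns
# ScalarizedPosteriorTransform(weights=..., offset=0.1).
acqf = ExpectedImprovement(model, best_f=0.5, objective=legacy_objective)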
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    nu: float = 2.5,
    train_iteration_fidelity: bool = True,
    train_data_fidelity: bool = True,
    likelihood: Optional[Likelihood] = None,
) -> None:
    r"""A single-task multi-fidelity GP using a linear truncated fidelity kernel.

    Args:
        train_X: A tensor of training inputs, including the fidelity parameter(s).
        train_Y: A tensor of training observations.
        nu: The smoothness parameter of the underlying Matern kernels.
        train_iteration_fidelity: If True, include an iteration fidelity parameter.
        train_data_fidelity: If True, include a data fidelity parameter.
        likelihood: A likelihood (optional).
    """
    if not train_iteration_fidelity and not train_data_fidelity:
        raise UnsupportedError("You should have at least one fidelity parameter.")
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    kernel = LinearTruncatedFidelityKernel(
        nu=nu,
        dimension=train_X.shape[-1],
        train_iteration_fidelity=train_iteration_fidelity,
        train_data_fidelity=train_data_fidelity,
        batch_shape=self._aug_batch_shape,
        power_prior=GammaPrior(3.0, 3.0),
    )
    covar_module = ScaleKernel(
        kernel,
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    super().__init__(train_X=train_X, train_Y=train_Y, covar_module=covar_module)
    self.to(train_X)
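
# --- Usage sketch (not part of the original source) ---
# Hypothetical construction of this multi-fidelity GP. The class name
# `SingleTaskMultiFidelityGP` and the trailing-column placement of the
# fidelity parameters are assumptions inferred from the surrounding snippets.
import torch

n, d_design = 20, 2
design = torch.rand(n, d_design)
data_fidelity = torch.rand(n, 1)       # e.g. fraction of the dataset used
iteration_fidelity = torch.rand(n, 1)  # e.g. fraction of training epochs run
train_X = torch.cat([design, data_fidelity, iteration_fidelity], dim=-1)
train_Y = torch.rand(n, 1)
model = SingleTaskMultiFidelityGP(train_X=train_X, train_Y=train_Y, nu=2.5)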
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
    r"""Inform the wrapped (raw) acquisition function about pending points."""
    if not isinstance(self.raw_acqf, AnalyticAcquisitionFunction):
        self.raw_acqf.set_X_pending(X_pending=X_pending)
    else:
        raise UnsupportedError(
            "The raw acquisition function is Analytic and does not account "
            "for X_pending yet."
        )
def _check_compatibility(models: ModelListGP) -> None:
    """Check if a ModelListGP can be converted."""
    # Check that all submodules are of the same type.
    for modn, mod in models[0].named_modules():
        mcls = mod.__class__
        if not all(isinstance(_get_module(m, modn), mcls) for m in models[1:]):
            raise UnsupportedError(
                "Sub-modules must be of the same type across models."
            )
    # Check that each model is a BatchedMultiOutputGPyTorchModel.
    if not all(isinstance(m, BatchedMultiOutputGPyTorchModel) for m in models):
        raise UnsupportedError(
            "All models must be of type BatchedMultiOutputGPyTorchModel."
        )
    # TODO: Add support for HeteroskedasticSingleTaskGP.
    if any(isinstance(m, HeteroskedasticSingleTaskGP) for m in models):
        raise NotImplementedError(
            "Conversion of HeteroskedasticSingleTaskGP is currently unsupported."
        )
    # TODO: Add support for custom likelihoods.
    if any(getattr(m, "_is_custom_likelihood", False) for m in models):
        raise NotImplementedError(
            "Conversion of models with custom likelihoods is currently unsupported."
        )
    # TODO: Add support for outcome transforms.
    if any(getattr(m, "outcome_transform", None) is not None for m in models):
        raise UnsupportedError(
            "Conversion of models with outcome transforms is currently unsupported."
        )
    # Check that each model is single-output.
    if not all(m._num_outputs == 1 for m in models):
        raise UnsupportedError("All models must be single-output.")
    # Check that the training inputs are the same.
    if not all(
        torch.equal(ti, tj)
        for m in models[1:]
        for ti, tj in zip(models[0].train_inputs, m.train_inputs)
    ):
        raise UnsupportedError("training inputs must agree for all sub-models.")
    # Check that there are no batched input transforms.
    default_size = torch.Size([])
    for m in models:
        if hasattr(m, "input_transform"):
            if (
                m.input_transform is not None
                and len(getattr(m.input_transform, "batch_shape", default_size)) != 0
            ):
                raise UnsupportedError("Batched input_transforms are not supported.")
    # Check that all models have the same input transforms.
    if any(hasattr(m, "input_transform") for m in models):
        if not all(
            m.input_transform.equals(models[0].input_transform) for m in models[1:]
        ):
            raise UnsupportedError("All models must have the same input_transforms.")
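
# --- Usage sketch (not part of the original source) ---
# Hypothetical inputs that pass and fail these checks, assuming the standard
# BoTorch ModelListGP and SingleTaskGP classes.
import torch
from botorch.models import ModelListGP, SingleTaskGP

train_X = torch.rand(10, 2)
gp1 = SingleTaskGP(train_X, torch.rand(10, 1))
gp2 = SingleTaskGP(train_X, torch.rand(10, 1))
_check_compatibility(ModelListGP(gp1, gp2).models)  # passes silently

# Differing training inputs fail the torch.equal check:
gp3 = SingleTaskGP(torch.rand(10, 2), torch.rand(10, 1))
# _check_compatibility(ModelListGP(gp1, gp3).models)  # raises UnsupportedError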
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    train_iteration_fidelity: bool = True,
    train_data_fidelity: bool = True,
    likelihood: Optional[Likelihood] = None,
) -> None:
    train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
    num_fidelity = train_iteration_fidelity + train_data_fidelity
    ard_num_dims = train_X.shape[-1] - num_fidelity
    active_dimsX = list(range(train_X.shape[-1] - num_fidelity))
    rbf_kernel = RBFKernel(
        ard_num_dims=ard_num_dims,
        batch_shape=self._aug_batch_shape,
        lengthscale_prior=GammaPrior(3.0, 6.0),
        active_dims=active_dimsX,
    )
    exp_kernel = ExpDecayKernel(
        batch_shape=self._aug_batch_shape,
        lengthscale_prior=GammaPrior(3.0, 6.0),
        offset_prior=GammaPrior(3.0, 6.0),
        power_prior=GammaPrior(3.0, 6.0),
    )
    ds_kernel = DownsamplingKernel(
        batch_shape=self._aug_batch_shape,
        offset_prior=GammaPrior(3.0, 6.0),
        power_prior=GammaPrior(3.0, 6.0),
    )
    if train_iteration_fidelity and train_data_fidelity:
        # The iteration fidelity occupies the last input column, the data
        # fidelity the second-to-last.
        active_dimsS1 = [train_X.shape[-1] - 1]
        active_dimsS2 = [train_X.shape[-1] - 2]
        exp_kernel.active_dims = torch.tensor(active_dimsS1)
        ds_kernel.active_dims = torch.tensor(active_dimsS2)
        kernel = rbf_kernel * exp_kernel * ds_kernel
    elif train_iteration_fidelity or train_data_fidelity:
        # A single fidelity parameter occupies the last input column.
        active_dimsS = [train_X.shape[-1] - 1]
        if train_iteration_fidelity:
            exp_kernel.active_dims = torch.tensor(active_dimsS)
            kernel = rbf_kernel * exp_kernel
        else:
            ds_kernel.active_dims = torch.tensor(active_dimsS)
            kernel = rbf_kernel * ds_kernel
    else:
        raise UnsupportedError("You should have at least one fidelity parameter.")
    covar_module = ScaleKernel(
        kernel,
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    super().__init__(train_X=train_X, train_Y=train_Y, covar_module=covar_module)
    self.to(train_X)
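
# --- Usage sketch (not part of the original source) ---
# The active_dims bookkeeping above implies this input column layout when
# both fidelities are enabled (a reading of the code, not a documented API):
import torch

design = torch.rand(20, 3)     # leading columns -> ARD RBFKernel
data_fid = torch.rand(20, 1)   # second-to-last column -> DownsamplingKernel
iter_fid = torch.rand(20, 1)   # last column -> ExpDecayKernel
train_X = torch.cat([design, data_fid, iter_fid], dim=-1)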
def __init__(
    self, model: Model, objective: Optional[ScalarizedObjective] = None
) -> None:
    r"""Base constructor for analytic acquisition functions.

    Args:
        model: A fitted single-outcome model.
        objective: A ScalarizedObjective (optional).
    """
    super().__init__(model=model)
    if objective is None:
        if model.num_outputs != 1:
            raise UnsupportedError(
                "Must specify an objective when using a multi-output model."
            )
    elif not isinstance(objective, ScalarizedObjective):
        raise UnsupportedError(
            "Only objectives of type ScalarizedObjective are supported for "
            "analytic acquisition functions."
        )
    self.objective = objective
def _construct_base_samples(self, posterior: Posterior, shape: torch.Size) -> None:
    r"""Generate quasi-random Normal base samples (if necessary).

    This function will generate a new set of base samples and set the
    `base_samples` buffer if one of the following is true:

    - `resample=True`
    - the MCSampler has no `base_samples` attribute.
    - `shape` is different than `self.base_samples.shape` (if
      `collapse_batch_dims=True`, then batch dimensions will be
      automatically broadcasted as necessary). This shape is expected to
      be `sample_shape + base_sample_shape`, where `base_sample_shape` has
      been adjusted to account for `collapse_batch_dims` (i.e., the output
      of the function `_get_base_sample_shape`).

    Args:
        posterior: The Posterior for which to generate base samples.
        shape: The shape of the base samples to construct.
    """
    if (
        self.resample
        or _check_shape_changed(self.base_samples, self.batch_range, shape)
        or (not self.collapse_batch_dims and shape != self.base_samples.shape)
    ):
        batch_start, batch_end = self.batch_range
        sample_shape, base_sample_shape = split_shapes(shape)
        output_dim = (
            base_sample_shape[:batch_start] + base_sample_shape[batch_end:]
        ).numel()
        if output_dim > SobolEngine.MAXDIM:
            raise UnsupportedError(
                "SobolQMCSampler only supports dimensions "
                f"`q * o <= {SobolEngine.MAXDIM}`. Requested: {output_dim}"
            )
        base_samples = draw_sobol_normal_samples(
            d=output_dim,
            n=(sample_shape + base_sample_shape[batch_start:batch_end]).numel(),
            device=posterior.device,
            dtype=posterior.dtype,
            seed=self.seed,
        )
        self.seed += 1
        base_samples = base_samples.view(shape)
        self.register_buffer("base_samples", base_samples)
    elif self.collapse_batch_dims and shape != posterior.base_sample_shape:
        self.base_samples = self.base_samples.view(shape)
    if self.base_samples.device != posterior.device:
        self.to(device=posterior.device)  # pragma: nocover
    if self.base_samples.dtype != posterior.dtype:
        self.to(dtype=posterior.dtype)
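
# --- Usage sketch (not part of the original source) ---
# Illustration of the Sobol dimension limit enforced above, using the
# standard draw_sobol_normal_samples helper and torch's SobolEngine.
import torch
from torch.quasirandom import SobolEngine
from botorch.utils.sampling import draw_sobol_normal_samples

q, o = 4, 2                     # q candidates, o outcomes
output_dim = q * o              # must satisfy q * o <= SobolEngine.MAXDIM
assert output_dim <= SobolEngine.MAXDIM
samples = draw_sobol_normal_samples(d=output_dim, n=256, seed=0)
print(samples.shape)            # torch.Size([256, 8])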
def _check_compatibility(models: ModelListGP) -> None:
    """Check if a ModelListGP can be converted."""
    # Check that all submodules are of the same type.
    for modn, mod in models[0].named_modules():
        mcls = mod.__class__
        if not all(isinstance(_get_module(m, modn), mcls) for m in models[1:]):
            raise UnsupportedError(
                "Sub-modules must be of the same type across models."
            )
    # Check that each model is a BatchedMultiOutputGPyTorchModel.
    if not all(isinstance(m, BatchedMultiOutputGPyTorchModel) for m in models):
        raise UnsupportedError(
            "All models must be of type BatchedMultiOutputGPyTorchModel."
        )
    # TODO: Add support for HeteroskedasticSingleTaskGP.
    if any(isinstance(m, HeteroskedasticSingleTaskGP) for m in models):
        raise NotImplementedError(
            "Conversion of HeteroskedasticSingleTaskGP is currently unsupported."
        )
    # TODO: Add support for custom likelihoods.
    if any(getattr(m, "_is_custom_likelihood", False) for m in models):
        raise NotImplementedError(
            "Conversion of models with custom likelihoods is currently unsupported."
        )
    # Check that each model is single-output.
    if not all(m._num_outputs == 1 for m in models):
        raise UnsupportedError("All models must be single-output.")
    # Check that the training inputs are the same.
    if not all(
        torch.equal(ti, tj)
        for m in models[1:]
        for ti, tj in zip(models[0].train_inputs, m.train_inputs)
    ):
        raise UnsupportedError("training inputs must agree for all sub-models.")
def _construct_base_samples(self, posterior: Posterior, shape: torch.Size) -> None:
    r"""Generate quasi-random Normal base samples (if necessary).

    This function will generate a new set of base samples and set the
    `base_samples` buffer if one of the following is true:

    - `resample=True`
    - the MCSampler has no `base_samples` attribute.
    - `shape` is different than `self.base_samples.shape` (if
      `collapse_batch_dims=True`, then batch dimensions will be
      automatically broadcasted as necessary).

    Args:
        posterior: The Posterior for which to generate base samples.
        shape: The shape of the base samples to construct.
    """
    if (
        self.resample
        or not hasattr(self, "base_samples")
        or self.base_samples.shape[-2:] != shape[-2:]
        or (not self.collapse_batch_dims and shape != self.base_samples.shape)
    ):
        output_dim = shape[-2:].numel()
        if output_dim > SobolEngine.MAXDIM:
            raise UnsupportedError(
                "SobolQMCSampler only supports dimensions "
                f"`q * o <= {SobolEngine.MAXDIM}`. Requested: {output_dim}"
            )
        base_samples = draw_sobol_normal_samples(
            d=output_dim,
            n=shape[:-2].numel(),
            device=posterior.device,
            dtype=posterior.dtype,
            seed=self.seed,
        )
        self.seed += 1
        base_samples = base_samples.view(shape)
        self.register_buffer("base_samples", base_samples)
    elif self.collapse_batch_dims and shape != posterior.event_shape:
        self.base_samples = self.base_samples.view(shape)
    if self.base_samples.device != posterior.device:
        self.to(device=posterior.device)  # pragma: nocover
    if self.base_samples.dtype != posterior.dtype:
        self.to(dtype=posterior.dtype)
def __init__(
    self,
    model: GPyTorchModel,
    X_observed: Tensor,
    num_fantasies: int = 20,
    maximize: bool = True,
) -> None:
    r"""Single-outcome Noisy Expected Improvement (via fantasies).

    Args:
        model: A fitted single-outcome model.
        X_observed: A `n x d` Tensor of observed points that are likely to
            be the best observed points so far.
        num_fantasies: The number of fantasies to generate. The higher this
            number the more accurate the model (at the expense of model
            complexity and performance).
        maximize: If True, consider the problem a maximization problem.
    """
    if not isinstance(model, FixedNoiseGP):
        raise UnsupportedError(
            "Only FixedNoiseGPs are currently supported for fantasy NEI"
        )
    # Sample fantasies.
    with torch.no_grad():
        posterior = model.posterior(X=X_observed)
        sampler = SobolQMCNormalSampler(num_fantasies)
        Y_fantasized = sampler(posterior).squeeze(-1)
    batch_X_observed = X_observed.expand(num_fantasies, *X_observed.shape)
    # The fantasy model will operate in batch mode.
    fantasy_model = _get_noiseless_fantasy_model(
        model=model, batch_X_observed=batch_X_observed, Y_fantasized=Y_fantasized
    )
    if maximize:
        best_f = Y_fantasized.max(dim=-1)[0]
    else:
        best_f = Y_fantasized.min(dim=-1)[0]
    super().__init__(model=fantasy_model, best_f=best_f, maximize=maximize)
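
# --- Usage sketch (not part of the original source) ---
# A minimal example of fantasy-based NEI, assuming this is the constructor of
# BoTorch's analytic NoisyExpectedImprovement. A FixedNoiseGP is required, as
# enforced above.
import torch
from botorch.models import FixedNoiseGP

train_X = torch.rand(10, 2)
train_Y = torch.rand(10, 1)
train_Yvar = torch.full_like(train_Y, 0.01)  # known observation noise
model = FixedNoiseGP(train_X, train_Y, train_Yvar)

NEI = NoisyExpectedImprovement(model=model, X_observed=train_X, num_fantasies=20)
nei_values = NEI(torch.rand(5, 1, 2))  # evaluate at 5 points with q=1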
def __init__(  # noqa C901
    self,
    fidelity_dims: List[int],
    dimension: Optional[int] = None,
    power_prior: Optional[Prior] = None,
    power_constraint: Optional[Interval] = None,
    nu: float = 2.5,
    lengthscale_prior_unbiased: Optional[Prior] = None,
    lengthscale_prior_biased: Optional[Prior] = None,
    lengthscale_constraint_unbiased: Optional[Interval] = None,
    lengthscale_constraint_biased: Optional[Interval] = None,
    covar_module_unbiased: Optional[Kernel] = None,
    covar_module_biased: Optional[Kernel] = None,
    **kwargs: Any,
) -> None:
    if dimension is None and kwargs.get("active_dims") is None:
        raise UnsupportedError(
            "Must specify dimension when not specifying active_dims."
        )
    n_fidelity = len(fidelity_dims)
    if len(set(fidelity_dims)) != n_fidelity:
        raise ValueError("fidelity_dims must not have repeated elements")
    if n_fidelity not in {1, 2}:
        raise UnsupportedError(
            "LinearTruncatedFidelityKernel accepts either one or two "
            "fidelity parameters."
        )
    if nu not in {0.5, 1.5, 2.5}:
        raise ValueError("nu must be one of 0.5, 1.5, or 2.5")
    super().__init__(**kwargs)
    self.fidelity_dims = fidelity_dims
    if power_constraint is None:
        power_constraint = Positive()
    if lengthscale_prior_unbiased is None:
        lengthscale_prior_unbiased = GammaPrior(3, 6)
    if lengthscale_prior_biased is None:
        lengthscale_prior_biased = GammaPrior(6, 2)
    if lengthscale_constraint_unbiased is None:
        lengthscale_constraint_unbiased = Positive()
    if lengthscale_constraint_biased is None:
        lengthscale_constraint_biased = Positive()
    self.register_parameter(
        name="raw_power",
        parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
    )
    self.register_constraint("raw_power", power_constraint)
    if power_prior is not None:
        self.register_prior(
            "power_prior",
            power_prior,
            lambda: self.power,
            lambda v: self._set_power(v),
        )
    if self.active_dims is not None:
        dimension = len(self.active_dims)
    if covar_module_unbiased is None:
        covar_module_unbiased = MaternKernel(
            nu=nu,
            batch_shape=self.batch_shape,
            lengthscale_prior=lengthscale_prior_unbiased,
            ard_num_dims=dimension - n_fidelity,
            lengthscale_constraint=lengthscale_constraint_unbiased,
        )
    if covar_module_biased is None:
        covar_module_biased = MaternKernel(
            nu=nu,
            batch_shape=self.batch_shape,
            lengthscale_prior=lengthscale_prior_biased,
            ard_num_dims=dimension - n_fidelity,
            lengthscale_constraint=lengthscale_constraint_biased,
        )
    self.covar_module_unbiased = covar_module_unbiased
    self.covar_module_biased = covar_module_biased
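
# --- Usage sketch (not part of the original source) ---
# Hypothetical construction: a kernel over 3 input columns where column 2 is
# the single fidelity parameter.
kernel = LinearTruncatedFidelityKernel(fidelity_dims=[2], dimension=3, nu=2.5)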
def model_list_to_batched(model_list: ModelListGP) -> BatchedMultiOutputGPyTorchModel:
    """Convert a ModelListGP to a BatchedMultiOutputGPyTorchModel.

    Args:
        model_list: The `ModelListGP` to be converted to the appropriate
            `BatchedMultiOutputGPyTorchModel`. All sub-models must be of the
            same type and have the same shape (batch shape and number of
            training inputs).

    Returns:
        The model converted into a `BatchedMultiOutputGPyTorchModel`.

    Example:
        >>> list_gp = ModelListGP(gp1, gp2)
        >>> batch_gp = model_list_to_batched(list_gp)
    """
    models = model_list.models
    _check_compatibility(models)

    # If the list has only one model, we can just return a copy of it.
    if len(models) == 1:
        return deepcopy(models[0])

    # Construct inputs.
    train_X = deepcopy(models[0].train_inputs[0])
    train_Y = torch.stack([m.train_targets.clone() for m in models], dim=-1)
    kwargs = {"train_X": train_X, "train_Y": train_Y}
    if isinstance(models[0], FixedNoiseGP):
        kwargs["train_Yvar"] = torch.stack(
            [m.likelihood.noise_covar.noise.clone() for m in models], dim=-1
        )
    if isinstance(models[0], SingleTaskMultiFidelityGP):
        init_args = models[0]._init_args
        if not all(
            v == m._init_args[k] for m in models[1:] for k, v in init_args.items()
        ):
            raise UnsupportedError(
                "All models must have the same fidelity parameters."
            )
        kwargs.update(init_args)

    # Construct the batched GP model.
    input_transform = getattr(models[0], "input_transform", None)
    batch_gp = models[0].__class__(input_transform=input_transform, **kwargs)
    adjusted_batch_keys, non_adjusted_batch_keys = _get_adjusted_batch_keys(
        batch_state_dict=batch_gp.state_dict(), input_transform=input_transform
    )
    input_batch_dims = len(models[0]._input_batch_shape)

    # Ensure scalars agree (TODO: Allow different priors for different outputs).
    for n in non_adjusted_batch_keys:
        v0 = _get_module(models[0], n)
        if not all(torch.equal(_get_module(m, n), v0) for m in models[1:]):
            raise UnsupportedError("All scalars must have the same value.")

    # Ensure dimensions of all tensors agree.
    for n in adjusted_batch_keys:
        shape0 = _get_module(models[0], n).shape
        if not all(_get_module(m, n).shape == shape0 for m in models[1:]):
            raise UnsupportedError("All tensors must have the same shape.")

    # Now construct the batched state dict.
    non_adjusted_batch_state_dict = {
        s: p.clone()
        for s, p in models[0].state_dict().items()
        if s in non_adjusted_batch_keys
    }
    adjusted_batch_state_dict = {
        t: (
            torch.stack(
                [m.state_dict()[t].clone() for m in models], dim=input_batch_dims
            )
            if "active_dims" not in t
            else models[0].state_dict()[t].clone()
        )
        for t in adjusted_batch_keys
    }
    batch_state_dict = {**non_adjusted_batch_state_dict, **adjusted_batch_state_dict}

    # Load the state dict into the new model.
    batch_gp.load_state_dict(batch_state_dict)
    return batch_gp
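
# --- Usage sketch (not part of the original source) ---
# Round-trip example, assuming the companion converter batched_to_model_list
# (the inverse conversion) from botorch.models.converter.
import torch
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.converter import batched_to_model_list

train_X = torch.rand(10, 2)
gp1 = SingleTaskGP(train_X, torch.rand(10, 1))
gp2 = SingleTaskGP(train_X, torch.rand(10, 1))
batch_gp = model_list_to_batched(ModelListGP(gp1, gp2))
print(batch_gp.num_outputs)  # 2
list_gp = batched_to_model_list(batch_gp)  # back to a ModelListGP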
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
    raise UnsupportedError(
        "Analytic acquisition functions do not account for X_pending yet."
    )
def __init__(
    self,
    dimension: int = 3,
    nu: float = 2.5,
    train_iteration_fidelity: bool = True,
    train_data_fidelity: bool = True,
    lengthscale_prior: Optional[Prior] = None,
    power_prior: Optional[Prior] = None,
    power_constraint: Optional[Interval] = None,
    lengthscale_2_prior: Optional[Prior] = None,
    lengthscale_2_constraint: Optional[Interval] = None,
    lengthscale_constraint: Optional[Interval] = None,
    covar_module_1: Optional[Kernel] = None,
    covar_module_2: Optional[Kernel] = None,
    **kwargs: Any,
):
    if not train_iteration_fidelity and not train_data_fidelity:
        raise UnsupportedError("You should have at least one fidelity parameter.")
    if nu not in {0.5, 1.5, 2.5}:
        raise ValueError("nu expected to be 0.5, 1.5, or 2.5")
    super().__init__(**kwargs)
    self.train_iteration_fidelity = train_iteration_fidelity
    self.train_data_fidelity = train_data_fidelity
    if power_constraint is None:
        power_constraint = Positive()
    if lengthscale_prior is None:
        lengthscale_prior = GammaPrior(3, 6)
    if lengthscale_2_prior is None:
        lengthscale_2_prior = GammaPrior(6, 2)
    if lengthscale_constraint is None:
        lengthscale_constraint = Positive()
    if lengthscale_2_constraint is None:
        lengthscale_2_constraint = Positive()
    self.register_parameter(
        name="raw_power",
        parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1)),
    )
    if power_prior is not None:
        self.register_prior(
            "power_prior",
            power_prior,
            lambda: self.power,
            lambda v: self._set_power(v),
        )
    self.register_constraint("raw_power", power_constraint)
    m = self.train_iteration_fidelity + self.train_data_fidelity
    if self.active_dims is not None:
        dimension = len(self.active_dims)
    if covar_module_1 is None:
        self.covar_module_1 = MaternKernel(
            nu=nu,
            batch_shape=self.batch_shape,
            lengthscale_prior=lengthscale_prior,
            ard_num_dims=dimension - m,
            lengthscale_constraint=lengthscale_constraint,
        )
    else:
        self.covar_module_1 = covar_module_1
    if covar_module_2 is None:
        self.covar_module_2 = MaternKernel(
            nu=nu,
            batch_shape=self.batch_shape,
            lengthscale_prior=lengthscale_2_prior,
            ard_num_dims=dimension - m,
            lengthscale_constraint=lengthscale_2_constraint,
        )
    else:
        self.covar_module_2 = covar_module_2
def batched_multi_output_to_single_output(
    batch_mo_model: BatchedMultiOutputGPyTorchModel,
) -> BatchedMultiOutputGPyTorchModel:
    """Convert a model from batched multi-output to a batched single-output.

    Note: the underlying GPyTorch GP does not change. The GPyTorch GP's
    batch_shape (referred to as `_aug_batch_shape`) is still
    `_input_batch_shape x num_outputs`. The only things that change are the
    attributes of the BatchedMultiOutputGPyTorchModel that are responsible
    for the internal accounting of the number of outputs: namely,
    `num_outputs`, `_input_batch_shape`, and `_aug_batch_shape`.
    Initially for the batched MO models these are: `num_outputs = m`,
    `_input_batch_shape = train_X.batch_shape`, and
    `_aug_batch_shape = train_X.batch_shape + torch.Size([num_outputs])`.
    In the new SO model, these are: `num_outputs = 1`,
    `_input_batch_shape = train_X.batch_shape + torch.Size([num_outputs])`,
    and `_aug_batch_shape = train_X.batch_shape + torch.Size([num_outputs])`.

    This is a (hopefully) temporary measure until multi-output MVNs with
    independent outputs have better support in GPyTorch (see
    https://github.com/cornellius-gp/gpytorch/pull/1083).

    Args:
        batch_mo_model: The BatchedMultiOutputGPyTorchModel to be converted.

    Returns:
        The model converted into a batched single-output model.

    Example:
        >>> train_X = torch.rand(5, 2)
        >>> train_Y = torch.rand(5, 2)
        >>> batch_mo_gp = SingleTaskGP(train_X, train_Y)
        >>> batch_so_gp = batched_multi_output_to_single_output(batch_mo_gp)
    """
    # TODO: Add support for HeteroskedasticSingleTaskGP.
    if isinstance(batch_mo_model, HeteroskedasticSingleTaskGP):
        raise NotImplementedError(
            "Conversion of HeteroskedasticSingleTaskGP currently not supported."
        )
    elif not isinstance(batch_mo_model, BatchedMultiOutputGPyTorchModel):
        raise UnsupportedError("Only BatchedMultiOutputGPyTorchModels are supported.")
    # TODO: Add support for custom likelihoods.
    elif getattr(batch_mo_model, "_is_custom_likelihood", False):
        raise NotImplementedError(
            "Conversion of models with custom likelihoods is currently unsupported."
        )
    input_transform = getattr(batch_mo_model, "input_transform", None)
    batch_sd = batch_mo_model.state_dict()

    # TODO: Add support for outcome transforms.
    if hasattr(batch_mo_model, "outcome_transform"):
        raise NotImplementedError(
            "Converting batched multi-output models with outcome transforms "
            "is not currently supported."
        )
    kwargs = {
        "train_X": batch_mo_model.train_inputs[0].clone(),
        "train_Y": batch_mo_model.train_targets.clone().unsqueeze(-1),
    }
    if isinstance(batch_mo_model, FixedNoiseGP):
        noise_covar = batch_mo_model.likelihood.noise_covar
        kwargs["train_Yvar"] = noise_covar.noise.clone().unsqueeze(-1)
    if isinstance(batch_mo_model, SingleTaskMultiFidelityGP):
        kwargs.update(batch_mo_model._init_args)
    single_outcome_model = batch_mo_model.__class__(
        input_transform=input_transform, **kwargs
    )
    single_outcome_model.load_state_dict(batch_sd)
    return single_outcome_model
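
# --- Usage sketch (not part of the original source) ---
# Illustration of the bookkeeping change described in the docstring, assuming
# a standard SingleTaskGP with m = 2 outputs and no outcome transform.
import torch
from botorch.models import SingleTaskGP

batch_mo_gp = SingleTaskGP(torch.rand(5, 2), torch.rand(5, 2))
batch_so_gp = batched_multi_output_to_single_output(batch_mo_gp)
print(batch_mo_gp.num_outputs, batch_so_gp.num_outputs)  # 2 1
# The underlying GPyTorch batch shape is unchanged; only num_outputs,
# _input_batch_shape, and _aug_batch_shape differ between the two models.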