def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None: r"""A single-task exact GP model using a heteroskedastic noise model. Args: train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training features. train_Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of training observations. train_Yvar: A `batch_shape x n x m` or `batch_shape x n x m` (batch mode) tensor of observed measurement noise. Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True) >>> se = torch.norm(train_X, dim=1, keepdim=True) >>> train_Yvar = 0.1 + se * torch.rand_like(train_Y) >>> model = HeteroskedasticSingleTaskGP(train_X, train_Y, train_Yvar) """ validate_input_scaling(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar) self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar) self._set_dimensions(train_X=train_X, train_Y=train_Y) noise_likelihood = GaussianLikelihood( noise_prior=SmoothedBoxPrior(-3, 5, 0.5, transform=torch.log), batch_shape=self._aug_batch_shape, noise_constraint=GreaterThan( MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=1.0 ), ) noise_model = SingleTaskGP( train_X=train_X, train_Y=train_Yvar.log(), likelihood=noise_likelihood ) likelihood = _GaussianLikelihoodBase(HeteroskedasticNoise(noise_model)) super().__init__(train_X=train_X, train_Y=train_Y, likelihood=likelihood) self.to(train_X)
def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None: r"""A single-task exact GP model using a heteroskedastic noise model. Args: train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training features. train_Y: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of training observations. train_Yvar: A `batch_shape x n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of observed measurement noise.. Example: >>> train_X = torch.rand(20, 2) >>> train_Y = torch.sin(train_X[:, 0]]) + torch.cos(train_X[:, 1]) >>> se = torch.norm(train_X - 0.5, dim=-1) >>> train_Yvar = 0.1 + se * torch.rand_like(train_Y) >>> model = HeteroskedasticSingleTaskGP(train_X, train_Y, train_Yvar) """ self._set_dimensions(train_X=train_X, train_Y=train_Y) train_Y_log_var = torch.log(train_Yvar) noise_likelihood = GaussianLikelihood( noise_prior=SmoothedBoxPrior(-3, 5, 0.5, transform=torch.log), batch_shape=self._aug_batch_shape, noise_constraint=GreaterThan(MIN_INFERRED_NOISE_LEVEL, transform=None), ) noise_model = SingleTaskGP( train_X=train_X, train_Y=train_Y_log_var, likelihood=noise_likelihood ) likelihood = _GaussianLikelihoodBase(HeteroskedasticNoise(noise_model)) super().__init__(train_X=train_X, train_Y=train_Y, likelihood=likelihood) self.to(train_X)
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
    covar_module: Optional[Module] = None,
) -> None:
    r"""A single-task exact GP model.

    Args:
        train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of
            training features.
        train_Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of
            training observations.
        likelihood: A likelihood. If omitted, use a standard
            `GaussianLikelihood` with inferred noise level.
        covar_module: The module computing the covariance (kernel) matrix.
            If omitted, use a `MaternKernel`.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> model = SingleTaskGP(train_X, train_Y)
    """
    validate_input_scaling(train_X=train_X, train_Y=train_Y)
    self._validate_tensor_args(X=train_X, Y=train_Y)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y)
    if likelihood is None:
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        self._is_custom_likelihood = True
    ExactGP.__init__(self, train_X, train_Y, likelihood)
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    if covar_module is None:
        self.covar_module = ScaleKernel(
            MaternKernel(
                nu=2.5,
                ard_num_dims=train_X.shape[-1],
                batch_shape=self._aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
            ),
            batch_shape=self._aug_batch_shape,
            outputscale_prior=GammaPrior(2.0, 0.15),
        )
    else:
        self.covar_module = covar_module
    self.to(train_X)
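# Usage sketch (assumed, not from the original source): passing a custom
# covariance module instead of the default Matern-5/2 kernel via the
# `covar_module` argument above.
import torch
from botorch.models import SingleTaskGP
from gpytorch.kernels import RBFKernel, ScaleKernel

train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
# An RBF kernel with ARD lengthscales, wrapped in an output scale.
covar_module = ScaleKernel(RBFKernel(ard_num_dims=train_X.shape[-1]))
model = SingleTaskGP(train_X, train_Y, covar_module=covar_module)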
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    train_Yvar: Tensor,
    outcome_transform: Optional[OutcomeTransform] = None,
) -> None:
    r"""A single-task exact GP model using a heteroskedastic noise model.

    Args:
        train_X: A `batch_shape x n x d` tensor of training features.
        train_Y: A `batch_shape x n x m` tensor of training observations.
        train_Yvar: A `batch_shape x n x m` tensor of observed measurement
            noise.
        outcome_transform: An outcome transform that is applied to the
            training data during instantiation and to the posterior during
            inference (that is, the `Posterior` obtained by calling
            `.posterior` on the model will be on the original scale). Note
            that the noise model internally log-transforms the variances,
            which will happen after this transform is applied.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> se = torch.norm(train_X, dim=1, keepdim=True)
        >>> train_Yvar = 0.1 + se * torch.rand_like(train_Y)
        >>> model = HeteroskedasticSingleTaskGP(train_X, train_Y, train_Yvar)
    """
    if outcome_transform is not None:
        train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
    validate_input_scaling(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
    self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    noise_likelihood = GaussianLikelihood(
        noise_prior=SmoothedBoxPrior(-3, 5, 0.5, transform=torch.log),
        batch_shape=self._aug_batch_shape,
        noise_constraint=GreaterThan(
            MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=1.0
        ),
    )
    # The noise model operates on the log of the observed variances via the
    # `Log` outcome transform.
    noise_model = SingleTaskGP(
        train_X=train_X,
        train_Y=train_Yvar,
        likelihood=noise_likelihood,
        outcome_transform=Log(),
    )
    likelihood = _GaussianLikelihoodBase(HeteroskedasticNoise(noise_model))
    super().__init__(train_X=train_X, train_Y=train_Y, likelihood=likelihood)
    self.register_added_loss_term("noise_added_loss")
    self.update_added_loss_term(
        "noise_added_loss", NoiseModelAddedLossTerm(noise_model)
    )
    if outcome_transform is not None:
        self.outcome_transform = outcome_transform
    self.to(train_X)
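# Usage sketch (assumed): combining the constructor above with a Standardize
# outcome transform; the posterior is returned on the original scale.
import torch
from botorch.models import HeteroskedasticSingleTaskGP
from botorch.models.transforms.outcome import Standardize

train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
# Keep variances strictly positive; the noise model takes their log.
train_Yvar = 0.1 + 0.1 * torch.rand_like(train_Y)
model = HeteroskedasticSingleTaskGP(
    train_X, train_Y, train_Yvar, outcome_transform=Standardize(m=1)
)
posterior = model.posterior(torch.rand(5, 2, dtype=torch.double))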
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
) -> None:
    r"""A single-task exact GP model.

    Args:
        train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of
            training features.
        train_Y: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of
            training observations.
        likelihood: A likelihood. If omitted, use a standard
            `GaussianLikelihood` with inferred noise level.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X[:, 0]) + torch.cos(train_X[:, 1])
        >>> model = SingleTaskGP(train_X, train_Y)
    """
    ard_num_dims = train_X.shape[-1]
    train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
    # Convert multi-output data to batch mode (one batch per output).
    train_X, train_Y, _ = multioutput_to_batch_mode_transform(
        train_X=train_X, train_Y=train_Y, num_outputs=self._num_outputs
    )
    if likelihood is None:
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        self._likelihood_state_dict = deepcopy(likelihood.state_dict())
    ExactGP.__init__(self, train_X, train_Y, likelihood)
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    self.covar_module = ScaleKernel(
        MaternKernel(
            nu=2.5,
            ard_num_dims=ard_num_dims,
            batch_shape=self._aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
        ),
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    self.to(train_X)
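# Usage sketch (assumed) for the multi-output variant above, using the
# SingleTaskGP class defined in this file: two outcomes stacked along the
# last dimension are converted to batch mode internally.
import torch

train_X = torch.rand(20, 2)
train_Y = torch.stack(
    [torch.sin(train_X[:, 0]), torch.cos(train_X[:, 1])], dim=-1
)  # `n x (o)` with o = 2 outputs
model = SingleTaskGP(train_X, train_Y)  # internally sets _num_outputs == 2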
def update(self, context, actions, rewards):
    self.x.append(context)
    # Positive reward: keep the action as the label; otherwise flip the
    # binary action (0 <-> 1) so the GP also learns from failures.
    self.y.append(actions if rewards > 0 else abs(actions - 1))
    # Alternative labeling that treats zero reward as uninformative:
    # if rewards > 0:
    #     self.y.append(actions)
    # elif rewards == 0:
    #     self.y.append(0.5)
    # elif rewards < 0:
    #     self.y.append(abs(actions - 1))
    # Assumed prior settings (matching the SingleTaskGP constructors above);
    # `noise_prior` and `noise_prior_mode` were otherwise undefined here.
    noise_prior = GammaPrior(1.1, 0.05)
    noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
    likelihood = GaussianLikelihood(
        noise_prior=noise_prior,
        noise_constraint=GreaterThan(
            MIN_INFERRED_NOISE_LEVEL,
            transform=None,
            initial_value=noise_prior_mode,
        ),
    )
    self.model = GPRegressionModel(
        torch.Tensor(self.x).to(device),
        torch.Tensor(self.y).to(device),
        likelihood,
    ).to(device)
    mll = ExactMarginalLogLikelihood(likelihood, self.model).to(device)
    fit_gpytorch_model(mll, optimizer=fit_gpytorch_torch)
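# Hypothetical driver loop (names assumed, not from the original source)
# showing how update() above is called: each round records a context and a
# reward-derived pseudo-label, then refits the GP.
import torch

agent = Agent()  # hypothetical: whatever class defines update() above
for _ in range(10):
    context = torch.rand(4).tolist()
    action = 1     # binary action in {0, 1}
    reward = 1.0   # positive reward keeps the action as the label
    agent.update(context, action, reward)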
def fit_gp(self) -> None:
    """Re-fit the GP using the most up-to-date data."""
    noise_prior = GammaPrior(1.1, 0.5)
    noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
    likelihood = GaussianLikelihood(
        noise_prior=noise_prior,
        batch_shape=torch.Size(),
        noise_constraint=GreaterThan(
            # 0.000005,  # minimum observation noise assumed in the GP model
            0.0001,
            transform=None,
            initial_value=noise_prior_mode,
        ),
    )
    self.model = SingleTaskGP(
        self.X, self.Y, likelihood, outcome_transform=Standardize(m=1)
    )
    mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
    fit_gpytorch_model(mll)
    # Dummy computation to be safe with the GP fit.
    try:
        dummy = torch.rand(
            (1, self.q, self.dim), dtype=self.dtype, device=self.device
        )
        _ = self.model.posterior(dummy).mean
    except RuntimeError as err:
        if self.fit_count < 5:
            self.fit_count += 1
            # Jitter the observations slightly and retry the fit.
            self.Y = self.Y + torch.randn_like(self.Y) * 0.001
            self.fit_gp()
        else:
            raise err
    self.fit_count = 0
    self.passed = False
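# Minimal standalone sketch (assumed) of the safety check used in fit_gp
# above: after fitting, evaluate the posterior at a dummy q-batch to surface
# numerical failures early.
import torch
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood

X = torch.rand(20, 2, dtype=torch.double)
Y = torch.sin(X).sum(dim=-1, keepdim=True)
model = SingleTaskGP(X, Y)
fit_gpytorch_model(ExactMarginalLogLikelihood(model.likelihood, model))
# Dummy posterior evaluation at a `1 x q x d` batch, mirroring fit_gp.
_ = model.posterior(torch.rand(1, 3, 2, dtype=torch.double)).mean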
def initialize_model(self, train_X, train_Y, state_dict=None):
    """Initialise model for BO."""
    # From: https://github.com/pytorch/botorch/issues/179
    noise_prior = GammaPrior(1.1, 0.05)
    noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
    MIN_INFERRED_NOISE_LEVEL = 1e-3
    likelihood = GaussianLikelihood(
        noise_prior=noise_prior,
        noise_constraint=GreaterThan(
            MIN_INFERRED_NOISE_LEVEL,
            transform=None,
            initial_value=noise_prior_mode,
        ),
    )
    # train_x = self.scale_to_0_1_bounds(train_X)
    train_Y = standardize(train_Y)
    gp = SingleTaskGP(train_X, train_Y, likelihood=likelihood)
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    # Load the state dict if one is passed (warm-starts the refit).
    if state_dict is not None:
        gp.load_state_dict(state_dict)
    return mll, gp
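# Usage sketch (assumed names, not from the original source): warm-starting a
# refit from the previous model's state dict, per the pattern referenced from
# botorch issue #179. `bo` stands in for an instance of the class that
# defines initialize_model above.
from botorch.fit import fit_gpytorch_model

mll, gp = bo.initialize_model(train_X, train_Y)
fit_gpytorch_model(mll)
# ... later, after appending new observations to train_X / train_Y:
mll, gp = bo.initialize_model(train_X, train_Y, state_dict=gp.state_dict())
fit_gpytorch_model(mll)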
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
    covar_module: Optional[Module] = None,
    outcome_transform: Optional[OutcomeTransform] = None,
) -> None:
    r"""A single-task exact GP model.

    Args:
        train_X: A `batch_shape x n x d` tensor of training features.
        train_Y: A `batch_shape x n x m` tensor of training observations.
        likelihood: A likelihood. If omitted, use a standard
            `GaussianLikelihood` with inferred noise level.
        covar_module: The module computing the covariance (kernel) matrix.
            If omitted, use a `MaternKernel`.
        outcome_transform: An outcome transform that is applied to the
            training data during instantiation and to the posterior during
            inference (that is, the `Posterior` obtained by calling
            `.posterior` on the model will be on the original scale).

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> model = SingleTaskGP(train_X, train_Y)
    """
    if outcome_transform is not None:
        train_Y, _ = outcome_transform(train_Y)
    validate_input_scaling(train_X=train_X, train_Y=train_Y)
    self._validate_tensor_args(X=train_X, Y=train_Y)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y)
    if likelihood is None:
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        self._is_custom_likelihood = True
    ExactGP.__init__(self, train_X, train_Y, likelihood)
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    if covar_module is None:
        self.covar_module = ScaleKernel(
            MaternKernel(
                nu=2.5,
                ard_num_dims=train_X.shape[-1],
                batch_shape=self._aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
            ),
            batch_shape=self._aug_batch_shape,
            outputscale_prior=GammaPrior(2.0, 0.15),
        )
        self._subset_batch_dict = {
            "likelihood.noise_covar.raw_noise": -2,
            "mean_module.constant": -2,
            "covar_module.raw_outputscale": -1,
            "covar_module.base_kernel.raw_lengthscale": -3,
        }
    else:
        self.covar_module = covar_module
        # TODO: Allow subsetting of other covar modules
    if outcome_transform is not None:
        self.outcome_transform = outcome_transform
    self.to(train_X)
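# Usage sketch (assumed): supplying a custom likelihood; per the constructor
# above, this sets `_is_custom_likelihood` and skips the default noise
# prior/constraint.
import torch
from botorch.models import SingleTaskGP
from gpytorch.likelihoods import GaussianLikelihood

train_X = torch.rand(20, 2, dtype=torch.double)
train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
model = SingleTaskGP(train_X, train_Y, likelihood=GaussianLikelihood())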