def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    train_Yvar: Tensor,
    outcome_transform: Optional[OutcomeTransform] = None,
) -> None:
    r"""A single-task exact GP model using fixed noise levels.

    Args:
        train_X: A `batch_shape x n x d` tensor of training features.
        train_Y: A `batch_shape x n x m` tensor of training observations.
        train_Yvar: A `batch_shape x n x m` tensor of observed measurement
            noise.
        outcome_transform: An outcome transform that is applied to the
            training data during instantiation and to the posterior during
            inference (that is, the `Posterior` obtained by calling
            `.posterior` on the model will be on the original scale).

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> train_Yvar = torch.full_like(train_Y, 0.2)
        >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar)
    """
    if outcome_transform is not None:
        train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
    validate_input_scaling(
        train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar
    )
    self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, train_Yvar = self._transform_tensor_args(
        X=train_X, Y=train_Y, Yvar=train_Yvar
    )
    likelihood = FixedNoiseGaussianLikelihood(
        noise=train_Yvar, batch_shape=self._aug_batch_shape
    )
    ExactGP.__init__(
        self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood
    )
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    self.covar_module = ScaleKernel(
        base_kernel=MaternKernel(
            nu=2.5,
            ard_num_dims=train_X.shape[-1],
            batch_shape=self._aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
        ),
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    if outcome_transform is not None:
        self.outcome_transform = outcome_transform
    self._subset_batch_dict = {
        "mean_module.constant": -2,
        "covar_module.raw_outputscale": -1,
        "covar_module.base_kernel.raw_lengthscale": -3,
    }
    self.to(train_X)
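# Hedged usage sketch (not part of the original source): one way the fixed-noise
# constructor above is typically exercised end to end, assuming the standard
# BoTorch workflow of this era (`Standardize` outcome transform,
# `ExactMarginalLogLikelihood`, `fit_gpytorch_model`). The variable names and
# the noise level are illustrative only.
import torch
from botorch.fit import fit_gpytorch_model
from botorch.models import FixedNoiseGP
from botorch.models.transforms.outcome import Standardize
from gpytorch.mlls import ExactMarginalLogLikelihood

train_X = torch.rand(20, 2)
train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
train_Yvar = torch.full_like(train_Y, 0.05 ** 2)  # known observation noise
model = FixedNoiseGP(
    train_X, train_Y, train_Yvar, outcome_transform=Standardize(m=1)
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# The posterior is on the original (untransformed) scale of train_Y.
posterior = model.posterior(torch.rand(5, 2))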
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
    covar_module: Optional[Module] = None,
) -> None:
    r"""A single-task exact GP model.

    Args:
        train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of
            training features.
        train_Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of
            training observations.
        likelihood: A likelihood. If omitted, use a standard
            GaussianLikelihood with inferred noise level.
        covar_module: The module computing the covariance (kernel) matrix.
            If omitted, use a `MaternKernel`.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> model = SingleTaskGP(train_X, train_Y)
    """
    validate_input_scaling(train_X=train_X, train_Y=train_Y)
    self._validate_tensor_args(X=train_X, Y=train_Y)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y)
    if likelihood is None:
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        self._is_custom_likelihood = True
    ExactGP.__init__(self, train_X, train_Y, likelihood)
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    if covar_module is None:
        self.covar_module = ScaleKernel(
            MaternKernel(
                nu=2.5,
                ard_num_dims=train_X.shape[-1],
                batch_shape=self._aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
            ),
            batch_shape=self._aug_batch_shape,
            outputscale_prior=GammaPrior(2.0, 0.15),
        )
    else:
        self.covar_module = covar_module
    self.to(train_X)
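# Hedged usage sketch (not in the original source): passing a custom
# `covar_module` to the constructor above -- here a `ScaleKernel(RBFKernel(...))`
# in place of the default Matern-5/2 kernel. Names and shapes are illustrative;
# the import path assumes the standard `botorch.models.SingleTaskGP` location.
import torch
from botorch.models import SingleTaskGP
from gpytorch.kernels import RBFKernel, ScaleKernel

train_X = torch.rand(20, 2)
train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
custom_kernel = ScaleKernel(RBFKernel(ard_num_dims=train_X.shape[-1]))
model = SingleTaskGP(train_X, train_Y, covar_module=custom_kernel)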
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
) -> None:
    r"""A single-task exact GP model.

    Args:
        train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of
            training features.
        train_Y: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of
            training observations.
        likelihood: A likelihood. If omitted, use a standard
            GaussianLikelihood with inferred noise level.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X[:, 0]) + torch.cos(train_X[:, 1])
        >>> model = SingleTaskGP(train_X, train_Y)
    """
    ard_num_dims = train_X.shape[-1]
    train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, _ = multioutput_to_batch_mode_transform(
        train_X=train_X, train_Y=train_Y, num_outputs=self._num_outputs
    )
    if likelihood is None:
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        self._likelihood_state_dict = deepcopy(likelihood.state_dict())
    ExactGP.__init__(self, train_X, train_Y, likelihood)
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    self.covar_module = ScaleKernel(
        MaternKernel(
            nu=2.5,
            ard_num_dims=ard_num_dims,
            batch_shape=self._aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
        ),
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    self.to(train_X)
def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None:
    r"""A single-task exact GP model using fixed noise levels.

    Args:
        train_X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of
            training features.
        train_Y: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor of
            training observations.
        train_Yvar: A `n x (o)` or `batch_shape x n x (o)` (batch mode) tensor
            of observed measurement noise.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X[:, 0]) + torch.cos(train_X[:, 1])
        >>> train_Yvar = torch.full_like(train_Y, 0.2)
        >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar)
    """
    ard_num_dims = train_X.shape[-1]
    train_X, train_Y, train_Yvar = self._set_dimensions(
        train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar
    )
    train_X, train_Y, train_Yvar = multioutput_to_batch_mode_transform(
        train_X=train_X,
        train_Y=train_Y,
        num_outputs=self._num_outputs,
        train_Yvar=train_Yvar,
    )
    likelihood = FixedNoiseGaussianLikelihood(
        noise=train_Yvar, batch_shape=self._aug_batch_shape
    )
    ExactGP.__init__(
        self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood
    )
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    self.covar_module = ScaleKernel(
        base_kernel=MaternKernel(
            nu=2.5,
            ard_num_dims=ard_num_dims,
            batch_shape=self._aug_batch_shape,
            lengthscale_prior=GammaPrior(3.0, 6.0),
        ),
        batch_shape=self._aug_batch_shape,
        outputscale_prior=GammaPrior(2.0, 0.15),
    )
    self.to(train_X)
def __init__(self, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor) -> None:
    r"""A single-task exact GP model with fixed noise levels and an ARD RBF
    kernel (no hyperparameter priors).

    Args:
        train_X: A `batch_shape x n x d` tensor of training features.
        train_Y: A `batch_shape x n x m` tensor of training observations.
        train_Yvar: A `batch_shape x n x m` tensor of observed measurement
            noise.
    """
    self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, train_Yvar = self._transform_tensor_args(
        X=train_X, Y=train_Y, Yvar=train_Yvar
    )
    likelihood = FixedNoiseGaussianLikelihood(
        noise=train_Yvar, batch_shape=self._aug_batch_shape
    )
    ExactGP.__init__(
        self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood
    )
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    self.covar_module = ScaleKernel(
        base_kernel=RBFKernel(
            ard_num_dims=train_X.shape[-1],
            batch_shape=self._aug_batch_shape,
        ),
        batch_shape=self._aug_batch_shape,
    )
    self.to(train_X)
def test_manual_bounds(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        # get a test module
        train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
        train_y = torch.tensor([4.0], device=device, dtype=dtype)
        likelihood = GaussianLikelihood()
        model = ExactGP(train_x, train_y, likelihood)
        model.covar_module = RBFKernel(ard_num_dims=3)
        model.mean_module = ConstantMean()
        model.to(device=device, dtype=dtype)
        mll = ExactMarginalLogLikelihood(likelihood, model)
        # test the basic case
        x, pdict, bounds = module_to_array(
            module=mll, bounds={"model.covar_module.raw_lengthscale": (0.1, None)}
        )
        self.assertTrue(np.array_equal(x, np.zeros(5)))
        expected_sizes = {
            "likelihood.noise_covar.raw_noise": torch.Size([1]),
            "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
            "model.mean_module.constant": torch.Size([1]),
        }
        self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
        for pname, val in pdict.items():
            self.assertEqual(val.dtype, dtype)
            self.assertEqual(val.shape, expected_sizes[pname])
            self.assertEqual(val.device.type, device.type)
        lower_exp = np.full_like(x, 0.1)
        for p in ("likelihood.noise_covar.raw_noise", "model.mean_module.constant"):
            lower_exp[_get_index(pdict, p)] = -np.inf
        self.assertTrue(np.equal(bounds[0], lower_exp).all())
        self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
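# Hedged sketch (not part of the test file): one way the `(x, pdict, bounds)`
# triple returned by `module_to_array` can drive a scipy optimizer, with
# `set_params_with_array` writing each candidate back into the module. This only
# illustrates the round trip exercised by the tests -- the import path, the
# `neg_mll` helper, and the use of finite-difference L-BFGS-B are assumptions,
# not the library's actual fitting routine (which uses autograd gradients).
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.optim.numpy_converter import module_to_array, set_params_with_array
from gpytorch.mlls import ExactMarginalLogLikelihood
from scipy.optimize import Bounds, minimize

train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)

x0, pdict, bnds = module_to_array(
    module=mll,
    bounds={"model.covar_module.base_kernel.raw_lengthscale": (0.1, None)},
)

def neg_mll(x_np: np.ndarray) -> float:
    # write the flat numpy vector back into the module, then evaluate -MLL
    m = set_params_with_array(mll, x_np, pdict)
    with torch.no_grad():
        output = m.model(*m.model.train_inputs)
        return -m(output, m.model.train_targets).sum().item()

res = minimize(
    neg_mll,
    x0,
    method="L-BFGS-B",  # gradients approximated by finite differences
    bounds=Bounds(bnds[0], bnds[1]),
)
mll = set_params_with_array(mll, res.x, pdict)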
def test_set_parameters(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        # get a test module
        train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
        train_y = torch.tensor([4.0], device=device, dtype=dtype)
        likelihood = GaussianLikelihood()
        model = ExactGP(train_x, train_y, likelihood)
        model.covar_module = RBFKernel(ard_num_dims=3)
        model.mean_module = ConstantMean()
        model.to(device=device, dtype=dtype)
        mll = ExactMarginalLogLikelihood(likelihood, model)
        # get parameters
        x, pdict, bounds = module_to_array(module=mll)
        # Set parameters
        mll = set_params_with_array(mll, np.array([1.0, 2.0, 3.0, 4.0, 5.0]), pdict)
        z = dict(mll.named_parameters())
        self.assertTrue(
            torch.equal(
                z["likelihood.noise_covar.raw_noise"],
                torch.tensor([1.0], device=device, dtype=dtype),
            )
        )
        self.assertTrue(
            torch.equal(
                z["model.covar_module.raw_lengthscale"],
                torch.tensor([[2.0, 3.0, 4.0]], device=device, dtype=dtype),
            )
        )
        self.assertTrue(
            torch.equal(
                z["model.mean_module.constant"],
                torch.tensor([5.0], device=device, dtype=dtype),
            )
        )
        # Extract again
        x2, pdict2, bounds2 = module_to_array(module=mll)
        self.assertTrue(np.array_equal(x2, np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
def test_exclude(self):
    for dtype in (torch.float, torch.double):
        # get a test module
        train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
        train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
        likelihood = GaussianLikelihood()
        model = ExactGP(train_x, train_y, likelihood)
        model.covar_module = RBFKernel(ard_num_dims=3)
        model.mean_module = ConstantMean()
        model.to(device=self.device, dtype=dtype)
        mll = ExactMarginalLogLikelihood(likelihood, model)
        # test the basic case
        x, pdict, bounds = module_to_array(
            module=mll, exclude={"model.mean_module.constant"}
        )
        self.assertTrue(np.array_equal(x, np.zeros(4)))
        expected_sizes = {
            "likelihood.noise_covar.raw_noise": torch.Size([1]),
            "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
        }
        self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
        for pname, val in pdict.items():
            self.assertEqual(val.dtype, dtype)
            self.assertEqual(val.shape, expected_sizes[pname])
            self.assertEqual(val.device.type, self.device.type)
        self.assertIsNone(bounds)
def test_set_parameters(self):
    for dtype in (torch.float, torch.double):
        # get a test module
        train_x = torch.tensor([[1.0, 2.0, 3.0]], device=self.device, dtype=dtype)
        train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
        likelihood = GaussianLikelihood()
        model = ExactGP(train_x, train_y, likelihood)
        model.covar_module = RBFKernel(ard_num_dims=3)
        model.mean_module = ConstantMean()
        model.to(device=self.device, dtype=dtype)
        mll = ExactMarginalLogLikelihood(likelihood, model)
        # get parameters
        x, pdict, bounds = module_to_array(module=mll)
        # Set parameters
        mll = set_params_with_array(mll, np.array([1.0, 2.0, 3.0, 4.0, 5.0]), pdict)
        z = dict(mll.named_parameters())
        self.assertTrue(
            torch.equal(
                z["likelihood.noise_covar.raw_noise"],
                torch.tensor([1.0], device=self.device, dtype=dtype),
            )
        )
        self.assertTrue(
            torch.equal(
                z["model.covar_module.raw_lengthscale"],
                torch.tensor([[2.0, 3.0, 4.0]], device=self.device, dtype=dtype),
            )
        )
        self.assertTrue(
            torch.equal(
                z["model.mean_module.constant"],
                torch.tensor([5.0], device=self.device, dtype=dtype),
            )
        )
        # Extract again
        x2, pdict2, bounds2 = module_to_array(module=mll)
        self.assertTrue(np.array_equal(x2, np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
    covar_module: Optional[Module] = None,
    outcome_transform: Optional[OutcomeTransform] = None,
) -> None:
    r"""A single-task exact GP model.

    Args:
        train_X: A `batch_shape x n x d` tensor of training features.
        train_Y: A `batch_shape x n x m` tensor of training observations.
        likelihood: A likelihood. If omitted, use a standard
            GaussianLikelihood with inferred noise level.
        covar_module: The module computing the covariance (Kernel) matrix.
            If omitted, use a `MaternKernel`.
        outcome_transform: An outcome transform that is applied to the
            training data during instantiation and to the posterior during
            inference (that is, the `Posterior` obtained by calling
            `.posterior` on the model will be on the original scale).

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> model = SingleTaskGP(train_X, train_Y)
    """
    if outcome_transform is not None:
        train_Y, _ = outcome_transform(train_Y)
    validate_input_scaling(train_X=train_X, train_Y=train_Y)
    self._validate_tensor_args(X=train_X, Y=train_Y)
    self._set_dimensions(train_X=train_X, train_Y=train_Y)
    train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y)
    if likelihood is None:
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        self._is_custom_likelihood = True
    ExactGP.__init__(self, train_X, train_Y, likelihood)
    self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
    if covar_module is None:
        self.covar_module = ScaleKernel(
            MaternKernel(
                nu=2.5,
                ard_num_dims=train_X.shape[-1],
                batch_shape=self._aug_batch_shape,
                lengthscale_prior=GammaPrior(3.0, 6.0),
            ),
            batch_shape=self._aug_batch_shape,
            outputscale_prior=GammaPrior(2.0, 0.15),
        )
        self._subset_batch_dict = {
            "likelihood.noise_covar.raw_noise": -2,
            "mean_module.constant": -2,
            "covar_module.raw_outputscale": -1,
            "covar_module.base_kernel.raw_lengthscale": -3,
        }
    else:
        self.covar_module = covar_module
        # TODO: Allow subsetting of other covar modules
    if outcome_transform is not None:
        self.outcome_transform = outcome_transform
    self.to(train_X)
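# Hedged sketch (not in the original source): the `_set_dimensions` call in the
# constructor above augments the batch shape when `train_Y` has m > 1 outcome
# columns, so each outcome is modeled by an independent GP with the same kernel
# form. Shapes and names below are illustrative only.
import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(20, 2)
train_Y = torch.stack(
    [torch.sin(train_X).sum(dim=-1), torch.cos(train_X).sum(dim=-1)], dim=-1
)  # 20 x 2: two outcomes
model = SingleTaskGP(train_X, train_Y)
posterior = model.posterior(torch.rand(5, 2))
print(posterior.mean.shape)  # expected: torch.Size([5, 2])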