Example 1
 def test_set_transformed_inputs(self):
     for dtype in (torch.float, torch.double):
         train_x = torch.rand(5, 1, dtype=dtype, device=self.device)
         train_y = torch.rand(5, 1, dtype=dtype, device=self.device)
         tf = Normalize(
             d=1,
             bounds=torch.tensor([[0.0], [2.0]], dtype=dtype, device=self.device),
             transform_on_preprocess=False,
         )
         model = SingleTaskGP(train_x, train_y, input_transform=tf)
         self.assertTrue(torch.equal(model.train_inputs[0], train_x))
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         # check that the input transform is only applied when
         # transform_on_preprocess is True
         self.assertTrue(torch.equal(model.train_inputs[0], train_x))
         tf.transform_on_preprocess = True
         _set_transformed_inputs(mll)
         self.assertTrue(torch.equal(model.train_inputs[0], tf(train_x)))
         model.eval()
         # test no set_train_data method
         mock_model = MockGP(MockPosterior())
         mock_model.train_inputs = (train_x,)
         mock_model.likelihood = model.likelihood
         mock_model.input_transform = tf
         mll = ExactMarginalLogLikelihood(mock_model.likelihood, mock_model)
         with self.assertRaises(BotorchError):
             _set_transformed_inputs(mll)
Example 2
def laplace_sample_U(mll: ExactMarginalLogLikelihood,
                     nsamp: int) -> Tuple[Tensor, Tensor, Tensor]:
    """Draw posterior samples of kernel hyperparameters using Laplace
    approximation.

    Only the Mahalanobis distance matrix is sampled.

    The diagonal of the Hessian is estimated using finite differences of the
    autograd gradients. The Laplace approximation is then N(p_map, inv(-H)).
    We construct a set of nsamp kernel hyperparameters by drawing nsamp-1
    values from this distribution and prepending the MAP parameters as the
    first sample.

    Args:
        mll: MLL object of MAP ALEBO GP.
        nsamp: Number of samples to return.

    Returns: Batch tensors of the kernel hyperparameters Uvec, mean constant,
        and output scale.
    """
    # Estimate diagonal of the Hessian
    mll.train()
    x0, property_dict, bounds = module_to_array(module=mll)
    x0 = x0.astype(np.float64)  # This is the MAP parameters
    H = np.zeros((len(x0), len(x0)))
    epsilon = 1e-4 + 1e-3 * np.abs(x0)
    for i, _ in enumerate(x0):
        # Compute gradient of df/dx_i wrt x_i
        def f(x):
            x_all = x0.copy()
            x_all[i] = x[0]
            return -_scipy_objective_and_grad(x_all, mll, property_dict)[1][i]

        H[i, i] = approx_fprime(np.array([x0[i]]), f,
                                epsilon=epsilon[i])  # pyre-ignore

    # Sample only Uvec; leave mean and output scale fixed.
    assert list(property_dict.keys()) == [
        "model.mean_module.constant",
        "model.covar_module.raw_outputscale",
        "model.covar_module.base_kernel.Uvec",
    ]
    H = H[2:, 2:]
    H += np.diag(-1e-3 *
                 np.ones(H.shape[0]))  # Add a nugget for inverse stability
    Sigma = np.linalg.inv(-H)
    samples = np.random.multivariate_normal(mean=x0[2:],
                                            cov=Sigma,
                                            size=(nsamp - 1))
    # Include the MAP estimate
    samples = np.vstack((x0[2:], samples))
    # Reshape
    attrs = property_dict["model.covar_module.base_kernel.Uvec"]
    Uvec_batch = torch.tensor(samples, dtype=attrs.dtype,
                              device=attrs.device).reshape(
                                  nsamp, *attrs.shape)
    # Get the other properties into batch mode
    mean_constant_batch = mll.model.mean_module.constant.repeat(nsamp, 1)
    output_scale_batch = mll.model.covar_module.raw_outputscale.repeat(nsamp)
    return Uvec_batch, mean_constant_batch, output_scale_batch
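A minimal usage sketch (not from the source; the fitted `mll` and the value of `nsamp` are illustrative). It assumes the MLL wraps a MAP-fit ALEBO GP whose parameters are exactly the mean constant, raw output scale, and Uvec checked by the assert above.

# Hypothetical usage: `mll` is assumed to be the MAP-fit ALEBO GP MLL.
Uvec_batch, mean_const_batch, output_scale_batch = laplace_sample_U(mll, nsamp=16)
# Uvec_batch has shape (16, *Uvec.shape); row 0 is the MAP estimate and the
# remaining 15 rows are draws from the Laplace approximation.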
Example 3
 def _getBatchedModel(
     self, kind="SingleTaskGP", double=False, outcome_transform=False
 ):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
         -1
     )
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     kwargs = {}
     if outcome_transform:
         kwargs["outcome_transform"] = Standardize(m=2)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y, **kwargs)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)
Example 4
 def _setUp(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     train_yvar = torch.tensor(0.1 ** 2, device=device, dtype=dtype)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]], device=device, dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(
         self.model_st.likelihood, self.model_st
     )
     self.mll_st = fit_gpytorch_model(self.mll_st, options={"maxiter": 5})
     model_fn = FixedNoiseGP(
         self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
     )
     self.model_fn = model_fn.to(device=device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(
         self.model_fn.likelihood, self.model_fn
     )
     self.mll_fn = fit_gpytorch_model(self.mll_fn, options={"maxiter": 5})
Example 5
 def _setUp(self, double=False):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device,
                              dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi))
      train_yvar = torch.tensor(0.1**2, device=self.device, dtype=dtype)
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]],
                                device=self.device,
                                dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=self.device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(self.model_st.likelihood,
                                              self.model_st)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll_st = fit_gpytorch_model(self.mll_st,
                                          options={"maxiter": 5},
                                          max_retries=1)
     model_fn = FixedNoiseGP(self.train_x, self.train_y,
                             self.train_yvar.expand_as(self.train_y))
     self.model_fn = model_fn.to(device=self.device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(self.model_fn.likelihood,
                                              self.model_fn)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll_fn = fit_gpytorch_model(self.mll_fn,
                                          options={"maxiter": 5},
                                          max_retries=1)
Example 6
    def test_gp(self):
        for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
            num_dim = (
                1
                + (iteration_fidelity is not None)
                + (data_fidelity is not None)
            )
            for batch_shape, num_outputs, dtype, lin_trunc in itertools.product(
                (torch.Size(), torch.Size([2])),
                (1, 2),
                (torch.float, torch.double),
                (False, True),
            ):
                tkwargs = {"device": self.device, "dtype": dtype}
                model, _ = _get_model_and_data(
                    iteration_fidelity=iteration_fidelity,
                    data_fidelity=data_fidelity,
                    batch_shape=batch_shape,
                    num_outputs=num_outputs,
                    lin_truncated=lin_trunc,
                    **tkwargs,
                )
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll.to(**tkwargs)
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore",
                                            category=OptimizationWarning)
                    fit_gpytorch_model(mll,
                                       sequential=False,
                                       options={"maxiter": 1})

                # test init
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)

                # test param sizes
                params = dict(model.named_parameters())
                for p in params:
                    self.assertEqual(
                        params[p].numel(),
                        num_outputs * torch.tensor(batch_shape).prod().item(),
                    )

                # test posterior
                # test non batch evaluation
                X = torch.rand(batch_shape + torch.Size([3, num_dim]),
                               **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape,
                                 batch_shape + torch.Size([3, num_outputs]))
                # test batch evaluation
                X = torch.rand(
                    torch.Size([2]) + batch_shape + torch.Size([3, num_dim]),
                    **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(
                    posterior.mean.shape,
                    torch.Size([2]) + batch_shape +
                    torch.Size([3, num_outputs]),
                )
Example 7
 def _getModel(self, double=False):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device,
                              dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y = torch.sin(train_x * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)
Example 8
    def test_sample_all_priors(self, cuda=False):
        device = torch.device("cuda" if cuda else "cpu")
        for dtype in (torch.float, torch.double):
            train_X = torch.rand(3, 5, device=device, dtype=dtype)
            train_Y = torch.rand(3, 1, device=device, dtype=dtype)
            model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll.to(device=device, dtype=dtype)
            original_state_dict = dict(deepcopy(mll.model.state_dict()))
            sample_all_priors(model)

            # make sure one of the hyperparameters changed
            self.assertTrue(
                dict(model.state_dict())["likelihood.noise_covar.raw_noise"] !=
                original_state_dict["likelihood.noise_covar.raw_noise"])
            # check that lengthscales are all different
            ls = model.covar_module.base_kernel.raw_lengthscale.view(
                -1).tolist()
            self.assertTrue(all(ls[0] != ls[i] for i in range(1, len(ls))))

            # change one of the priors to SmoothedBoxPrior
            model.covar_module = ScaleKernel(
                MaternKernel(
                    nu=2.5,
                    ard_num_dims=model.train_inputs[0].shape[-1],
                    batch_shape=model._aug_batch_shape,
                    lengthscale_prior=SmoothedBoxPrior(3.0, 6.0),
                ),
                batch_shape=model._aug_batch_shape,
                outputscale_prior=GammaPrior(2.0, 0.15),
            )
            original_state_dict = dict(deepcopy(mll.model.state_dict()))
            with warnings.catch_warnings(
                    record=True) as ws, settings.debug(True):
                sample_all_priors(model)
                self.assertEqual(len(ws), 1)
                self.assertTrue("rsample" in str(ws[0].message))

            # the lengthscale should not have changed because sampling is
            # not implemented for SmoothedBoxPrior
            self.assertTrue(
                torch.equal(
                    dict(model.state_dict())
                    ["covar_module.base_kernel.raw_lengthscale"],
                    original_state_dict[
                        "covar_module.base_kernel.raw_lengthscale"],
                ))

            # set setting_closure to None and make sure RuntimeError is raised
            prior_tuple = model.likelihood.noise_covar._priors["noise_prior"]
            model.likelihood.noise_covar._priors["noise_prior"] = (
                prior_tuple[0],
                prior_tuple[1],
                None,
            )
            with self.assertRaises(RuntimeError):
                sample_all_priors(model)
Example 9
 def _getModel(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example 10
 def test_fit_gpytorch_model_singular(self, cuda=False):
     options = {"disp": False, "maxiter": 5}
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=device, dtype=dtype)
         test_likelihood = GaussianLikelihood(noise_constraint=GreaterThan(
             -1.0, transform=None, initial_value=0.0))
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         fit_gpytorch_model(mll, options=options, max_retries=2)
Example 11
    def test_SACGP(self):
        train_X = torch.tensor(
            [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]
        )
        train_Y = torch.tensor([[1.0], [2.0], [3.0]])
        train_Yvar = 0.01 * torch.ones(3, 1)  # match train_X/train_Y dtype
        self.decomposition = {"1": [0, 3], "2": [1, 2]}

        self.model = SACGP(train_X, train_Y, train_Yvar, self.decomposition)
        mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
        fit_gpytorch_model(mll, options={"maxiter": 1})

        self.assertIsInstance(self.model, FixedNoiseGP)
        self.assertDictEqual(self.model.decomposition, self.decomposition)
        self.assertIsInstance(self.model.mean_module, ConstantMean)
        self.assertIsInstance(self.model.covar_module, SACKernel)

        # test number of named parameters
        num_of_mean = 0
        num_of_lengthscales = 0
        num_of_outputscales = 0
        for param_name, param in self.model.named_parameters():
            if param_name == "mean_module.constant":
                num_of_mean += param.data.shape.numel()
            elif "raw_lengthscale" in param_name:
                num_of_lengthscales += param.data.shape.numel()
            elif "raw_outputscale" in param_name:
                num_of_outputscales += param.data.shape.numel()
        self.assertEqual(num_of_mean, 1)
        self.assertEqual(num_of_lengthscales, 2)
        self.assertEqual(num_of_outputscales, 2)

        test_x = torch.rand(5, 4)
        posterior = self.model(test_x)
        self.assertIsInstance(posterior, MultivariateNormal)
Example 12
def get_map_model(
    train_X: Tensor,
    train_Y: Tensor,
    train_Yvar: Tensor,
    decomposition: Dict[str, List[int]],
    train_embedding: bool = True,
    cat_feature_dict: Optional[Dict] = None,
    embs_feature_dict: Optional[Dict] = None,
    embs_dim_list: Optional[List[int]] = None,
    context_weight_dict: Optional[Dict] = None,
) -> Tuple[LCEAGP, ExactMarginalLogLikelihood]:
    """Obtain MAP fitting of Latent Context Embedding Additive (LCE-A) GP."""
    # assert train_X is non-batched
    assert train_X.dim() < 3, "Batch training is not supported"
    model = LCEAGP(
        train_X=train_X,
        train_Y=train_Y,
        train_Yvar=train_Yvar,
        decomposition=decomposition,
        train_embedding=train_embedding,
        embs_dim_list=embs_dim_list,
        cat_feature_dict=cat_feature_dict,
        embs_feature_dict=embs_feature_dict,
        context_weight_dict=context_weight_dict,
    )
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    return model, mll
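An illustrative call (toy shapes and decomposition keys are assumptions, not from the source). The decomposition maps each context name to the input columns belonging to that context; the groups are given equal sizes here.

# Hypothetical toy data: 8 points in 4 dimensions, two contexts of 2 columns each.
train_X = torch.rand(8, 4)
train_Y = torch.rand(8, 1)
train_Yvar = 0.01 * torch.ones(8, 1)
decomposition = {"context_a": [0, 1], "context_b": [2, 3]}
model, mll = get_map_model(train_X, train_Y, train_Yvar, decomposition)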
Example 13
 def test_exclude(self):
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]],
                                device=self.device,
                                dtype=dtype)
         train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=self.device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, exclude={"model.mean_module.constant"})
         self.assertTrue(np.array_equal(x, np.zeros(4)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, self.device.type)
         self.assertIsNone(bounds)
Example 14
    def testFixedNoiseLCEMGP(self):
        d = 1
        for dtype in (torch.float, torch.double):
            train_x = torch.rand(10, d, device=self.device, dtype=dtype)
            train_y = torch.cos(train_x)
            task_indices = torch.tensor(
                [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                device=self.device, dtype=dtype)
            train_x = torch.cat([train_x, task_indices.unsqueeze(-1)], dim=1)
            train_yvar = torch.ones(10, 1, device=self.device,
                                    dtype=dtype) * 0.01

            model = FixedNoiseLCEMGP(train_X=train_x,
                                     train_Y=train_y,
                                     train_Yvar=train_yvar,
                                     task_feature=d)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 1})

            self.assertIsInstance(model, FixedNoiseLCEMGP)

            test_x = torch.rand(5, d, device=self.device, dtype=dtype)
            task_indices = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0],
                                        device=self.device,
                                        dtype=dtype)
            test_x = torch.cat([test_x, task_indices.unsqueeze(-1)], dim=1)
            self.assertIsInstance(model(test_x), MultivariateNormal)
Example 15
    def test_FixedNoiseMultiTaskGP_single_output(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            model = _get_fixed_noise_model_single_output(**tkwargs)
            self.assertIsInstance(model, FixedNoiseMultiTaskGP)
            self.assertEqual(model.num_outputs, 1)
            self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example 16
 def _setUp(self, double=False, cuda=False, expand=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device,
                              dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi))
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     if expand:
         self.train_x = self.train_x.expand(-1, 2)
         ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)
     else:
         ics = torch.tensor([[0.5]], device=device, dtype=dtype)
     self.initial_conditions = ics
     self.f_best = self.train_y.max().item()
     model = SingleTaskGP(self.train_x, self.train_y)
     self.model = model.to(device=device, dtype=dtype)
     self.mll = ExactMarginalLogLikelihood(self.model.likelihood,
                                           self.model)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll = fit_gpytorch_model(self.mll,
                                       options={"maxiter": 1},
                                       max_retries=1)
Example 17
def initialize_model(train_x, train_obj):
    # define models for objective and constraint
    model = SingleTaskGP(train_x,
                         train_obj,
                         outcome_transform=Standardize(m=train_obj.shape[-1]))
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    return mll, model
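Note that `initialize_model` only builds the model and its MLL; fitting is a separate step. A short usage sketch (the toy tensors are assumptions):

# Hypothetical usage: construct the model, then fit its hyperparameters.
train_x = torch.rand(10, 2)
train_obj = torch.rand(10, 1)
mll, model = initialize_model(train_x, train_obj)
fit_gpytorch_model(mll)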
Example 18
 def test_manual_bounds(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
         train_y = torch.tensor([4.0], device=device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, bounds={"model.covar_module.raw_lengthscale": (0.1, None)}
         )
         self.assertTrue(np.array_equal(x, np.zeros(5)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
             "model.mean_module.constant": torch.Size([1]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, device.type)
         lower_exp = np.full_like(x, 0.1)
         for p in ("likelihood.noise_covar.raw_noise", "model.mean_module.constant"):
             lower_exp[_get_index(pdict, p)] = -np.inf
         self.assertTrue(np.equal(bounds[0], lower_exp).all())
         self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
Example 19
    def test_MultiTaskGP_single_output(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda" if cuda else "cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model_single_output(**tkwargs)
            self.assertIsInstance(model, MultiTaskGP)
            self.assertIsInstance(model.likelihood, GaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(model.task_covar_module.covar_factor.shape[-1],
                             model._rank)

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example 20
    def test_get_extra_mll_args(self):
        train_X = torch.rand(3, 5)
        train_Y = torch.rand(3, 1)
        model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
        # test ExactMarginalLogLikelihood
        exact_mll = ExactMarginalLogLikelihood(model.likelihood, model)
        exact_extra_args = _get_extra_mll_args(mll=exact_mll)
        self.assertEqual(len(exact_extra_args), 1)
        self.assertTrue(torch.equal(exact_extra_args[0], train_X))

        # test VariationalELBO
        elbo = VariationalELBO(model.likelihood, model, num_data=train_X.shape[0])
        elbo_extra_args = _get_extra_mll_args(mll=elbo)
        self.assertEqual(len(elbo_extra_args), 0)

        # test SumMarginalLogLikelihood
        model2 = ModelListGP(gp_models=[model])
        sum_mll = SumMarginalLogLikelihood(model2.likelihood, model2)
        sum_mll_extra_args = _get_extra_mll_args(mll=sum_mll)
        self.assertEqual(len(sum_mll_extra_args), 1)
        self.assertEqual(len(sum_mll_extra_args[0]), 1)
        self.assertTrue(torch.equal(sum_mll_extra_args[0][0], train_X))

        # test unsupported MarginalLogLikelihood type
        unsupported_mll = MarginalLogLikelihood(model.likelihood, model)
        with self.assertRaises(ValueError):
            _get_extra_mll_args(mll=unsupported_mll)
Example 21
    def testLCEAGP(self):
        for dtype in (torch.float, torch.double):
            train_X = torch.tensor(
                [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0],
                 [2.0, 2.0, 2.0, 2.0]],
                device=self.device,
                dtype=dtype,
            )
            train_Y = torch.tensor([[1.0], [2.0], [3.0]],
                                   device=self.device,
                                   dtype=dtype)
            train_Yvar = 0.01 * torch.ones(
                3, 1, device=self.device, dtype=dtype)
            # Test setting attributes
            decomposition = {"1": [0, 1], "2": [2, 3]}

            # test instantiate model
            model = LCEAGP(
                train_X=train_X,
                train_Y=train_Y,
                train_Yvar=train_Yvar,
                decomposition=decomposition,
            )
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 1})

            self.assertIsInstance(model, LCEAGP)
            self.assertIsInstance(model.covar_module, LCEAKernel)
            self.assertDictEqual(model.decomposition, decomposition)

            test_x = torch.rand(5, 4, device=self.device, dtype=dtype)
            posterior = model(test_x)
            self.assertIsInstance(posterior, MultivariateNormal)
Example 22
    def test_gp(self, cuda=False):
        for batch_shape in (torch.Size([]), torch.Size([2])):
            for num_outputs in (1, 2):
                for double in (False, True):
                    tkwargs = {
                        "device": torch.device("cuda" if cuda else "cpu"),
                        "dtype": torch.double if double else torch.float,
                    }
                    model, _ = self._get_model_and_data(
                        batch_shape=batch_shape,
                        num_outputs=num_outputs,
                        **tkwargs)
                    mll = ExactMarginalLogLikelihood(model.likelihood,
                                                     model).to(**tkwargs)
                    fit_gpytorch_model(mll, options={"maxiter": 1})
                    # test init
                    self.assertIsInstance(model.mean_module, ConstantMean)
                    self.assertIsInstance(model.covar_module, ScaleKernel)
                    matern_kernel = model.covar_module.base_kernel
                    self.assertIsInstance(matern_kernel, MaternKernel)
                    self.assertIsInstance(matern_kernel.lengthscale_prior,
                                          GammaPrior)

                    # Test forward
                    test_x = torch.rand(batch_shape + torch.Size([3, 1]),
                                        **tkwargs)
                    posterior = model(test_x)
                    self.assertIsInstance(posterior, MultivariateNormal)

                    # test param sizes
                    params = dict(model.named_parameters())
                    for p in params:
                        self.assertEqual(
                            params[p].numel(),
                            num_outputs *
                            torch.tensor(batch_shape).prod().item(),
                        )

                    # test posterior
                    # test non batch evaluation
                    X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        batch_shape + torch.Size([3, num_outputs]))
                    # test batch evaluation
                    X = torch.rand(
                        torch.Size([2]) + batch_shape + torch.Size([3, 1]),
                        **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        torch.Size([2]) + batch_shape +
                        torch.Size([3, num_outputs]),
                    )
Example 23
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1.0, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, OptimizationWarning) for w in ws)
             )
Example 24
def get_fitted_model(train_x, train_obj, state_dict=None):
    # initialize and fit model
    model = SingleTaskGP(train_X=train_x, train_Y=train_obj)

    if state_dict is not None:
        model.load_state_dict(state_dict)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    mll.to(train_x)
    fit_gpytorch_model(mll)

    return model
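The state_dict argument allows warm-starting: when new observations arrive, refitting starts from the previous hyperparameters instead of from scratch. A sketch (the new-data tensors are hypothetical):

# Hypothetical warm-start pattern for a sequential loop.
model = get_fitted_model(train_x, train_obj)
new_x = torch.cat([train_x, candidate_x])
new_obj = torch.cat([train_obj, candidate_obj])
model = get_fitted_model(new_x, new_obj, state_dict=model.state_dict())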
Example 25
    def test_set_parameters(self):
        for dtype in (torch.float, torch.double):
            # get a test module
            train_x = torch.tensor([[1.0, 2.0, 3.0]],
                                   device=self.device,
                                   dtype=dtype)
            train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
            likelihood = GaussianLikelihood()
            model = ExactGP(train_x, train_y, likelihood)
            model.covar_module = RBFKernel(ard_num_dims=3)
            model.mean_module = ConstantMean()
            model.to(device=self.device, dtype=dtype)
            mll = ExactMarginalLogLikelihood(likelihood, model)
            # get parameters
            x, pdict, bounds = module_to_array(module=mll)

            # Set parameters
            mll = set_params_with_array(mll,
                                        np.array([1.0, 2.0, 3.0, 4.0,
                                                  5.0]), pdict)
            z = dict(mll.named_parameters())
            self.assertTrue(
                torch.equal(
                    z["likelihood.noise_covar.raw_noise"],
                    torch.tensor([1.0], device=self.device, dtype=dtype),
                ))
            self.assertTrue(
                torch.equal(
                    z["model.covar_module.raw_lengthscale"],
                    torch.tensor([[2.0, 3.0, 4.0]],
                                 device=self.device,
                                 dtype=dtype),
                ))
            self.assertTrue(
                torch.equal(
                    z["model.mean_module.constant"],
                    torch.tensor([5.0], device=self.device, dtype=dtype),
                ))

            # Extract again
            x2, pdict2, bounds2 = module_to_array(module=mll)
            self.assertTrue(
                np.array_equal(x2, np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
Example 26
 def _get_model(self, batch_shape, num_outputs, likelihood=None, **tkwargs):
     train_x, train_y = _get_random_data(batch_shape=batch_shape,
                                         num_outputs=num_outputs,
                                         **tkwargs)
     model = SingleTaskGP(train_X=train_x,
                          train_Y=train_y,
                          likelihood=likelihood)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example 27
 def _get_model(self, batch_shape, num_outputs, **tkwargs):
     train_x, train_y = _get_random_data(batch_shape=batch_shape,
                                         num_outputs=num_outputs,
                                         **tkwargs)
     train_yvar = (0.1 + 0.1 * torch.rand_like(train_y))**2
     model = HeteroskedasticSingleTaskGP(train_X=train_x,
                                         train_Y=train_y,
                                         train_Yvar=train_yvar)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example 28
 def test_set_transformed_inputs(self):
     # This is intended to catch https://github.com/pytorch/botorch/issues/1078.
     # More general testing of _set_transformed_inputs is done under ModelListGP.
     X = torch.rand(5, 2)
     Y = X**2
     for tf_class in [Normalize, InputStandardize]:
         intf = tf_class(d=2)
         model = SingleTaskGP(X, Y, input_transform=intf)
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         fit_gpytorch_model(mll, options={"maxiter": 2})
         tf_X = intf(X)
         self.assertEqual(X.shape, tf_X.shape)
Example 29
 def _getBatchedModel(self, kind="SingleTaskGP", double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device,
                              dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(train_x, train_y,
                              0.1 * torch.ones_like(train_y))
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(train_x, train_y,
                                             0.1 * torch.ones_like(train_y))
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example 30
    def test_set_parameters(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # get a test module
            train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
            train_y = torch.tensor([4.0], device=device, dtype=dtype)
            likelihood = GaussianLikelihood()
            model = ExactGP(train_x, train_y, likelihood)
            model.covar_module = RBFKernel(ard_num_dims=3)
            model.mean_module = ConstantMean()
            model.to(device=device, dtype=dtype)
            mll = ExactMarginalLogLikelihood(likelihood, model)
            # get parameters
            x, pdict, bounds = module_to_array(module=mll)

            # Set parameters
            mll = set_params_with_array(mll, np.array([1.0, 2.0, 3.0, 4.0, 5.0]), pdict)
            z = dict(mll.named_parameters())
            self.assertTrue(
                torch.equal(
                    z["likelihood.noise_covar.raw_noise"],
                    torch.tensor([1.0], device=device, dtype=dtype),
                )
            )
            self.assertTrue(
                torch.equal(
                    z["model.covar_module.raw_lengthscale"],
                    torch.tensor([[2.0, 3.0, 4.0]], device=device, dtype=dtype),
                )
            )
            self.assertTrue(
                torch.equal(
                    z["model.mean_module.constant"],
                    torch.tensor([5.0], device=device, dtype=dtype),
                )
            )

            # Extract again
            x2, pdict2, bounds2 = module_to_array(module=mll)
            self.assertTrue(np.array_equal(x2, np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
Example 31
def get_and_fit_ARDRBF(
    Xs,
    Ys,
    Yvars,
    task_features=None,
    fidelity_features=None,
    refit_model=None,
    state_dict=None,
    fidelity_model_id=None,
    metric_names=None,
):
    m = ARDRBFGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0])
    mll = ExactMarginalLogLikelihood(m.likelihood, m)
    mll = fit_gpytorch_model(mll)
    return m
Example 32
def fixed_noise_gp_model_constructor(
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],  # Maybe these should be optional where irrelevant?
    task_features: List[int],
    fidelity_features: List[int],
    metric_names: List[str],
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit_model: bool = True,
    **kwargs: Any,
) -> Model:
    gp = FixedNoiseGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0], **kwargs)
    gp.to(Xs[0])
    if state_dict is not None:
        gp.load_state_dict(state_dict)
    if state_dict is None or refit_model:
        fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))
    return gp
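A minimal invocation sketch (argument values are assumptions; this constructor ignores the task/fidelity lists, which exist only to satisfy the common constructor signature):

# Hypothetical call with a single toy metric; only Xs[0], Ys[0], Yvars[0] are used.
gp = fixed_noise_gp_model_constructor(
    Xs=[torch.rand(10, 3)],
    Ys=[torch.rand(10, 1)],
    Yvars=[0.01 * torch.ones(10, 1)],
    task_features=[],
    fidelity_features=[],
    metric_names=["metric_0"],
)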