Example 1
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.ones(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1e-7, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, NumericalWarning) for w in ws)
             )
         # ensure that fitting fails when the noise constraint guarantees that jitter does not help
         gp.likelihood = GaussianLikelihood(
             noise_constraint=Interval(-2, -1, transform=None, initial_value=-1.5)
         )
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         with self.assertRaises(NotPSDError):
             fit_gpytorch_model(mll, options=options, max_retries=2)
         # ensure we can handle NaNErrors in the optimizer
         with mock.patch.object(SingleTaskGP, "__call__", side_effect=NanError):
             gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
             mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
             mll.to(device=self.device, dtype=dtype)
             fit_gpytorch_model(
                 mll, options={"disp": False, "maxiter": 1}, max_retries=1
             )
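For contrast with the singular and ill-conditioned cases exercised above, here is a minimal happy-path sketch of the same fitting pattern. This is an illustrative sketch, not part of the test suite; it assumes standard BoTorch/GPyTorch imports and uses synthetic data.

    import torch
    from botorch.fit import fit_gpytorch_model
    from botorch.models import SingleTaskGP
    from gpytorch.mlls import ExactMarginalLogLikelihood

    # Well-conditioned synthetic data: n x d inputs, n x 1 outcomes.
    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True).sin()
    gp = SingleTaskGP(train_X, train_Y)
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_model(mll)  # maximizes the marginal log likelihood in place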
Example 2
 def test_set_transformed_inputs(self):
     for dtype in (torch.float, torch.double):
         train_x = torch.rand(5, 1, dtype=dtype, device=self.device)
         train_y = torch.rand(5, 1, dtype=dtype, device=self.device)
         tf = Normalize(
             d=1,
             bounds=torch.tensor([[0.0], [2.0]], dtype=dtype, device=self.device),
             transform_on_preprocess=False,
         )
         model = SingleTaskGP(train_x, train_y, input_transform=tf)
         self.assertTrue(torch.equal(model.train_inputs[0], train_x))
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         # check that the input transform is only applied when
         # transform_on_preprocess is True
         self.assertTrue(torch.equal(model.train_inputs[0], train_x))
         tf.transform_on_preprocess = True
         _set_transformed_inputs(mll)
         self.assertTrue(torch.equal(model.train_inputs[0], tf(train_x)))
         model.eval()
         # test no set_train_data method
         mock_model = MockGP(MockPosterior())
         mock_model.train_inputs = (train_x,)
         mock_model.likelihood = model.likelihood
         mock_model.input_transform = tf
         mll = ExactMarginalLogLikelihood(mock_model.likelihood, mock_model)
         with self.assertRaises(BotorchError):
             _set_transformed_inputs(mll)
Example 3
 def _setUp(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     train_yvar = torch.tensor(0.1 ** 2, device=device)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]], device=device, dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(
         self.model_st.likelihood, self.model_st
     )
     self.mll_st = fit_gpytorch_model(self.mll_st, options={"maxiter": 5})
     model_fn = FixedNoiseGP(
         self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
     )
     self.model_fn = model_fn.to(device=device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(
         self.model_fn.likelihood, self.model_fn
     )
     self.mll_fn = fit_gpytorch_model(self.mll_fn, options={"maxiter": 5})
Example 4
    def test_fit_gpytorch_model_singular(self):
        options = {"disp": False, "maxiter": 5}
        for dtype in (torch.float, torch.double):
            X_train = torch.ones(2, 2, device=self.device, dtype=dtype)
            Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
            test_likelihood = GaussianLikelihood(
                noise_constraint=GreaterThan(-1e-7, transform=None, initial_value=0.0)
            )
            gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            mll.to(device=self.device, dtype=dtype)
            # this will do multiple retries (and emit warnings, which is desired)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                fit_gpytorch_model(mll, options=options, max_retries=2)
                self.assertTrue(
                    any(issubclass(w.category, NumericalWarning) for w in ws)
                )
            # ensure that fitting fails when the noise constraint guarantees that jitter does not help
            gp.likelihood = GaussianLikelihood(
                noise_constraint=Interval(-2, -1, transform=None, initial_value=-1.5)
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            mll.to(device=self.device, dtype=dtype)
            with self.assertLogs(level="DEBUG") as logs:
                fit_gpytorch_model(mll, options=options, max_retries=2)
            self.assertTrue(any("NotPSDError" in log for log in logs.output))
            # ensure we can handle NaNErrors in the optimizer
            with mock.patch.object(SingleTaskGP, "__call__", side_effect=NanError):
                gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
                mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
                mll.to(device=self.device, dtype=dtype)
                fit_gpytorch_model(
                    mll, options={"disp": False, "maxiter": 1}, max_retries=1
                )
            # ensure we catch NotPSDErrors
            with mock.patch.object(SingleTaskGP, "__call__", side_effect=NotPSDError):
                mll = self._getModel()
                with self.assertLogs(level="DEBUG") as logs:
                    fit_gpytorch_model(mll, max_retries=2)
                for retry in [1, 2]:
                    self.assertTrue(
                        any(
                            f"Fitting failed on try {retry} due to a NotPSDError."
                            in log
                            for log in logs.output
                        )
                    )

            # Failure due to optimization warning

            def optimize_w_warning(mll, **kwargs):
                warnings.warn("Dummy warning.", OptimizationWarning)
                return mll, None

            mll = self._getModel()
            with self.assertLogs(level="DEBUG") as logs, settings.debug(True):
                fit_gpytorch_model(mll, optimizer=optimize_w_warning, max_retries=2)
            self.assertTrue(
                any("Fitting failed on try 1." in log for log in logs.output)
            )
Example 5
 def _setUp(self, double=False):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device,
                              dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi))
     train_yvar = torch.tensor(0.1**2, device=self.device)
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]],
                                device=self.device,
                                dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=self.device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(self.model_st.likelihood,
                                              self.model_st)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll_st = fit_gpytorch_model(self.mll_st,
                                          options={"maxiter": 5},
                                          max_retries=1)
     model_fn = FixedNoiseGP(self.train_x, self.train_y,
                             self.train_yvar.expand_as(self.train_y))
     self.model_fn = model_fn.to(device=self.device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(self.model_fn.likelihood,
                                              self.model_fn)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll_fn = fit_gpytorch_model(self.mll_fn,
                                          options={"maxiter": 5},
                                          max_retries=1)
Example 6
def get_map_model(
    B: Tensor,
    train_X: Tensor,
    train_Y: Tensor,
    train_Yvar: Tensor,
    restarts: int,
    init_state_dict: Optional[Dict[str, Tensor]],
) -> ExactMarginalLogLikelihood:
    """Do random-restart optimization for MAP fitting of an ALEBO GP model.

    Args:
        B: Projection matrix.
        train_X: X training data.
        train_Y: Y training data.
        train_Yvar: Noise variances of each training Y.
        restarts: Number of restarts for MAP estimation.
        init_state_dict: Optionally begin MAP estimation with this state dict.

    Returns: MLL of a non-batch ALEBO GP with MAP kernel hyperparameters.
    """
    f_best = 1e8
    sd_best = {}
    # Fit with random restarts
    for _ in range(restarts):
        m = ALEBOGP(B=B,
                    train_X=train_X,
                    train_Y=train_Y,
                    train_Yvar=train_Yvar)
        if init_state_dict is not None:
            # pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
            #  param but got `Dict[str, Tensor]`.
            m.load_state_dict(init_state_dict)
        mll = ExactMarginalLogLikelihood(m.likelihood, m)
        mll.train()
        mll, info_dict = fit_gpytorch_scipy(mll,
                                            track_iterations=False,
                                            method="tnc")
        logger.debug(info_dict)
        # pyre-fixme[58]: `<` is not supported for operand types
        #  `Union[List[botorch.optim.fit.OptimizationIteration], float]` and `float`.
        if info_dict["fopt"] < f_best:
            f_best = float(info_dict["fopt"])  # pyre-ignore
            sd_best = m.state_dict()
    # Set the final value
    m = ALEBOGP(B=B, train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
    # pyre-fixme[6]: Expected `OrderedDict[str, Tensor]` for 1st param but got
    #  `Dict[typing.Any, typing.Any]`.
    m.load_state_dict(sd_best)
    mll = ExactMarginalLogLikelihood(m.likelihood, m)
    return mll
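A hedged usage sketch of the helper above; B_proj, X, Y, and Yvar are hypothetical placeholders shaped as described in the docstring.

    # Illustrative only: run 10 random restarts and keep the best MAP fit.
    mll = get_map_model(B=B_proj, train_X=X, train_Y=Y, train_Yvar=Yvar,
                        restarts=10, init_state_dict=None)
    map_gp = mll.model  # the refit ALEBOGP carrying the best hyperparameters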
Example 7
    def test_MultiTaskGP_single_output(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device":
                torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model_single_output(**tkwargs)
            self.assertIsInstance(model, MultiTaskGP)
            self.assertIsInstance(model.likelihood, GaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(model.task_covar_module.covar_factor.shape[-1],
                             model._rank)

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example 8
    def testLCEAGP(self):
        for dtype in (torch.float, torch.double):
            train_X = torch.tensor(
                [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0],
                 [2.0, 2.0, 2.0, 2.0]],
                device=self.device,
                dtype=dtype,
            )
            train_Y = torch.tensor([[1.0], [2.0], [3.0]],
                                   device=self.device,
                                   dtype=dtype)
            train_Yvar = 0.01 * torch.ones(
                3, 1, device=self.device, dtype=dtype)
            # Test setting attributes
            decomposition = {"1": [0, 1], "2": [2, 3]}

            # test instantiate model
            model = LCEAGP(
                train_X=train_X,
                train_Y=train_Y,
                train_Yvar=train_Yvar,
                decomposition=decomposition,
            )
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 1})

            self.assertIsInstance(model, LCEAGP)
            self.assertIsInstance(model.covar_module, LCEAKernel)
            self.assertDictEqual(model.decomposition, decomposition)

            test_x = torch.rand(5, 4, device=self.device, dtype=dtype)
            posterior = model(test_x)
            self.assertIsInstance(posterior, MultivariateNormal)
Example 9
 def test_exclude(self):
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]],
                                device=self.device,
                                dtype=dtype)
         train_y = torch.tensor([4.0], device=self.device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=self.device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, exclude={"model.mean_module.constant"})
         self.assertTrue(np.array_equal(x, np.zeros(4)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, self.device.type)
         self.assertIsNone(bounds)
Example 10
 def _setUp(self, double=False, cuda=False, expand=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device,
                              dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi))
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     if expand:
         self.train_x = self.train_x.expand(-1, 2)
         ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)
     else:
         ics = torch.tensor([[0.5]], device=device, dtype=dtype)
     self.initial_conditions = ics
     self.f_best = self.train_y.max().item()
     model = SingleTaskGP(self.train_x, self.train_y)
     self.model = model.to(device=device, dtype=dtype)
     self.mll = ExactMarginalLogLikelihood(self.model.likelihood,
                                           self.model)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=OptimizationWarning)
         self.mll = fit_gpytorch_model(self.mll,
                                       options={"maxiter": 1},
                                       max_retries=1)
Example 11
    def testFixedNoiseLCEMGP(self):
        d = 1
        for dtype in (torch.float, torch.double):
            train_x = torch.rand(10, d, device=self.device, dtype=dtype)
            train_y = torch.cos(train_x)
            task_indices = torch.tensor(
                [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                device=self.device)
            train_x = torch.cat([train_x, task_indices.unsqueeze(-1)], axis=1)
            train_yvar = torch.ones(10, 1, device=self.device,
                                    dtype=dtype) * 0.01

            model = FixedNoiseLCEMGP(train_X=train_x,
                                     train_Y=train_y,
                                     train_Yvar=train_yvar,
                                     task_feature=d)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_model(mll, options={"maxiter": 1})

            self.assertIsInstance(model, FixedNoiseLCEMGP)

            test_x = torch.rand(5, d, device=self.device, dtype=dtype)
            task_indices = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0],
                                        device=self.device,
                                        dtype=dtype)
            test_x = torch.cat(
                [test_x, task_indices.unsqueeze(-1)],
                axis=1,
            )
            self.assertIsInstance(model(test_x), MultivariateNormal)
Example 12
def get_map_model(
    train_X: Tensor,
    train_Y: Tensor,
    train_Yvar: Tensor,
    decomposition: Dict[str, List[int]],
    train_embedding: bool = True,
    cat_feature_dict: Optional[Dict] = None,
    embs_feature_dict: Optional[Dict] = None,
    embs_dim_list: Optional[List[int]] = None,
    context_weight_dict: Optional[Dict] = None,
) -> Tuple[LCEAGP, ExactMarginalLogLikelihood]:
    """Obtain MAP fitting of Latent Context Embedding Additive (LCE-A) GP."""
    # assert train_X is non-batched
    assert train_X.dim() < 3, "Don't support batch training"
    model = LCEAGP(
        train_X=train_X,
        train_Y=train_Y,
        train_Yvar=train_Yvar,
        decomposition=decomposition,
        train_embedding=train_embedding,
        embs_dim_list=embs_dim_list,
        cat_feature_dict=cat_feature_dict,
        embs_feature_dict=embs_feature_dict,
        context_weight_dict=context_weight_dict,
    )
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)
    return model, mll
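A hypothetical call to the helper above, assuming a 4-dimensional input whose features split into two contexts; X, Y, and Yvar are placeholder tensors.

    decomposition = {"context_a": [0, 1], "context_b": [2, 3]}
    model, mll = get_map_model(
        train_X=X, train_Y=Y, train_Yvar=Yvar, decomposition=decomposition
    )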
Example 13
def initialize_model(train_x, train_obj):
    # define a GP model for the objective
    model = SingleTaskGP(train_x,
                         train_obj,
                         outcome_transform=Standardize(m=train_obj.shape[-1]))
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    return mll, model
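Note that the returned mll is not yet optimized. A typical follow-up, assuming fit_gpytorch_model is imported from botorch.fit:

    mll, model = initialize_model(train_x, train_obj)
    fit_gpytorch_model(mll)  # fit hyperparameters before calling model.posterior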
Example 14
    def test_SACGP(self):
        train_X = torch.tensor(
            [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]
        )
        train_Y = torch.tensor([[1.0], [2.0], [3.0]])
        train_Yvar = 0.01 * torch.ones(3, 1, dtype=torch.double)
        self.decomposition = {"1": [0, 3], "2": [1, 2]}

        self.model = SACGP(train_X, train_Y, train_Yvar, self.decomposition)
        mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
        fit_gpytorch_model(mll, options={"maxiter": 1})

        self.assertIsInstance(self.model, FixedNoiseGP)
        self.assertDictEqual(self.model.decomposition, self.decomposition)
        self.assertIsInstance(self.model.mean_module, ConstantMean)
        self.assertIsInstance(self.model.covar_module, SACKernel)

        # test number of named parameters
        num_of_mean = 0
        num_of_lengthscales = 0
        num_of_outputscales = 0
        for param_name, param in self.model.named_parameters():
            if param_name == "mean_module.constant":
                num_of_mean += param.data.shape.numel()
            elif "raw_lengthscale" in param_name:
                num_of_lengthscales += param.data.shape.numel()
            elif "raw_outputscale" in param_name:
                num_of_outputscales += param.data.shape.numel()
        self.assertEqual(num_of_mean, 1)
        self.assertEqual(num_of_lengthscales, 2)
        self.assertEqual(num_of_outputscales, 2)

        test_x = torch.rand(5, 4)
        posterior = self.model(test_x)
        self.assertIsInstance(posterior, MultivariateNormal)
Example 15
    def test_FixedNoiseMultiTaskGP_single_output(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            model = _get_fixed_noise_model_single_output(**tkwargs)
            self.assertIsInstance(model, FixedNoiseMultiTaskGP)
            self.assertEqual(model.num_outputs, 1)
            self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Example 16
 def test_manual_bounds(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
         train_y = torch.tensor([4.0], device=device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, bounds={"model.covar_module.raw_lengthscale": (0.1, None)}
         )
         self.assertTrue(np.array_equal(x, np.zeros(5)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
             "model.mean_module.constant": torch.Size([1]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, device.type)
         lower_exp = np.full_like(x, 0.1)
         for p in ("likelihood.noise_covar.raw_noise", "model.mean_module.constant"):
             lower_exp[_get_index(pdict, p)] = -np.inf
         self.assertTrue(np.equal(bounds[0], lower_exp).all())
         self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
Example 17
 def _getBatchedModel(
     self, kind="SingleTaskGP", double=False, outcome_transform=False
 ):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
         -1
     )
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     kwargs = {}
     if outcome_transform:
         kwargs["outcome_transform"] = Standardize(m=2)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y, **kwargs)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)
Example 18
    def test_get_extra_mll_args(self):
        train_X = torch.rand(3, 5)
        train_Y = torch.rand(3)
        model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
        # test ExactMarginalLogLikelihood
        exact_mll = ExactMarginalLogLikelihood(model.likelihood, model)
        exact_extra_args = _get_extra_mll_args(mll=exact_mll)
        self.assertEqual(len(exact_extra_args), 1)
        self.assertTrue(torch.equal(exact_extra_args[0], train_X))

        # test VariationalELBO
        elbo = VariationalELBO(model.likelihood, model, num_data=train_X.shape[0])
        elbo_extra_args = _get_extra_mll_args(mll=elbo)
        self.assertEqual(len(elbo_extra_args), 0)

        # test SumMarginalLogLikelihood
        model2 = ModelListGP(gp_models=[model])
        sum_mll = SumMarginalLogLikelihood(model2.likelihood, model2)
        sum_mll_extra_args = _get_extra_mll_args(mll=sum_mll)
        self.assertEqual(len(sum_mll_extra_args), 1)
        self.assertEqual(len(sum_mll_extra_args[0]), 1)
        self.assertTrue(torch.equal(sum_mll_extra_args[0][0], train_X))

        # test unsupported MarginalLogLikelihood type
        unsupported_mll = MarginalLogLikelihood(model.likelihood, model)
        with self.assertRaises(ValueError):
            _get_extra_mll_args(mll=unsupported_mll)
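The extra args matter because an exact marginal log likelihood is evaluated as mll(output, targets, *extra). A minimal sketch of how a fitting loop would consume them, reusing model and exact_mll from the test above:

    model.train()  # exact GPs evaluate at training inputs only in train mode
    output = model(*model.train_inputs)
    args = [output, model.train_targets] + _get_extra_mll_args(mll=exact_mll)
    loss = -exact_mll(*args).sum()  # negated, since fitting minimizes this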
Example 19
    def test_gp(self):
        for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
            num_dim = 1 + (iteration_fidelity is not None) + (data_fidelity
                                                              is not None)
            for batch_shape, num_outputs, dtype, lin_trunc in itertools.product(
                (torch.Size(), torch.Size([2])),
                (1, 2),
                (torch.float, torch.double),
                (False, True),
            ):
                tkwargs = {"device": self.device, "dtype": dtype}
                model, _ = _get_model_and_data(
                    iteration_fidelity=iteration_fidelity,
                    data_fidelity=data_fidelity,
                    batch_shape=batch_shape,
                    num_outputs=num_outputs,
                    lin_truncated=lin_trunc,
                    **tkwargs,
                )
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll.to(**tkwargs)
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore",
                                            category=OptimizationWarning)
                    fit_gpytorch_model(mll,
                                       sequential=False,
                                       options={"maxiter": 1})

                # test init
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)

                # test param sizes
                params = dict(model.named_parameters())
                for p in params:
                    self.assertEqual(
                        params[p].numel(),
                        num_outputs * torch.tensor(batch_shape).prod().item(),
                    )

                # test posterior
                # test non batch evaluation
                X = torch.rand(batch_shape + torch.Size([3, num_dim]),
                               **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape,
                                 batch_shape + torch.Size([3, num_outputs]))
                # test batch evaluation
                X = torch.rand(
                    torch.Size([2]) + batch_shape + torch.Size([3, num_dim]),
                    **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(
                    posterior.mean.shape,
                    torch.Size([2]) + batch_shape +
                    torch.Size([3, num_outputs]),
                )
Example 20
    def test_gp(self, cuda=False):
        for batch_shape in (torch.Size([]), torch.Size([2])):
            for num_outputs in (1, 2):
                for double in (False, True):
                    tkwargs = {
                        "device":
                        torch.device("cuda") if cuda else torch.device("cpu"),
                        "dtype":
                        torch.double if double else torch.float,
                    }
                    model, _ = self._get_model_and_data(
                        batch_shape=batch_shape,
                        num_outputs=num_outputs,
                        **tkwargs)
                    mll = ExactMarginalLogLikelihood(model.likelihood,
                                                     model).to(**tkwargs)
                    fit_gpytorch_model(mll, options={"maxiter": 1})
                    # test init
                    self.assertIsInstance(model.mean_module, ConstantMean)
                    self.assertIsInstance(model.covar_module, ScaleKernel)
                    matern_kernel = model.covar_module.base_kernel
                    self.assertIsInstance(matern_kernel, MaternKernel)
                    self.assertIsInstance(matern_kernel.lengthscale_prior,
                                          GammaPrior)

                    # Test forward
                    test_x = torch.rand(batch_shape + torch.Size([3, 1]),
                                        **tkwargs)
                    posterior = model(test_x)
                    self.assertIsInstance(posterior, MultivariateNormal)

                    # test param sizes
                    params = dict(model.named_parameters())
                    for p in params:
                        self.assertEqual(
                            params[p].numel(),
                            num_outputs *
                            torch.tensor(batch_shape).prod().item(),
                        )

                    # test posterior
                    # test non batch evaluation
                    X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        batch_shape + torch.Size([3, num_outputs]))
                    # test batch evaluation
                    X = torch.rand(
                        torch.Size([2]) + batch_shape + torch.Size([3, 1]),
                        **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        torch.Size([2]) + batch_shape +
                        torch.Size([3, num_outputs]),
                    )
Example 21
 def _getModel(self, double=False):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device,
                              dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y = torch.sin(train_x * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)
Example 22
    def test_sample_all_priors(self, cuda=False):
        device = torch.device("cuda" if cuda else "cpu")
        for dtype in (torch.float, torch.double):
            train_X = torch.rand(3, 5, device=device, dtype=dtype)
            train_Y = torch.rand(3, 1, device=device, dtype=dtype)
            model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll.to(device=device, dtype=dtype)
            original_state_dict = dict(deepcopy(mll.model.state_dict()))
            sample_all_priors(model)

            # make sure one of the hyperparameters changed
            self.assertTrue(
                dict(model.state_dict())["likelihood.noise_covar.raw_noise"] !=
                original_state_dict["likelihood.noise_covar.raw_noise"])
            # check that lengthscales are all different
            ls = model.covar_module.base_kernel.raw_lengthscale.view(
                -1).tolist()
            self.assertTrue(all(ls[0] != ls[i] for i in range(1, len(ls))))

            # change one of the priors to SmoothedBoxPrior
            model.covar_module = ScaleKernel(
                MaternKernel(
                    nu=2.5,
                    ard_num_dims=model.train_inputs[0].shape[-1],
                    batch_shape=model._aug_batch_shape,
                    lengthscale_prior=SmoothedBoxPrior(3.0, 6.0),
                ),
                batch_shape=model._aug_batch_shape,
                outputscale_prior=GammaPrior(2.0, 0.15),
            )
            original_state_dict = dict(deepcopy(mll.model.state_dict()))
            with warnings.catch_warnings(
                    record=True) as ws, settings.debug(True):
                sample_all_priors(model)
                self.assertEqual(len(ws), 1)
                self.assertTrue("rsample" in str(ws[0].message))

            # the lengthscale should not have changed because sampling is
            # not implemented for SmoothedBoxPrior
            self.assertTrue(
                torch.equal(
                    dict(model.state_dict())
                    ["covar_module.base_kernel.raw_lengthscale"],
                    original_state_dict[
                        "covar_module.base_kernel.raw_lengthscale"],
                ))

            # set setting_closure to None and make sure RuntimeError is raised
            prior_tuple = model.likelihood.noise_covar._priors["noise_prior"]
            model.likelihood.noise_covar._priors["noise_prior"] = (
                prior_tuple[0],
                prior_tuple[1],
                None,
            )
            with self.assertRaises(RuntimeError):
                sample_all_priors(model)
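For context, fit_gpytorch_model calls sample_all_priors between retries to draw a fresh initialization from the registered hyperparameter priors. A minimal sketch:

    model = SingleTaskGP(torch.rand(3, 5), torch.rand(3, 1))
    sample_all_priors(model)  # overwrite hyperparameters with prior draws
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_model(mll)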
Example 23
 def _get_model(self, batch_shape, num_outputs, likelihood=None, **tkwargs):
     train_x, train_y = _get_random_data(batch_shape=batch_shape,
                                         num_outputs=num_outputs,
                                         **tkwargs)
     model = SingleTaskGP(train_X=train_x,
                          train_Y=train_y,
                          likelihood=likelihood)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example 24
 def _get_model(self, batch_shape, num_outputs, **tkwargs):
     train_x, train_y = _get_random_data(batch_shape=batch_shape,
                                         num_outputs=num_outputs,
                                         **tkwargs)
     train_yvar = (0.1 + 0.1 * torch.rand_like(train_y))**2
     model = HeteroskedasticSingleTaskGP(train_X=train_x,
                                         train_Y=train_y,
                                         train_Yvar=train_yvar)
     mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
     fit_gpytorch_model(mll, options={"maxiter": 1})
     return model
Example 25
 def test_set_transformed_inputs(self):
     # This is intended to catch https://github.com/pytorch/botorch/issues/1078.
     # More general testing of _set_transformed_inputs is done under ModelListGP.
     X = torch.rand(5, 2)
     Y = X**2
     for tf_class in [Normalize, InputStandardize]:
         intf = tf_class(d=2)
         model = SingleTaskGP(X, Y, input_transform=intf)
         mll = ExactMarginalLogLikelihood(model.likelihood, model)
         fit_gpytorch_model(mll, options={"maxiter": 2})
         tf_X = intf(X)
         self.assertEqual(X.shape, tf_X.shape)
Example 26
 def test_fit_gpytorch_model_singular(self, cuda=False):
     options = {"disp": False, "maxiter": 5}
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=device, dtype=dtype)
         Y_train = torch.zeros(2, device=device, dtype=dtype)
         test_likelihood = GaussianLikelihood(noise_constraint=GreaterThan(
             -1.0, transform=None, initial_value=0.0))
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         fit_gpytorch_model(mll, options=options, max_retries=2)
Example 27
def get_and_fit_ARDRBF(
    Xs,
    Ys,
    Yvars,
    task_features=None,
    fidelity_features=None,
    refit_model=None,
    state_dict=None,
    fidelity_model_id=None,
    metric_names=None,
):
    m = ARDRBFGP(train_X=Xs[0], train_Y=Ys[0], train_Yvar=Yvars[0])
    mll = ExactMarginalLogLikelihood(m.likelihood, m)
    mll = fit_gpytorch_model(mll)
    return m
Example 28
def get_fitted_model(train_x, train_obj, state_dict=None):
    # initialize and fit model
    model = SingleTaskGP(train_X=train_x, train_Y=train_obj)

    if state_dict is not None:
        model.load_state_dict(state_dict)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    mll.to(train_x)
    fit_gpytorch_model(mll)

    return model
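A sketch of the warm-start pattern that the state_dict argument enables; variable names are illustrative.

    model = get_fitted_model(train_x, train_obj)
    # ... evaluate new points and extend the training data ...
    model = get_fitted_model(new_x, new_obj, state_dict=model.state_dict())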
Example 29
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1.0, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, OptimizationWarning) for w in ws)
             )