Example #1
    def test_gp(self):
        for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
            num_dim = 1 + (iteration_fidelity is not None) + (data_fidelity
                                                              is not None)
            for batch_shape, num_outputs, dtype, lin_trunc in itertools.product(
                (torch.Size(), torch.Size([2])),
                (1, 2),
                (torch.float, torch.double),
                (False, True),
            ):
                tkwargs = {"device": self.device, "dtype": dtype}
                model, _ = _get_model_and_data(
                    iteration_fidelity=iteration_fidelity,
                    data_fidelity=data_fidelity,
                    batch_shape=batch_shape,
                    num_outputs=num_outputs,
                    lin_truncated=lin_trunc,
                    **tkwargs,
                )
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll.to(**tkwargs)
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore",
                                            category=OptimizationWarning)
                    fit_gpytorch_model(mll,
                                       sequential=False,
                                       options={"maxiter": 1})

                # test init
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)

                # test param sizes
                params = dict(model.named_parameters())
                for p in params:
                    self.assertEqual(
                        params[p].numel(),
                        num_outputs * torch.tensor(batch_shape).prod().item(),
                    )

                # test posterior
                # test non batch evaluation
                X = torch.rand(batch_shape + torch.Size([3, num_dim]),
                               **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape,
                                 batch_shape + torch.Size([3, num_outputs]))
                # test batch evaluation
                X = torch.rand(
                    torch.Size([2]) + batch_shape + torch.Size([3, num_dim]),
                    **tkwargs)
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(
                    posterior.mean.shape,
                    torch.Size([2]) + batch_shape +
                    torch.Size([3, num_outputs]),
                )
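
The test above exercises the standard BoTorch fitting loop: build a model, wrap it in an ExactMarginalLogLikelihood, fit, then query the posterior. A minimal self-contained sketch of that pattern (toy data, not from the test; assumes botorch and gpytorch are installed):

import torch
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood

# toy training data: 8 points in 2 dimensions, one outcome column
train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)

model = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)  # maximizes the marginal log likelihood in-place

# posterior evaluation, mirroring the shape checks in the test
posterior = model.posterior(torch.rand(4, 2, dtype=torch.double))
print(posterior.mean.shape)  # torch.Size([4, 1])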
Example #2
    def test_sample_all_priors(self, cuda=False):
        device = torch.device("cuda" if cuda else "cpu")
        for dtype in (torch.float, torch.double):
            train_X = torch.rand(3, 5, device=device, dtype=dtype)
            train_Y = torch.rand(3, 1, device=device, dtype=dtype)
            model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll.to(device=device, dtype=dtype)
            original_state_dict = dict(deepcopy(mll.model.state_dict()))
            sample_all_priors(model)

            # make sure one of the hyperparameters changed
            self.assertTrue(
                dict(model.state_dict())["likelihood.noise_covar.raw_noise"] !=
                original_state_dict["likelihood.noise_covar.raw_noise"])
            # check that lengthscales are all different
            ls = model.covar_module.base_kernel.raw_lengthscale.view(
                -1).tolist()
            self.assertTrue(all(ls[0] != ls[i] for i in range(1, len(ls))))

            # change one of the priors to SmoothedBoxPrior
            model.covar_module = ScaleKernel(
                MaternKernel(
                    nu=2.5,
                    ard_num_dims=model.train_inputs[0].shape[-1],
                    batch_shape=model._aug_batch_shape,
                    lengthscale_prior=SmoothedBoxPrior(3.0, 6.0),
                ),
                batch_shape=model._aug_batch_shape,
                outputscale_prior=GammaPrior(2.0, 0.15),
            )
            original_state_dict = dict(deepcopy(mll.model.state_dict()))
            with warnings.catch_warnings(
                    record=True) as ws, settings.debug(True):
                sample_all_priors(model)
                self.assertEqual(len(ws), 1)
                self.assertTrue("rsample" in str(ws[0].message))

            # the lengthscale should not have changed because sampling is
            # not implemented for SmoothedBoxPrior
            self.assertTrue(
                torch.equal(
                    dict(model.state_dict())
                    ["covar_module.base_kernel.raw_lengthscale"],
                    original_state_dict[
                        "covar_module.base_kernel.raw_lengthscale"],
                ))

            # set setting_closure to None and make sure RuntimeError is raised
            prior_tuple = model.likelihood.noise_covar._priors["noise_prior"]
            model.likelihood.noise_covar._priors["noise_prior"] = (
                prior_tuple[0],
                prior_tuple[1],
                None,
            )
            with self.assertRaises(RuntimeError):
                sample_all_priors(model)
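
For context, sample_all_priors re-draws each hyperparameter from its registered prior. A simplified sketch of the logic this test exercises (this is not the library source; it assumes the era's gpytorch named_priors API, which yields (name, prior, closure, setting_closure) tuples):

import warnings


def sample_all_priors_sketch(model):
    for _, prior, closure, setting_closure in model.named_priors():
        if setting_closure is None:
            # matches the RuntimeError asserted at the end of the test
            raise RuntimeError("Cannot sample a prior without a setting_closure.")
        try:
            # draw a sample shaped like the current parameter value
            setting_closure(prior.sample(closure().shape))
        except NotImplementedError:
            # e.g. SmoothedBoxPrior: no rsample, so the parameter stays unchanged
            warnings.warn(f"`rsample` not implemented for {type(prior)}. Skipping.")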
Example #3
 def test_fit_gpytorch_model_singular(self, cuda=False):
     options = {"disp": False, "maxiter": 5}
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=device, dtype=dtype)
         Y_train = torch.zeros(2, device=device, dtype=dtype)
         test_likelihood = GaussianLikelihood(noise_constraint=GreaterThan(
             -1.0, transform=None, initial_value=0.0))
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         fit_gpytorch_model(mll, options=options, max_retries=2)
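
A note on the construction: GreaterThan(-1.0, transform=None, initial_value=0.0) disables the usual raw-parameter transform and allows zero (even negative) observation noise, so the train covariance K + noise * I can become numerically singular during optimization, which is what triggers the retries. A minimal illustration (assumes gpytorch is installed):

import torch
from gpytorch.constraints import GreaterThan
from gpytorch.likelihoods import GaussianLikelihood

likelihood = GaussianLikelihood(
    noise_constraint=GreaterThan(-1.0, transform=None, initial_value=0.0)
)
print(likelihood.noise)  # tensor([0.]) -- K + noise * I can fail to be PSD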
Example #4
 def _getBatchedModel(
     self, kind="SingleTaskGP", double=False, outcome_transform=False
 ):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
         -1
     )
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     kwargs = {}
     if outcome_transform:
         kwargs["outcome_transform"] = Standardize(m=2)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y, **kwargs)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)
Example #5
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.rand(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1.0, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, OptimizationWarning) for w in ws)
             )
Example #6
def get_fitted_model(train_x, train_obj, state_dict=None):
    # initialize and fit model
    model = SingleTaskGP(train_X=train_x, train_Y=train_obj)

    # # initialize likelihood and model
    # likelihood = gpytorch.likelihoods.GaussianLikelihood()
    # model = ExactGPModel(train_x, train_obj, likelihood)
    # model.train()
    # likelihood.train()

    if state_dict is not None:
        model.load_state_dict(state_dict)
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    mll.to(train_x)
    fit_gpytorch_model(mll)

    return model
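
The state_dict argument enables warm starts: a GPyTorch ExactGP's state_dict holds the hyperparameters rather than the training data, so it can be loaded into a model rebuilt on an enlarged dataset before re-fitting. A hypothetical usage sketch (toy data; get_fitted_model as defined above):

import torch

train_x = torch.rand(20, 3, dtype=torch.double)
train_obj = train_x.sum(dim=-1, keepdim=True)
model = get_fitted_model(train_x, train_obj)

# after observing new points, warm-start the refit from the old hyperparameters
new_x = torch.cat([train_x, torch.rand(5, 3, dtype=torch.double)])
new_obj = new_x.sum(dim=-1, keepdim=True)
model = get_fitted_model(new_x, new_obj, state_dict=model.state_dict())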
Example #7
 def _getModel(self, double=False):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device,
                              dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y = torch.sin(train_x * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)
Example #8
 def _getModel(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example #9
 def test_fit_gpytorch_model_singular(self):
     options = {"disp": False, "maxiter": 5}
     for dtype in (torch.float, torch.double):
         X_train = torch.ones(2, 2, device=self.device, dtype=dtype)
         Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
         test_likelihood = GaussianLikelihood(
             noise_constraint=GreaterThan(-1e-7, transform=None, initial_value=0.0)
         )
         gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         # this will do multiple retries (and emit warnings, which is desired)
         with warnings.catch_warnings(record=True) as ws, settings.debug(True):
             fit_gpytorch_model(mll, options=options, max_retries=2)
             self.assertTrue(
                 any(issubclass(w.category, NumericalWarning) for w in ws)
             )
         # ensure that we fail if noise ensures that jitter does not help
         gp.likelihood = GaussianLikelihood(
             noise_constraint=Interval(-2, -1, transform=None, initial_value=-1.5)
         )
         mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
         mll.to(device=self.device, dtype=dtype)
         with self.assertRaises(NotPSDError):
             fit_gpytorch_model(mll, options=options, max_retries=2)
         # ensure we can handle NaNErrors in the optimizer
         with mock.patch.object(SingleTaskGP, "__call__", side_effect=NanError):
             gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
             mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
             mll.to(device=self.device, dtype=dtype)
             fit_gpytorch_model(
                 mll, options={"disp": False, "maxiter": 1}, max_retries=1
             )
Example #10
    def test_fit_gpytorch_model_singular(self):
        options = {"disp": False, "maxiter": 5}
        for dtype in (torch.float, torch.double):
            X_train = torch.ones(2, 2, device=self.device, dtype=dtype)
            Y_train = torch.zeros(2, 1, device=self.device, dtype=dtype)
            test_likelihood = GaussianLikelihood(
                noise_constraint=GreaterThan(-1e-7, transform=None, initial_value=0.0)
            )
            gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            mll.to(device=self.device, dtype=dtype)
            # this will do multiple retries (and emit warnings, which is desired)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                fit_gpytorch_model(mll, options=options, max_retries=2)
                self.assertTrue(
                    any(issubclass(w.category, NumericalWarning) for w in ws)
                )
            # ensure that we fail if noise ensures that jitter does not help
            gp.likelihood = GaussianLikelihood(
                noise_constraint=Interval(-2, -1, transform=None, initial_value=-1.5)
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            mll.to(device=self.device, dtype=dtype)
            with self.assertLogs(level="DEBUG") as logs:
                fit_gpytorch_model(mll, options=options, max_retries=2)
            self.assertTrue(any("NotPSDError" in log for log in logs.output))
            # ensure we can handle NaNErrors in the optimizer
            with mock.patch.object(SingleTaskGP, "__call__", side_effect=NanError):
                gp = SingleTaskGP(X_train, Y_train, likelihood=test_likelihood)
                mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
                mll.to(device=self.device, dtype=dtype)
                fit_gpytorch_model(
                    mll, options={"disp": False, "maxiter": 1}, max_retries=1
                )
            # ensure we catch NotPSDErrors
            with mock.patch.object(SingleTaskGP, "__call__", side_effect=NotPSDError):
                mll = self._getModel()
                with self.assertLogs(level="DEBUG") as logs:
                    fit_gpytorch_model(mll, max_retries=2)
                for retry in [1, 2]:
                    self.assertTrue(
                        any(
                            f"Fitting failed on try {retry} due to a NotPSDError."
                            in log
                            for log in logs.output
                        )
                    )

            # Failure due to optimization warning

            def optimize_w_warning(mll, **kwargs):
                warnings.warn("Dummy warning.", OptimizationWarning)
                return mll, None

            mll = self._getModel()
            with self.assertLogs(level="DEBUG") as logs, settings.debug(True):
                fit_gpytorch_model(mll, optimizer=optimize_w_warning, max_retries=2)
            self.assertTrue(
                any("Fitting failed on try 1." in log for log in logs.output)
            )
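
The log assertions above pin down the retry contract of fit_gpytorch_model. A simplified sketch of that contract (this is not the botorch source; it only illustrates the behavior the test checks: optimization warnings and NotPSD failures are logged at DEBUG level as "Fitting failed on try {retry} ...", hyperparameters are re-sampled from their priors, and fitting is attempted again):

import logging
import warnings

from botorch.exceptions import OptimizationWarning
from botorch.optim.utils import sample_all_priors
from gpytorch.utils.errors import NotPSDError

logger = logging.getLogger(__name__)


def fit_with_retries(mll, optimizer, max_retries=5):
    for retry in range(1, max_retries + 1):
        try:
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")
                mll, _ = optimizer(mll)
            if not any(issubclass(w.category, OptimizationWarning) for w in ws):
                return mll  # clean fit, no retry needed
            logger.debug(f"Fitting failed on try {retry}.")
        except NotPSDError:
            logger.debug(f"Fitting failed on try {retry} due to a NotPSDError.")
        # re-initialize hyperparameters from their priors and try again
        sample_all_priors(mll.model)
    return mll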
Example #11
 def _getBatchedModel(self, kind="SingleTaskGP", double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device,
                              dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(train_x, train_y,
                              0.1 * torch.ones_like(train_y))
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(train_x, train_y,
                                             0.1 * torch.ones_like(train_y))
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example #12
    def test_gp(self):
        for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
            num_dim = 1 + (iteration_fidelity is not None) + (data_fidelity
                                                              is not None)
            for batch_shape, m, dtype, lin_trunc, use_octf in itertools.product(
                (torch.Size(), torch.Size([2])),
                (1, 2),
                (torch.float, torch.double),
                (False, True),
                (False, True),
            ):
                tkwargs = {"device": self.device, "dtype": dtype}
                octf = Standardize(
                    m=m, batch_shape=batch_shape) if use_octf else None
                model, _ = _get_model_and_data(
                    iteration_fidelity=iteration_fidelity,
                    data_fidelity=data_fidelity,
                    batch_shape=batch_shape,
                    m=m,
                    lin_truncated=lin_trunc,
                    outcome_transform=octf,
                    **tkwargs,
                )
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll.to(**tkwargs)
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore",
                                            category=OptimizationWarning)
                    fit_gpytorch_model(mll,
                                       sequential=False,
                                       options={"maxiter": 1})

                # test init
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)
                if use_octf:
                    self.assertIsInstance(model.outcome_transform, Standardize)

                # test param sizes
                params = dict(model.named_parameters())
                for p in params:
                    self.assertEqual(
                        params[p].numel(),
                        m * torch.tensor(batch_shape).prod().item())

                # test posterior
                # test non batch evaluation
                X = torch.rand(*batch_shape, 3, num_dim, **tkwargs)
                expected_shape = batch_shape + torch.Size([3, m])
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape, expected_shape)
                self.assertEqual(posterior.variance.shape, expected_shape)
                if use_octf:
                    # ensure un-transformation is applied
                    tmp_tf = model.outcome_transform
                    del model.outcome_transform
                    pp_tf = model.posterior(X)
                    model.outcome_transform = tmp_tf
                    expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                    self.assertTrue(
                        torch.allclose(posterior.variance, expected_var))

                # test batch evaluation
                X = torch.rand(2, *batch_shape, 3, num_dim, **tkwargs)
                expected_shape = torch.Size([2]) + batch_shape + torch.Size(
                    [3, m])
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape, expected_shape)
                self.assertEqual(posterior.variance.shape, expected_shape)
                if use_octf:
                    # ensure un-transformation is applied
                    tmp_tf = model.outcome_transform
                    del model.outcome_transform
                    pp_tf = model.posterior(X)
                    model.outcome_transform = tmp_tf
                    expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                    self.assertTrue(
                        torch.allclose(posterior.variance, expected_var))
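
The outcome-transform branches above rely on Standardize.untransform_posterior: a model constructed with an outcome transform returns posteriors already mapped back to the original outcome scale. A minimal sketch of the round trip being verified (toy data; assumes botorch is installed):

import torch
from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Standardize

X = torch.rand(10, 2, dtype=torch.double)
Y = 5.0 + 3.0 * torch.rand(10, 1, dtype=torch.double)

model = SingleTaskGP(X, Y, outcome_transform=Standardize(m=1))
posterior = model.posterior(X)  # already un-transformed

# replicate the manual check used in the test: strip the transform,
# take the raw posterior, and un-transform it by hand
octf = model.outcome_transform
del model.outcome_transform
raw_posterior = model.posterior(X)
model.outcome_transform = octf
assert torch.allclose(
    posterior.variance, octf.untransform_posterior(raw_posterior).variance
)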