Example #1
    def test_fantasize(self):
        for batch_shape, m, dtype, use_octf in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (torch.float, torch.double),
            (False, True),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
            model, _ = self._get_model_and_data(
                batch_shape=batch_shape, m=m, outcome_transform=octf, **tkwargs
            )
            # fantasize
            X_f = torch.rand(batch_shape + torch.Size([4, 1]), **tkwargs)
            sampler = SobolQMCNormalSampler(num_samples=3)
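            # fantasize() draws 3 quasi-random samples at the 4 candidate
            # points, yielding a batch of 3 fantasy models.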
            fm = model.fantasize(X=X_f, sampler=sampler)
            self.assertIsInstance(fm, model.__class__)
            fm = model.fantasize(X=X_f, sampler=sampler, observation_noise=False)
            self.assertIsInstance(fm, model.__class__)

        # check that input transforms are applied to X.
        tkwargs = {"device": self.device, "dtype": torch.float}
        intf = Normalize(d=1, bounds=torch.tensor([[0], [10]], **tkwargs))
        model, _ = self._get_model_and_data(
            batch_shape=torch.Size(),
            m=1,
            input_transform=intf,
            **tkwargs,
        )
        X_f = torch.rand(4, 1, **tkwargs)
        fm = model.fantasize(X_f, sampler=SobolQMCNormalSampler(num_samples=3))
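        # The fantasy model must condition on the transformed candidates: its
        # last 4 training inputs equal intf(X_f), broadcast over the 3 fantasies.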
        self.assertTrue(
            torch.allclose(fm.train_inputs[0][:, -4:], intf(X_f).expand(3, -1, -1))
        )
Example #2
    def test_input_transforms(self):
        for infer_noise in [True, False]:
            tkwargs = {"device": self.device, "dtype": torch.double}
            train_X, train_Y, train_Yvar, test_X = self._get_unnormalized_data(
                infer_noise=infer_noise, **tkwargs
            )
            n, d = train_X.shape

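            # Compute normalization statistics by hand; fitting on manually
            # rescaled data should match fitting with Normalize/Standardize below.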
            lb, ub = train_X.min(dim=0).values, train_X.max(dim=0).values
            mu, sigma = train_Y.mean(), train_Y.std()

            # Fit without transforms
            with torch.random.fork_rng():
                torch.manual_seed(0)
                gp1 = SaasFullyBayesianSingleTaskGP(
                    train_X=(train_X - lb) / (ub - lb),
                    train_Y=(train_Y - mu) / sigma,
                    train_Yvar=(
                        train_Yvar / sigma ** 2 if train_Yvar is not None else None
                    ),
                )
                fit_fully_bayesian_model_nuts(
                    gp1, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
                )
                posterior1 = gp1.posterior(
                    (test_X - lb) / (ub - lb), marginalize_over_mcmc_samples=True
                )
                pred_mean1 = mu + sigma * posterior1.mean
                pred_var1 = (sigma ** 2) * posterior1.variance

            # Fit with transforms
            with torch.random.fork_rng():
                torch.manual_seed(0)
                gp2 = SaasFullyBayesianSingleTaskGP(
                    train_X=train_X,
                    train_Y=train_Y,
                    train_Yvar=train_Yvar,
                    input_transform=Normalize(d=train_X.shape[-1]),
                    outcome_transform=Standardize(m=1),
                )
                fit_fully_bayesian_model_nuts(
                    gp2, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
                )
                posterior2 = gp2.posterior(test_X, marginalize_over_mcmc_samples=True)
                pred_mean2, pred_var2 = posterior2.mean, posterior2.variance

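            # Both fits used the same seed, so their predictions (mapped back to
            # the original scale) should agree up to numerical precision.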
            self.assertTrue(torch.allclose(pred_mean1, pred_mean2))
            self.assertTrue(torch.allclose(pred_var1, pred_var2))
Example #3
    def test_gp(self):
        bounds = torch.tensor([[-1.0], [1.0]])
        for batch_shape, m, dtype, use_octf, use_intf in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (torch.float, torch.double),
            (False, True),
            (False, True),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
            intf = (
                Normalize(
                    d=1, bounds=bounds.to(**tkwargs), transform_on_train=True
                )
                if use_intf
                else None
            )
            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape,
                m=m,
                outcome_transform=octf,
                input_transform=intf,
                **tkwargs
            )
            mll = ExactMarginalLogLikelihood(model.likelihood, model).to(**tkwargs)
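            # A single optimizer step keeps the test fast; the checks below
            # exercise model structure and posterior shapes, not fit quality.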
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            if use_octf:
                self.assertIsInstance(model.outcome_transform, Standardize)
            if use_intf:
                self.assertIsInstance(model.input_transform, Normalize)
                # _transform_tensor_args moves the output dim into a batch dim
                train_X, train_Y, _ = model._transform_tensor_args(
                    X=model_kwargs["train_X"], Y=model_kwargs["train_Y"]
                )
                # check that the train inputs have been transformed and set on the model
                self.assertTrue(torch.equal(model.train_inputs[0], intf(train_X)))

            # test param sizes
            params = dict(model.named_parameters())
            for p in params:
                self.assertEqual(
                    params[p].numel(), m * torch.tensor(batch_shape).prod().item()
                )

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, m])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # test adding observation noise
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            self.assertEqual(posterior_pred.variance.shape, expected_shape)
            if use_octf:
                # ensure un-transformation is applied
                tmp_tf = model.outcome_transform
                del model.outcome_transform
                pp_tf = model.posterior(X, observation_noise=True)
                model.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                self.assertTrue(torch.allclose(posterior_pred.variance, expected_var))
            else:
                pvar = posterior_pred.variance
                pvar_exp = _get_pvar_expected(posterior, model, X, m)
                self.assertTrue(torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, 1, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            # test adding observation noise in batch mode
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            if use_octf:
                # ensure un-transformation is applied
                tmp_tf = model.outcome_transform
                del model.outcome_transform
                pp_tf = model.posterior(X, observation_noise=True)
                model.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                self.assertTrue(torch.allclose(posterior_pred.variance, expected_var))
            else:
                pvar = posterior_pred.variance
                pvar_exp = _get_pvar_expected(posterior, model, X, m)
                self.assertTrue(torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
Example #4
    def test_gp(self):
        d = 3
        bounds = torch.tensor([[-1.0] * d, [1.0] * d])
        for batch_shape, m, ncat, dtype in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (0, 1, 3),
            (torch.float, torch.double),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X, train_Y = _get_random_data(
                batch_shape=batch_shape, m=m, d=d, **tkwargs
            )
            cat_dims = list(range(ncat))
            ord_dims = sorted(set(range(d)) - set(cat_dims))
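            # A transform over the categorical dims (or an empty cat_dims list
            # when ncat == 0) should be rejected with a ValueError.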
            with self.assertRaises(ValueError):
                MixedSingleTaskGP(
                    train_X,
                    train_Y,
                    cat_dims=cat_dims,
                    input_transform=Normalize(d=d,
                                              bounds=bounds.to(**tkwargs),
                                              transform_on_train=True),
                )
            # test correct indices
            if (ncat < 3) and (ncat > 0):
                MixedSingleTaskGP(
                    train_X,
                    train_Y,
                    cat_dims=cat_dims,
                    input_transform=Normalize(
                        d=d,
                        bounds=bounds.to(**tkwargs),
                        transform_on_train=True,
                        indices=ord_dims,
                    ),
                )
                with self.assertRaises(ValueError):
                    MixedSingleTaskGP(
                        train_X,
                        train_Y,
                        cat_dims=cat_dims,
                        input_transform=Normalize(
                            d=d,
                            bounds=bounds.to(**tkwargs),
                            transform_on_train=True,
                            indices=cat_dims,
                        ),
                    )
                with self.assertRaises(ValueError):
                    MixedSingleTaskGP(
                        train_X,
                        train_Y,
                        cat_dims=cat_dims,
                        input_transform=Normalize(
                            d=d,
                            bounds=bounds.to(**tkwargs),
                            transform_on_train=True,
                            indices=ord_dims + [random.choice(cat_dims)],
                        ),
                    )

            if len(cat_dims) == 0:
                with self.assertRaises(ValueError):
                    MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
                continue

            model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
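            # Categorical dims are exempt from the unit-cube input scaling check.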
            self.assertEqual(model._ignore_X_dims_scaling_check, cat_dims)
            mll = ExactMarginalLogLikelihood(model.likelihood,
                                             model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
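            # With both continuous and categorical dims present, the covariance
            # is a sum kernel plus a product kernel; with only categorical dims
            # it collapses to a single scaled CategoricalKernel.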
            if ncat < 3:
                self.assertIsInstance(model.covar_module, AdditiveKernel)
                sum_kernel, prod_kernel = model.covar_module.kernels
                self.assertIsInstance(sum_kernel, ScaleKernel)
                self.assertIsInstance(sum_kernel.base_kernel, AdditiveKernel)
                self.assertIsInstance(prod_kernel, ScaleKernel)
                self.assertIsInstance(prod_kernel.base_kernel, ProductKernel)
                sum_cont_kernel, sum_cat_kernel = sum_kernel.base_kernel.kernels
                prod_cont_kernel, prod_cat_kernel = prod_kernel.base_kernel.kernels
                self.assertIsInstance(sum_cont_kernel, MaternKernel)
                self.assertIsInstance(sum_cat_kernel, ScaleKernel)
                self.assertIsInstance(sum_cat_kernel.base_kernel,
                                      CategoricalKernel)
                self.assertIsInstance(prod_cont_kernel, MaternKernel)
                self.assertIsInstance(prod_cat_kernel, CategoricalKernel)
            else:
                self.assertIsInstance(model.covar_module, ScaleKernel)
                self.assertIsInstance(model.covar_module.base_kernel,
                                      CategoricalKernel)

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([4, d]), **tkwargs)
            expected_shape = batch_shape + torch.Size([4, m])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # test adding observation noise
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            self.assertEqual(posterior_pred.variance.shape, expected_shape)
            pvar = posterior_pred.variance
            pvar_exp = _get_pvar_expected(posterior, model, X, m)
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, d, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            # test adding observation noise in batch mode
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            pvar = posterior_pred.variance
            pvar_exp = _get_pvar_expected(posterior, model, X, m)
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
Example #5
    def test_gp(self):
        for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
            num_dim = (
                1 + (iteration_fidelity is not None) + (data_fidelity is not None)
            )
            bounds = torch.zeros(2, num_dim)
            bounds[1] = 1
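            # Unit-cube bounds over the design and fidelity dimensions.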
            for (
                    batch_shape,
                    m,
                    dtype,
                    lin_trunc,
                    use_octf,
                    use_intf,
            ) in itertools.product(
                (torch.Size(), torch.Size([2])),
                (1, 2),
                (torch.float, torch.double),
                (False, True),
                (False, True),
                (False, True),
            ):
                tkwargs = {"device": self.device, "dtype": dtype}
                octf = (
                    Standardize(m=m, batch_shape=batch_shape) if use_octf else None
                )
                intf = Normalize(d=num_dim, bounds=bounds) if use_intf else None
                model, model_kwargs = self._get_model_and_data(
                    iteration_fidelity=iteration_fidelity,
                    data_fidelity=data_fidelity,
                    batch_shape=batch_shape,
                    m=m,
                    lin_truncated=lin_trunc,
                    outcome_transform=octf,
                    input_transform=intf,
                    **tkwargs,
                )
                mll = ExactMarginalLogLikelihood(model.likelihood, model)
                mll.to(**tkwargs)
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore",
                                            category=OptimizationWarning)
                    fit_gpytorch_model(mll,
                                       sequential=False,
                                       options={"maxiter": 1})

                # test init
                self.assertIsInstance(model.mean_module, ConstantMean)
                self.assertIsInstance(model.covar_module, ScaleKernel)
                if use_octf:
                    self.assertIsInstance(model.outcome_transform, Standardize)
                if use_intf:
                    self.assertIsInstance(model.input_transform, Normalize)
                    # _transform_tensor_args moves the output dim into a batch dim
                    train_X, train_Y, _ = model._transform_tensor_args(
                        X=model_kwargs["train_X"], Y=model_kwargs["train_Y"])
                    # check that the train inputs have been transformed and set on the
                    # model
                    self.assertTrue(
                        torch.equal(model.train_inputs[0], intf(train_X)))

                # test param sizes
                params = dict(model.named_parameters())
                for p in params:
                    self.assertEqual(
                        params[p].numel(),
                        m * torch.tensor(batch_shape).prod().item())

                # test posterior
                # test non batch evaluation
                X = torch.rand(*batch_shape, 3, num_dim, **tkwargs)
                expected_shape = batch_shape + torch.Size([3, m])
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape, expected_shape)
                self.assertEqual(posterior.variance.shape, expected_shape)
                if use_octf:
                    # ensure un-transformation is applied
                    tmp_tf = model.outcome_transform
                    del model.outcome_transform
                    pp_tf = model.posterior(X)
                    model.outcome_transform = tmp_tf
                    expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                    self.assertTrue(
                        torch.allclose(posterior.variance, expected_var))

                # test batch evaluation
                X = torch.rand(2, *batch_shape, 3, num_dim, **tkwargs)
                expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
                posterior = model.posterior(X)
                self.assertIsInstance(posterior, GPyTorchPosterior)
                self.assertEqual(posterior.mean.shape, expected_shape)
                self.assertEqual(posterior.variance.shape, expected_shape)
                if use_octf:
                    # ensure un-transformation is applied
                    tmp_tf = model.outcome_transform
                    del model.outcome_transform
                    pp_tf = model.posterior(X)
                    model.outcome_transform = tmp_tf
                    expected_var = tmp_tf.untransform_posterior(pp_tf).variance
                    self.assertTrue(
                        torch.allclose(posterior.variance, expected_var))