    def test_single_task_batch_cv(self):
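        # NOTE: this method (like the examples below) is excerpted from a
        # BoTorch test class; it assumes the usual module-level imports
        # (itertools, warnings, torch, and the botorch/gpytorch symbols
        # used below) and test helpers such as _get_random_data.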
        n = 10
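        # sweep (no batch / batch of 2) x (1 or 2 outputs) x (float32 / float64)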
        for batch_shape, num_outputs, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (1, 2),
            (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X, train_Y = _get_random_data(batch_shape=batch_shape,
                                                num_outputs=num_outputs,
                                                n=n,
                                                **tkwargs)
            if num_outputs == 1:
                train_Y = train_Y.squeeze(-1)
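            # constant observation noise, used below for the FixedNoiseGP folds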
            train_Yvar = torch.full_like(train_Y, 0.01)
            noiseless_cv_folds = gen_loo_cv_folds(train_X=train_X,
                                                  train_Y=train_Y)
            # check shapes
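            # each of the n LOO folds trains on n - 1 points and holds out one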
            expected_shape_train_X = batch_shape + torch.Size(
                [n, n - 1, train_X.shape[-1]])
            expected_shape_test_X = batch_shape + torch.Size(
                [n, 1, train_X.shape[-1]])
            self.assertEqual(noiseless_cv_folds.train_X.shape,
                             expected_shape_train_X)
            self.assertEqual(noiseless_cv_folds.test_X.shape,
                             expected_shape_test_X)

            expected_shape_train_Y = batch_shape + torch.Size(
                [n, n - 1, num_outputs])
            expected_shape_test_Y = batch_shape + torch.Size(
                [n, 1, num_outputs])

            self.assertEqual(noiseless_cv_folds.train_Y.shape,
                             expected_shape_train_Y)
            self.assertEqual(noiseless_cv_folds.test_Y.shape,
                             expected_shape_test_Y)
            self.assertIsNone(noiseless_cv_folds.train_Yvar)
            self.assertIsNone(noiseless_cv_folds.test_Yvar)
            # Test SingleTaskGP
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                cv_results = batch_cross_validation(
                    model_cls=SingleTaskGP,
                    mll_cls=ExactMarginalLogLikelihood,
                    cv_folds=noiseless_cv_folds,
                    fit_args={"options": {
                        "maxiter": 1
                    }},
                )
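            # the CV posterior covers the single held-out point of each fold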
            expected_shape = batch_shape + torch.Size([n, 1, num_outputs])
            self.assertEqual(cv_results.posterior.mean.shape, expected_shape)
            self.assertEqual(cv_results.observed_Y.shape, expected_shape)

            # Test FixedNoiseGP
            noisy_cv_folds = gen_loo_cv_folds(train_X=train_X,
                                              train_Y=train_Y,
                                              train_Yvar=train_Yvar)
            # check shapes
            self.assertEqual(noisy_cv_folds.train_X.shape,
                             expected_shape_train_X)
            self.assertEqual(noisy_cv_folds.test_X.shape,
                             expected_shape_test_X)
            self.assertEqual(noisy_cv_folds.train_Y.shape,
                             expected_shape_train_Y)
            self.assertEqual(noisy_cv_folds.test_Y.shape,
                             expected_shape_test_Y)
            self.assertEqual(noisy_cv_folds.train_Yvar.shape,
                             expected_shape_train_Y)
            self.assertEqual(noisy_cv_folds.test_Yvar.shape,
                             expected_shape_test_Y)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                cv_results = batch_cross_validation(
                    model_cls=FixedNoiseGP,
                    mll_cls=ExactMarginalLogLikelihood,
                    cv_folds=noisy_cv_folds,
                    fit_args={"options": {
                        "maxiter": 1
                    }},
                )
            self.assertEqual(cv_results.posterior.mean.shape, expected_shape)
            self.assertEqual(cv_results.observed_Y.shape, expected_shape)
Example #2
    def test_gp(self):
        d = 3
        bounds = torch.tensor([[-1.0] * d, [1.0] * d])
        for batch_shape, m, ncat, dtype in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (0, 1, 3),
            (torch.float, torch.double),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X, train_Y = _get_random_data(batch_shape=batch_shape,
                                                m=m,
                                                d=d,
                                                **tkwargs)
            cat_dims = list(range(ncat))
            ord_dims = sorted(set(range(d)) - set(cat_dims))
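            # normalizing all d dims would rescale the categorical ones,
            # so MixedSingleTaskGP must reject this transform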
            with self.assertRaises(ValueError):
                MixedSingleTaskGP(
                    train_X,
                    train_Y,
                    cat_dims=cat_dims,
                    input_transform=Normalize(d=d,
                                              bounds=bounds.to(**tkwargs),
                                              transform_on_train=True),
                )
            # test correct indices
            if 0 < ncat < 3:
                MixedSingleTaskGP(
                    train_X,
                    train_Y,
                    cat_dims=cat_dims,
                    input_transform=Normalize(
                        d=d,
                        bounds=bounds.to(**tkwargs),
                        transform_on_train=True,
                        indices=ord_dims,
                    ),
                )
                with self.assertRaises(ValueError):
                    MixedSingleTaskGP(
                        train_X,
                        train_Y,
                        cat_dims=cat_dims,
                        input_transform=Normalize(
                            d=d,
                            bounds=bounds.to(**tkwargs),
                            transform_on_train=True,
                            indices=cat_dims,
                        ),
                    )
                with self.assertRaises(ValueError):
                    MixedSingleTaskGP(
                        train_X,
                        train_Y,
                        cat_dims=cat_dims,
                        input_transform=Normalize(
                            d=d,
                            bounds=bounds.to(**tkwargs),
                            transform_on_train=True,
                            indices=ord_dims + [random.choice(cat_dims)],
                        ),
                    )

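            # MixedSingleTaskGP requires at least one categorical dimension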
            if len(cat_dims) == 0:
                with self.assertRaises(ValueError):
                    MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
                continue

            model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)
            self.assertEqual(model._ignore_X_dims_scaling_check, cat_dims)
            mll = ExactMarginalLogLikelihood(model.likelihood,
                                             model).to(**tkwargs)
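            # a single optimizer iteration keeps the fit cheap; the resulting
            # OptimizationWarning is expected and suppressed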
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            if ncat < 3:
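                # mixed inputs use a sum part (Matern + scaled categorical)
                # plus a product part (Matern * categorical), each wrapped
                # in a ScaleKernel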
                self.assertIsInstance(model.covar_module, AdditiveKernel)
                sum_kernel, prod_kernel = model.covar_module.kernels
                self.assertIsInstance(sum_kernel, ScaleKernel)
                self.assertIsInstance(sum_kernel.base_kernel, AdditiveKernel)
                self.assertIsInstance(prod_kernel, ScaleKernel)
                self.assertIsInstance(prod_kernel.base_kernel, ProductKernel)
                sum_cont_kernel, sum_cat_kernel = sum_kernel.base_kernel.kernels
                prod_cont_kernel, prod_cat_kernel = prod_kernel.base_kernel.kernels
                self.assertIsInstance(sum_cont_kernel, MaternKernel)
                self.assertIsInstance(sum_cat_kernel, ScaleKernel)
                self.assertIsInstance(sum_cat_kernel.base_kernel,
                                      CategoricalKernel)
                self.assertIsInstance(prod_cont_kernel, MaternKernel)
                self.assertIsInstance(prod_cat_kernel, CategoricalKernel)
            else:
                self.assertIsInstance(model.covar_module, ScaleKernel)
                self.assertIsInstance(model.covar_module.base_kernel,
                                      CategoricalKernel)

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([4, d]), **tkwargs)
            expected_shape = batch_shape + torch.Size([4, m])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # test adding observation noise
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            self.assertEqual(posterior_pred.variance.shape, expected_shape)
            pvar = posterior_pred.variance
            pvar_exp = _get_pvar_expected(posterior, model, X, m)
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, d, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, m])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            # test adding observation noise in batch mode
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            pvar = posterior_pred.variance
            pvar_exp = _get_pvar_expected(posterior, model, X, m)
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
Example #3
    def test_condition_on_observations(self):
        for batch_shape, m, dtype, use_octf in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (torch.float, torch.double),
            (False, True),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            octf = Standardize(m=m, batch_shape=batch_shape) if use_octf else None
            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, m=m, outcome_transform=octf, **tkwargs
            )
            # evaluate model
            model.posterior(torch.rand(torch.Size([4, 1]), **tkwargs))
            # test condition_on_observations
            fant_shape = torch.Size([2])
            # fantasize at different input points
            X_fant, Y_fant = _get_random_data(
                fant_shape + batch_shape, m, n=3, **tkwargs
            )
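            # FixedNoiseGP requires explicit observation noise for the
            # fantasy points; SingleTaskGP does not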
            c_kwargs = (
                {"noise": torch.full_like(Y_fant, 0.01)}
                if isinstance(model, FixedNoiseGP)
                else {}
            )
            cm = model.condition_on_observations(X_fant, Y_fant, **c_kwargs)
            # fantasize at same input points (check proper broadcasting)
            c_kwargs_same_inputs = (
                {"noise": torch.full_like(Y_fant[0], 0.01)}
                if isinstance(model, FixedNoiseGP)
                else {}
            )
            cm_same_inputs = model.condition_on_observations(
                X_fant[0], Y_fant, **c_kwargs_same_inputs
            )

            test_Xs = [
                # test broadcasting single input across fantasy and model batches
                torch.rand(4, 1, **tkwargs),
                # separate input for each model batch and broadcast across
                # fantasy batches
                torch.rand(batch_shape + torch.Size([4, 1]), **tkwargs),
                # separate input for each model and fantasy batch
                torch.rand(fant_shape + batch_shape + torch.Size([4, 1]), **tkwargs),
            ]
            for test_X in test_Xs:
                posterior = cm.posterior(test_X)
                self.assertEqual(
                    posterior.mean.shape, fant_shape + batch_shape + torch.Size([4, m])
                )
                posterior_same_inputs = cm_same_inputs.posterior(test_X)
                self.assertEqual(
                    posterior_same_inputs.mean.shape,
                    fant_shape + batch_shape + torch.Size([4, m]),
                )

                # check that fantasies of batched model are correct
                if len(batch_shape) > 0 and test_X.dim() == 2:
                    state_dict_non_batch = {
                        key: (val[0] if val.numel() > 1 else val)
                        for key, val in model.state_dict().items()
                    }
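                    # rebuild an equivalent non-batch model from the first
                    # batch's data and parameters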
                    model_kwargs_non_batch = {
                        "train_X": model_kwargs["train_X"][0],
                        "train_Y": model_kwargs["train_Y"][0],
                    }
                    if "train_Yvar" in model_kwargs:
                        model_kwargs_non_batch["train_Yvar"] = model_kwargs[
                            "train_Yvar"
                        ][0]
                    if model_kwargs["outcome_transform"] is not None:
                        model_kwargs_non_batch["outcome_transform"] = Standardize(m=m)
                    model_non_batch = type(model)(**model_kwargs_non_batch)
                    model_non_batch.load_state_dict(state_dict_non_batch)
                    model_non_batch.eval()
                    model_non_batch.likelihood.eval()
                    model_non_batch.posterior(torch.rand(torch.Size([4, 1]), **tkwargs))
                    c_kwargs = (
                        {"noise": torch.full_like(Y_fant[0, 0, :], 0.01)}
                        if isinstance(model, FixedNoiseGP)
                        else {}
                    )
                    cm_non_batch = model_non_batch.condition_on_observations(
                        X_fant[0][0], Y_fant[:, 0, :], **c_kwargs
                    )
                    non_batch_posterior = cm_non_batch.posterior(test_X)
                    self.assertTrue(
                        torch.allclose(
                            posterior_same_inputs.mean[:, 0, ...],
                            non_batch_posterior.mean,
                            atol=1e-3,
                        )
                    )
                    self.assertTrue(
                        torch.allclose(
                            posterior_same_inputs.mvn.covariance_matrix[:, 0, :, :],
                            non_batch_posterior.mvn.covariance_matrix,
                            atol=1e-3,
                        )
                    )
Example #4
    def test_condition_on_observations(self):
        d = 3
        for batch_shape, m, ncat, dtype in itertools.product(
            (torch.Size(), torch.Size([2])),
            (1, 2),
            (1, 2),
            (torch.float, torch.double),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            train_X, train_Y = _get_random_data(batch_shape=batch_shape,
                                                m=m,
                                                d=d,
                                                **tkwargs)
            cat_dims = list(range(ncat))
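            # the first ncat input dimensions are treated as categorical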
            model = MixedSingleTaskGP(train_X, train_Y, cat_dims=cat_dims)

            # evaluate model
            model.posterior(torch.rand(torch.Size([4, d]), **tkwargs))
            # test condition_on_observations
            fant_shape = torch.Size([2])

            # fantasize at different input points
            X_fant, Y_fant = _get_random_data(fant_shape + batch_shape,
                                              m=m,
                                              d=d,
                                              n=3,
                                              **tkwargs)
            cm = model.condition_on_observations(X_fant, Y_fant)
            # fantasize at same input points (check proper broadcasting)
            cm_same_inputs = model.condition_on_observations(
                X_fant[0],
                Y_fant,
            )

            test_Xs = [
                # test broadcasting single input across fantasy and model batches
                torch.rand(4, d, **tkwargs),
                # separate input for each model batch and broadcast across
                # fantasy batches
                torch.rand(batch_shape + torch.Size([4, d]), **tkwargs),
                # separate input for each model and fantasy batch
                torch.rand(fant_shape + batch_shape + torch.Size([4, d]),
                           **tkwargs),
            ]
            for test_X in test_Xs:
                posterior = cm.posterior(test_X)
                self.assertEqual(posterior.mean.shape,
                                 fant_shape + batch_shape + torch.Size([4, m]))
                posterior_same_inputs = cm_same_inputs.posterior(test_X)
                self.assertEqual(
                    posterior_same_inputs.mean.shape,
                    fant_shape + batch_shape + torch.Size([4, m]),
                )

                # check that fantasies of batched model are correct
                if len(batch_shape) > 0 and test_X.dim() == 2:
                    state_dict_non_batch = {
                        key: (val[0] if val.ndim > 1 else val)
                        for key, val in model.state_dict().items()
                    }
                    model_kwargs_non_batch = {
                        "train_X": train_X[0],
                        "train_Y": train_Y[0],
                        "cat_dims": cat_dims,
                    }
                    model_non_batch = type(model)(**model_kwargs_non_batch)
                    model_non_batch.load_state_dict(state_dict_non_batch)
                    model_non_batch.eval()
                    model_non_batch.likelihood.eval()
                    model_non_batch.posterior(
                        torch.rand(torch.Size([4, d]), **tkwargs))
                    cm_non_batch = model_non_batch.condition_on_observations(
                        X_fant[0][0],
                        Y_fant[:, 0, :],
                    )
                    non_batch_posterior = cm_non_batch.posterior(test_X)
                    self.assertTrue(
                        torch.allclose(
                            posterior_same_inputs.mean[:, 0, ...],
                            non_batch_posterior.mean,
                            atol=1e-3,
                        ))
                    self.assertTrue(
                        torch.allclose(
                            posterior_same_inputs.mvn.covariance_matrix[:, 0, :, :],
                            non_batch_posterior.mvn.covariance_matrix,
                            atol=1e-3,
                        ))