Example #1
    def setUp(self):
        super().setUp()
        torch.random.manual_seed(0)

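        # batched training data: 2 batches of 10 points with 1-d inputs
        # and 3 x 5 tensor-valued outputs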
        train_x = torch.rand(2, 10, 1, device=self.device)
        train_y = torch.randn(2, 10, 3, 5, device=self.device)

        self.model = HigherOrderGP(train_x, train_y)

        # check that we can assign different kernels and likelihoods
        model_2 = HigherOrderGP(
            train_X=train_x,
            train_Y=train_y,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
        )

        model_3 = HigherOrderGP(
            train_X=train_x,
            train_Y=train_y,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
            latent_init="gp",
        )

        for m in [self.model, model_2, model_3]:
            mll = ExactMarginalLogLikelihood(m.likelihood, m)
            fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})
Example #2
    def test_num_output_dims(self):
        for dtype in [torch.float, torch.double]:
            train_x = torch.rand(2, 10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(2, 10, 3, 5, device=self.device, dtype=dtype)
            model = HigherOrderGP(train_x, train_y)

            # check that it correctly inferred that this is a batched model
            self.assertEqual(model._num_outputs, 2)

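            # non-batched data whose outputs span three trailing dims (3, 5, 2)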
            train_x = torch.rand(10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(10, 3, 5, 2, device=self.device, dtype=dtype)
            model = HigherOrderGP(train_x, train_y)

            # non-batched case
            self.assertEqual(model._num_outputs, 1)

            train_x = torch.rand(3, 2, 10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(
                3, 2, 10, 3, 5, device=self.device, dtype=dtype
            )

            # a multi-dimensional batch shape should raise NotImplementedError
            with self.assertRaises(NotImplementedError):
                model = HigherOrderGP(train_x, train_y)
Example #3
    def test_initialize_latents(self):
        for dtype in [torch.float, torch.double]:
            torch.random.manual_seed(0)

            train_x = torch.rand(10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(10, 3, 5, device=self.device, dtype=dtype)

            for latent_dim_sizes, latent_init in itertools.product(
                [[1, 1], [2, 3]],
                ["gp", "default"],
            ):
                self.model = HigherOrderGP(
                    train_x,
                    train_y,
                    num_latent_dims=latent_dim_sizes,
                    latent_init=latent_init,
                )
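                # one latent parameter matrix per output dim: shapes (3, d0)
                # and (5, d1) for latent sizes [d0, d1]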
                self.assertEqual(
                    self.model.latent_parameters[0].shape,
                    torch.Size((3, latent_dim_sizes[0])),
                )
                self.assertEqual(
                    self.model.latent_parameters[1].shape,
                    torch.Size((5, latent_dim_sizes[1])),
                )
Example #4
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1)
        train_y = randn(2, 10, 3, 5)

        train_x = train_x.to(device=self.device)
        train_y = train_y.to(device=self.device)

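        # first_dim_is_batch=True marks dim 0 of the data as a batch dimension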
        self.model = HigherOrderGP(train_x, train_y, first_dim_is_batch=True)

        # check that we can assign different kernels and likelihoods
        model_2 = HigherOrderGP(
            train_x,
            train_y,
            first_dim_is_batch=True,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
        )

        for m in [self.model, model_2]:
            mll = ExactMarginalLogLikelihood(m.likelihood, m)
            fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})
Example #5
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1)
        train_y = randn(2, 10, 3, 5)

        train_x = train_x.to(device=self.device)
        train_y = train_y.to(device=self.device)

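        # m1 treats the leading dim as a batch; m2 is fit to the first
        # batch element only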
        m1 = HigherOrderGP(train_x, train_y, first_dim_is_batch=True)
        m2 = HigherOrderGP(train_x[0], train_y[0])

        manual_seed(0)
        test_x = rand(2, 5, 1).to(device=self.device)

        posterior1 = m1.posterior(test_x)
        posterior2 = m2.posterior(test_x[0])
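        # a non-batched model can be evaluated on batched test points as well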
        posterior3 = m2.posterior(test_x)

        self.post_list = [
            [m1, test_x, posterior1],
            [m2, test_x[0], posterior2],
            [m2, test_x, posterior3],
        ]
Example #6
    def test_transforms(self):
        train_x = torch.rand(10, 3, device=self.device)
        train_y = torch.randn(10, 4, 5, device=self.device)

        # test handling of Standardize
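        # Standardize is not supported for HOGP outputs; the model should
        # warn and substitute a FlattenedStandardize instead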
        with self.assertWarns(RuntimeWarning):
            model = HigherOrderGP(train_X=train_x,
                                  train_Y=train_y,
                                  outcome_transform=Standardize(m=5))
        self.assertIsInstance(model.outcome_transform, FlattenedStandardize)
        self.assertEqual(model.outcome_transform.output_shape,
                         train_y.shape[1:])
        self.assertEqual(model.outcome_transform.batch_shape, torch.Size())

        model = HigherOrderGP(
            train_X=train_x,
            train_Y=train_y,
            input_transform=Normalize(d=3),
            outcome_transform=FlattenedStandardize(train_y.shape[1:]),
        )
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

        test_x = torch.rand(2, 5, 3, device=self.device)
        test_y = torch.randn(2, 5, 4, 5, device=self.device)
        posterior = model.posterior(test_x)
        self.assertIsInstance(posterior, TransformedPosterior)

        conditioned_model = model.condition_on_observations(test_x, test_y)
        self.assertIsInstance(conditioned_model, HigherOrderGP)

        self.check_transform_forward(model)
        self.check_transform_untransform(model)
Example #7
    def test_transforms(self):
        train_x = rand(10, 3, device=self.device)
        train_y = randn(10, 4, 5, device=self.device)
        model = HigherOrderGP(
            train_x,
            train_y,
            input_transform=Normalize(d=3),
            outcome_transform=FlattenedStandardize(train_y.shape[1:]),
        )
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

        test_x = rand(2, 5, 3, device=self.device)
        test_y = randn(2, 5, 4, 5, device=self.device)
        posterior = model.posterior(test_x)
        self.assertIsInstance(posterior, TransformedPosterior)

        conditioned_model = model.condition_on_observations(test_x, test_y)
        self.assertIsInstance(conditioned_model, HigherOrderGP)

        self.check_transform_forward(model)
        self.check_transform_untransform(model)
Example #8
    def test_initialize_latents(self):
        manual_seed(0)

        train_x = rand(10, 1, device=self.device)
        train_y = randn(10, 3, 5, device=self.device)

        for latent_dim_sizes in [[1, 1], [2, 3]]:
            for latent_init in ["gp", "default"]:
                self.model = HigherOrderGP(
                    train_x,
                    train_y,
                    num_latent_dims=latent_dim_sizes,
                    latent_init=latent_init,
                )
                self.assertEqual(
                    self.model.latent_parameters[0].shape,
                    Size((3, latent_dim_sizes[0])),
                )
                self.assertEqual(
                    self.model.latent_parameters[1].shape,
                    Size((5, latent_dim_sizes[1])),
                )
Example #9
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1, device=self.device)
        train_y = randn(2, 10, 3, 5, device=self.device)

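        # m1 is batched over the leading dim; m2 is fit to the first
        # batch element only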
        m1 = HigherOrderGP(train_x, train_y)
        m2 = HigherOrderGP(train_x[0], train_y[0])

        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)

        posterior1 = m1.posterior(test_x)
        posterior2 = m2.posterior(test_x[0])
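        # evaluating the non-batched m2 on batched test points is also supported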
        posterior3 = m2.posterior(test_x)

        self.post_list = [
            [m1, test_x, posterior1],
            [m2, test_x[0], posterior2],
            [m2, test_x, posterior3],
        ]
Example #10
class TestHigherOrderGP(BotorchTestCase):
    def setUp(self):
        super().setUp()
        torch.random.manual_seed(0)

        train_x = torch.rand(2, 10, 1, device=self.device)
        train_y = torch.randn(2, 10, 3, 5, device=self.device)

        self.model = HigherOrderGP(train_x, train_y)

        # check that we can assign different kernels and likelihoods
        model_2 = HigherOrderGP(
            train_X=train_x,
            train_Y=train_y,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
        )

        model_3 = HigherOrderGP(
            train_X=train_x,
            train_Y=train_y,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
            latent_init="gp",
        )

        for m in [self.model, model_2, model_3]:
            mll = ExactMarginalLogLikelihood(m.likelihood, m)
            fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

    def test_num_output_dims(self):
        for dtype in [torch.float, torch.double]:
            train_x = torch.rand(2, 10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(2, 10, 3, 5, device=self.device, dtype=dtype)
            model = HigherOrderGP(train_x, train_y)

            # check that it correctly inferred that this is a batched model
            self.assertEqual(model._num_outputs, 2)

            train_x = torch.rand(10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(10, 3, 5, 2, device=self.device, dtype=dtype)
            model = HigherOrderGP(train_x, train_y)

            # non-batched case
            self.assertEqual(model._num_outputs, 1)

            train_x = torch.rand(3, 2, 10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(
                3, 2, 10, 3, 5, device=self.device, dtype=dtype
            )

            # a multi-dimensional batch shape should raise NotImplementedError
            with self.assertRaises(NotImplementedError):
                model = HigherOrderGP(train_x, train_y)

    def test_posterior(self):
        for dtype in [torch.float, torch.double]:
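            # exercise both the Cholesky (mcs=800) and the iterative
            # (mcs=10) solve paths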
            for mcs in [800, 10]:
                torch.random.manual_seed(0)
                with max_cholesky_size(mcs):
                    test_x = torch.rand(2, 12, 1).to(device=self.device,
                                                     dtype=dtype)

                    self.model.to(dtype)
                    # clear caches
                    self.model.train()
                    self.model.eval()
                    # test the posterior works
                    posterior = self.model.posterior(test_x)
                    self.assertIsInstance(posterior, GPyTorchPosterior)

                    # test the posterior works with observation noise
                    posterior = self.model.posterior(test_x,
                                                     observation_noise=True)
                    self.assertIsInstance(posterior, GPyTorchPosterior)

                    # test that the posterior works with variances skipped;
                    # due to some funkiness in the MVN registration, the
                    # variance is small but non-zero.
                    with skip_posterior_variances():
                        posterior = self.model.posterior(test_x)
                        self.assertIsInstance(posterior, GPyTorchPosterior)
                        self.assertLessEqual(posterior.variance.max(), 1e-6)

    def test_transforms(self):
        for dtype in [torch.float, torch.double]:
            train_x = torch.rand(10, 3, device=self.device, dtype=dtype)
            train_y = torch.randn(10, 4, 5, device=self.device, dtype=dtype)

            # test handling of Standardize
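            # passing Standardize should raise a RuntimeWarning and be
            # converted to FlattenedStandardize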
            with self.assertWarns(RuntimeWarning):
                model = HigherOrderGP(train_X=train_x,
                                      train_Y=train_y,
                                      outcome_transform=Standardize(m=5))
            self.assertIsInstance(model.outcome_transform,
                                  FlattenedStandardize)
            self.assertEqual(model.outcome_transform.output_shape,
                             train_y.shape[1:])
            self.assertEqual(model.outcome_transform.batch_shape, torch.Size())

            model = HigherOrderGP(
                train_X=train_x,
                train_Y=train_y,
                input_transform=Normalize(d=3),
                outcome_transform=FlattenedStandardize(train_y.shape[1:]),
            )
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

            test_x = torch.rand(2, 5, 3, device=self.device, dtype=dtype)
            test_y = torch.randn(2, 5, 4, 5, device=self.device, dtype=dtype)
            with mock.patch.object(HigherOrderGP,
                                   "transform_inputs",
                                   wraps=model.transform_inputs) as mock_intf:
                posterior = model.posterior(test_x)
                mock_intf.assert_called_once()
            self.assertIsInstance(posterior, TransformedPosterior)

            conditioned_model = model.condition_on_observations(test_x, test_y)
            self.assertIsInstance(conditioned_model, HigherOrderGP)

            self.check_transform_forward(model, dtype)
            self.check_transform_untransform(model, dtype)

    def check_transform_forward(self, model, dtype):
        train_y = torch.randn(2, 10, 4, 5, device=self.device, dtype=dtype)
        train_y_var = torch.rand(2, 10, 4, 5, device=self.device, dtype=dtype)

        output, output_var = model.outcome_transform.forward(train_y)
        self.assertEqual(output.shape, torch.Size((2, 10, 4, 5)))
        self.assertEqual(output_var, None)

        output, output_var = model.outcome_transform.forward(
            train_y, train_y_var)
        self.assertEqual(output.shape, torch.Size((2, 10, 4, 5)))
        self.assertEqual(output_var.shape, torch.Size((2, 10, 4, 5)))

    def check_transform_untransform(self, model, dtype):
        output, output_var = model.outcome_transform.untransform(
            torch.randn(2, 2, 4, 5, device=self.device, dtype=dtype))
        self.assertEqual(output.shape, torch.Size((2, 2, 4, 5)))
        self.assertEqual(output_var, None)

        output, output_var = model.outcome_transform.untransform(
            torch.randn(2, 2, 4, 5, device=self.device, dtype=dtype),
            torch.rand(2, 2, 4, 5, device=self.device, dtype=dtype),
        )
        self.assertEqual(output.shape, torch.Size((2, 2, 4, 5)))
        self.assertEqual(output_var.shape, torch.Size((2, 2, 4, 5)))

    def test_condition_on_observations(self):
        for dtype in [torch.float, torch.double]:
            torch.random.manual_seed(0)
            test_x = torch.rand(2, 5, 1, device=self.device, dtype=dtype)
            test_y = torch.randn(2, 5, 3, 5, device=self.device, dtype=dtype)

            self.model.to(dtype)
            if dtype == torch.double:
                # need to clear float caches
                self.model.train()
                self.model.eval()
            # dummy call to ensure caches have been computed
            _ = self.model.posterior(test_x)
            conditioned_model = self.model.condition_on_observations(
                test_x, test_y)
            self.assertIsInstance(conditioned_model, HigherOrderGP)

    def test_fantasize(self):
        for dtype in [torch.float, torch.double]:
            torch.random.manual_seed(0)
            test_x = torch.rand(2, 5, 1, device=self.device, dtype=dtype)
            sampler = IIDNormalSampler(num_samples=32)

            self.model.to(dtype)
            if dtype == torch.double:
                # need to clear float caches
                self.model.train()
                self.model.eval()
            _ = self.model.posterior(test_x)
            fantasy_model = self.model.fantasize(test_x, sampler=sampler)
            self.assertIsInstance(fantasy_model, HigherOrderGP)
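            # the 32 fantasy samples form the leading batch dimension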
            self.assertEqual(fantasy_model.train_inputs[0].shape[:2],
                             torch.Size((32, 2)))

    def test_initialize_latents(self):
        for dtype in [torch.float, torch.double]:
            torch.random.manual_seed(0)

            train_x = torch.rand(10, 1, device=self.device, dtype=dtype)
            train_y = torch.randn(10, 3, 5, device=self.device, dtype=dtype)

            for latent_dim_sizes, latent_init in itertools.product(
                [[1, 1], [2, 3]],
                ["gp", "default"],
            ):
                self.model = HigherOrderGP(
                    train_x,
                    train_y,
                    num_latent_dims=latent_dim_sizes,
                    latent_init=latent_init,
                )
                self.assertEqual(
                    self.model.latent_parameters[0].shape,
                    torch.Size((3, latent_dim_sizes[0])),
                )
                self.assertEqual(
                    self.model.latent_parameters[1].shape,
                    torch.Size((5, latent_dim_sizes[1])),
                )
Example #11
class TestHigherOrderGP(BotorchTestCase):
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1)
        train_y = randn(2, 10, 3, 5)

        train_x = train_x.to(device=self.device)
        train_y = train_y.to(device=self.device)

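        # with first_dim_is_batch=True, dim 0 of the data is a batch dimension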
        self.model = HigherOrderGP(train_x, train_y, first_dim_is_batch=True)

        # check that we can assign different kernels and likelihoods
        model_2 = HigherOrderGP(
            train_x,
            train_y,
            first_dim_is_batch=True,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
        )

        for m in [self.model, model_2]:
            mll = ExactMarginalLogLikelihood(m.likelihood, m)
            fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

    def test_posterior(self):
        manual_seed(0)
        test_x = rand(2, 30, 1).to(device=self.device)

        # test the posterior works
        posterior = self.model.posterior(test_x)
        self.assertIsInstance(posterior, GPyTorchPosterior)

        # test the posterior works with observation noise
        posterior = self.model.posterior(test_x, observation_noise=True)
        self.assertIsInstance(posterior, GPyTorchPosterior)

        # test that the posterior works with variances skipped;
        # due to some funkiness in the MVN registration, the variance
        # is small but non-zero.
        with skip_posterior_variances():
            posterior = self.model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertLessEqual(posterior.variance.max(), 1e-6)

    def test_transforms(self):
        train_x = rand(10, 3, device=self.device)
        train_y = randn(10, 4, 5, device=self.device)
        model = HigherOrderGP(
            train_x,
            train_y,
            input_transform=Normalize(d=3),
            outcome_transform=FlattenedStandardize(train_y.shape[1:]),
        )
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

        test_x = rand(2, 5, 3, device=self.device)
        test_y = randn(2, 5, 4, 5, device=self.device)
        posterior = model.posterior(test_x)
        self.assertIsInstance(posterior, TransformedPosterior)

        conditioned_model = model.condition_on_observations(test_x, test_y)
        self.assertIsInstance(conditioned_model, HigherOrderGP)

        self.check_transform_forward(model)
        self.check_transform_untransform(model)

    def check_transform_forward(self, model):
        train_y = randn(2, 10, 4, 5, device=self.device)
        train_y_var = rand(2, 10, 4, 5, device=self.device)

        output, output_var = model.outcome_transform.forward(train_y)
        self.assertEqual(output.shape, Size((2, 10, 4, 5)))
        self.assertEqual(output_var, None)

        output, output_var = model.outcome_transform.forward(
            train_y, train_y_var)
        self.assertEqual(output.shape, Size((2, 10, 4, 5)))
        self.assertEqual(output_var.shape, Size((2, 10, 4, 5)))

    def check_transform_untransform(self, model):
        output, output_var = model.outcome_transform.untransform(
            randn(2, 2, 4, 5, device=self.device))
        self.assertEqual(output.shape, Size((2, 2, 4, 5)))
        self.assertEqual(output_var, None)

        output, output_var = model.outcome_transform.untransform(
            randn(2, 2, 4, 5, device=self.device),
            rand(2, 2, 4, 5, device=self.device),
        )
        self.assertEqual(output.shape, Size((2, 2, 4, 5)))
        self.assertEqual(output_var.shape, Size((2, 2, 4, 5)))

    def test_condition_on_observations(self):
        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)
        test_y = randn(2, 5, 3, 5, device=self.device)

        # dummy call to ensure caches have been computed
        _ = self.model.posterior(test_x)
        conditioned_model = self.model.condition_on_observations(
            test_x, test_y)
        self.assertIsInstance(conditioned_model, HigherOrderGP)

    def test_fantasize(self):
        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)
        sampler = IIDNormalSampler(num_samples=32).to(self.device)

        _ = self.model.posterior(test_x)
        fantasy_model = self.model.fantasize(test_x, sampler=sampler)
        self.assertIsInstance(fantasy_model, HigherOrderGP)
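        # 32 fantasy samples are prepended as a batch dimension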
        self.assertEqual(fantasy_model.train_inputs[0].shape[:2],
                         Size((32, 2)))

    def test_initialize_latents(self):
        manual_seed(0)

        train_x = rand(10, 1, device=self.device)
        train_y = randn(10, 3, 5, device=self.device)

        for latent_dim_sizes, latent_init in itertools.product(
            [[1, 1], [2, 3]],
            ["gp", "default"],
        ):
            self.model = HigherOrderGP(
                train_x,
                train_y,
                num_latent_dims=latent_dim_sizes,
                latent_init=latent_init,
            )
            self.assertEqual(
                self.model.latent_parameters[0].shape,
                Size((3, latent_dim_sizes[0])),
            )
            self.assertEqual(
                self.model.latent_parameters[1].shape,
                Size((5, latent_dim_sizes[1])),
            )