Code example #1
0
def model_setup(train_x, train_y_a, train_y_b, train_y_c):
    """Build and train an IndependentModelList of three exact GPs on shared inputs.

    Each target series gets its own GaussianLikelihood/ExactGPModel pair; the
    three pairs are wrapped in an IndependentModelList + LikelihoodList and
    jointly optimized via SumMarginalLogLikelihood by the module-level
    ``train`` helper.

    Args:
        train_x: training inputs shared by all three sub-models.
        train_y_a, train_y_b, train_y_c: per-model training targets.

    Returns:
        Tuple ``(model, likelihood)``: the trained IndependentModelList and
        the matching LikelihoodList.
    """
    submodels = []
    for train_y in (train_y_a, train_y_b, train_y_c):
        # batch_shape=[30] mirrors the batched GP definition used elsewhere
        # -- NOTE(review): confirm it matches ExactGPModel's batch dimension.
        likelihood = gpytorch.likelihoods.GaussianLikelihood(batch_shape=torch.Size([30]))
        submodels.append(ExactGPModel(train_x, train_y, likelihood))

    model = gpytorch.models.IndependentModelList(*submodels)
    likelihood = gpytorch.likelihoods.LikelihoodList(*(m.likelihood for m in submodels))
    mll = SumMarginalLogLikelihood(likelihood, model)
    model, likelihood = train(model, likelihood, mll)
    return model, likelihood
Code example #2
0
    def test_simple_model_list_gp_regression(self, cuda=False):
        """Smoke-test training and prediction for a 2-model IndependentModelList."""
        # Jittered training inputs of different sizes for the two sub-models.
        x1 = torch.linspace(0, 0.95, 25) + 0.05 * torch.rand(25)
        x2 = torch.linspace(0, 0.95, 15) + 0.05 * torch.rand(15)

        # Noisy sine / cosine targets.
        y1 = torch.sin(x1 * (2 * math.pi)) + 0.2 * torch.randn_like(x1)
        y2 = torch.cos(x2 * (2 * math.pi)) + 0.2 * torch.randn_like(x2)

        gp1 = ExactGPModel(x1, y1, GaussianLikelihood())
        gp2 = ExactGPModel(x2, y2, GaussianLikelihood())

        model = IndependentModelList(gp1, gp2)
        likelihood = LikelihoodList(gp1.likelihood, gp2.likelihood)

        if cuda:
            model = model.cuda()

        model.train()
        likelihood.train()

        mll = SumMarginalLogLikelihood(likelihood, model)
        optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1)

        # A handful of optimization steps is enough for a smoke test.
        for _ in range(10):
            optimizer.zero_grad()
            out = model(*model.train_inputs)
            loss = -mll(out, model.train_targets)
            loss.backward()
            optimizer.step()

        model.eval()
        likelihood.eval()

        device = torch.device("cuda") if cuda else torch.device("cpu")
        with torch.no_grad(), gpytorch.settings.fast_pred_var():
            test_x = torch.linspace(0, 1, 10, device=device)
            outputs_f = model(test_x, test_x)
            predictions_obs_noise = likelihood(*outputs_f)

        # Latent outputs and noisy predictions both come back as 2-element lists.
        self.assertIsInstance(outputs_f, list)
        self.assertEqual(len(outputs_f), 2)
        self.assertIsInstance(predictions_obs_noise, list)
        self.assertEqual(len(predictions_obs_noise), 2)
Code example #3
0
File: sparse_gp_list_torch.py  Project: pnickl/reg
    def fit(self,
            target,
            input,
            nb_iter=100,
            lr=1e-1,
            verbose=True,
            preprocess=True):
        """Fit one sparse GP per output column of ``target`` using Adam.

        Args:
            target: training outputs; column ``i`` trains ``self.model.models[i]``.
            input: training inputs shared by every sub-model.
            nb_iter: number of Adam steps on the summed marginal log-likelihood.
            lr: Adam learning rate.
            verbose: if True, print the loss after every iteration.
            preprocess: if True, fit and apply the input/target transforms and
                re-initialize the inducing points from the transformed inputs.
        """

        if preprocess:
            # Fit normalization transforms on the raw data, then apply them.
            self.init_preprocess(target, input)
            target = transform(target, self.target_trans)
            input = transform(input, self.input_trans)

            # update inducing points: re-seed each sub-model with the same
            # random subset of the (transformed) training inputs.
            inducing_idx = np.random.choice(len(input),
                                            self.inducing_size,
                                            replace=False)
            for i, _model in enumerate(self.model.models):
                _model.covar_module.inducing_points.data = input[inducing_idx,
                                                                 ...]

        target = target.to(self.device)
        input = input.to(self.device)

        # Sub-model i trains on output column i; strict=False permits the
        # training data shape to change between calls.
        for i, _model in enumerate(self.model.models):
            _model.set_train_data(input, target[:, i], strict=False)

        self.model.train().to(self.device)
        self.likelihood.train().to(self.device)

        optimizer = Adam([{'params': self.model.parameters()}], lr=lr)
        mll = SumMarginalLogLikelihood(self.likelihood, self.model)

        for i in range(nb_iter):
            optimizer.zero_grad()
            _output = self.model(*self.model.train_inputs)
            # Minimize the negative summed marginal log-likelihood.
            loss = -mll(_output, self.model.train_targets)
            loss.backward()
            if verbose:
                print('Iter %d/%d - Loss: %.3f' %
                      (i + 1, nb_iter, loss.item()))
            optimizer.step()
            # Release cached GPU memory each step to keep peak usage down.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
Code example #4
0
    def test_ModelListGP(self, cuda=False):
        """Check construction, fitting, and posterior types of a ModelListGP."""
        for double in (False, True):
            device = torch.device("cuda") if cuda else torch.device("cpu")
            dtype = torch.double if double else torch.float
            tkwargs = {"device": device, "dtype": dtype}
            model = _get_model(n=10, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            # Every sub-model uses a constant mean and a scaled Matern kernel
            # with a Gamma lengthscale prior.
            for sub in model.models:
                self.assertIsInstance(sub.mean_module, ConstantMean)
                self.assertIsInstance(sub.covar_module, ScaleKernel)
                base = sub.covar_module.base_kernel
                self.assertIsInstance(base, MaternKernel)
                self.assertIsInstance(base.lengthscale_prior, GammaPrior)

            # Fitting: the sum-MLL decomposes into one exact MLL per model.
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for sub_mll in mll.mlls:
                self.assertIsInstance(sub_mll, ExactMarginalLogLikelihood)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # Posterior without observation noise.
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # Posterior with observation noise.
            posterior = model.posterior(test_x, observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # Restricting to a single output yields a plain MVN.
            posterior = model.posterior(
                test_x, output_indices=[0], observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)
Code example #5
0
    def test_ModelListGP(self):
        """Exercise a ModelListGP end-to-end over float/double and with/without
        an outcome transform: construction, MLL wrapping, sequential and joint
        fitting, output subsetting, posteriors, and conditioning on new data.
        """
        for dtype, use_octf in itertools.product((torch.float, torch.double),
                                                 (False, True)):
            tkwargs = {"device": self.device, "dtype": dtype}
            model = _get_model(n=10, use_octf=use_octf, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            # Each sub-model: constant mean + scaled Matern w/ Gamma prior.
            for m in model.models:
                self.assertIsInstance(m.mean_module, ConstantMean)
                self.assertIsInstance(m.covar_module, ScaleKernel)
                matern_kernel = m.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior,
                                      GammaPrior)
                if use_octf:
                    self.assertIsInstance(m.outcome_transform, Standardize)

            # test constructing likelihood wrapper
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for mll_ in mll.mlls:
                self.assertIsInstance(mll_, ExactMarginalLogLikelihood)

            # test model fitting (sequential)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll,
                                         options={"maxiter": 1},
                                         max_retries=1)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                # test model fitting (joint)
                mll = fit_gpytorch_model(mll,
                                         options={"maxiter": 1},
                                         max_retries=1,
                                         sequential=False)

            # test subset outputs: the subset model must carry an identical
            # state dict to the corresponding original sub-model.
            subset_model = model.subset_output([1])
            self.assertIsInstance(subset_model, ModelListGP)
            self.assertEqual(len(subset_model.models), 1)
            sd_subset = subset_model.models[0].state_dict()
            sd = model.models[1].state_dict()
            self.assertTrue(set(sd_subset.keys()) == set(sd.keys()))
            self.assertTrue(
                all(torch.equal(v, sd[k]) for k, v in sd_subset.items()))

            # test posterior
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)
            if use_octf:
                # ensure un-transformation is applied: temporarily strip the
                # transform, get the raw posterior, then compare variances.
                submodel = model.models[0]
                p0 = submodel.posterior(test_x)
                tmp_tf = submodel.outcome_transform
                del submodel.outcome_transform
                p0_tf = submodel.posterior(test_x)
                submodel.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(p0_tf).variance
                self.assertTrue(torch.allclose(p0.variance, expected_var))

            # test observation_noise
            posterior = model.posterior(test_x, observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # test output_indices
            posterior = model.posterior(test_x,
                                        output_indices=[0],
                                        observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)

            # test condition_on_observations
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(2, 2, **tkwargs)
            cm = model.condition_on_observations(f_x, f_y)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations batched
            f_x = torch.rand(3, 2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            cm = model.condition_on_observations(f_x, f_y)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations batched (fast fantasies)
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            cm = model.condition_on_observations(f_x, f_y)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations (incorrect input shape error)
            with self.assertRaises(BotorchTensorDimensionError):
                model.condition_on_observations(f_x,
                                                torch.rand(3, 2, 3, **tkwargs))
Code example #6
0
    def test_ModelListGP_fixed_noise(self, cuda=False):
        """Exercise a fixed-noise ModelListGP over float/double: construction,
        fitting, posteriors, and fantasy-model generation (plain, batched,
        fast-fantasy, and shape-error cases).
        """
        for double in (False, True):
            tkwargs = {
                "device":
                torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model(n=10, fixed_noise=True, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            # Each sub-model: constant mean + scaled Matern w/ Gamma prior.
            for m in model.models:
                self.assertIsInstance(m.mean_module, ConstantMean)
                self.assertIsInstance(m.covar_module, ScaleKernel)
                matern_kernel = m.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior,
                                      GammaPrior)

            # test model fitting
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for mll_ in mll.mlls:
                self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # test output_indices: single-output posterior is a plain MVN
            posterior = model.posterior(test_x,
                                        output_indices=[0],
                                        observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)

            # test get_fantasy_model
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand_like(f_y)
            fantasy_model = model.get_fantasy_model(f_x, f_y, noise=noise)
            self.assertIsInstance(fantasy_model, ModelListGP)

            # test get_fantasy_model batched
            f_x = torch.rand(3, 2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand_like(f_y)
            fantasy_model = model.get_fantasy_model(f_x, f_y, noise=noise)
            self.assertIsInstance(fantasy_model, ModelListGP)

            # test get_fantasy_model batched (fast fantasies)
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand(2, 2, **tkwargs)
            fantasy_model = model.get_fantasy_model(f_x, f_y, noise=noise)
            self.assertIsInstance(fantasy_model, ModelListGP)

            # test get_fantasy_model (incorrect input shape error)
            with self.assertRaises(ValueError):
                model.get_fantasy_model(f_x,
                                        torch.rand(3, 2, 3, **tkwargs),
                                        noise=noise)
            # test get_fantasy_model (incorrect noise shape error)
            with self.assertRaises(ValueError):
                model.get_fantasy_model(f_x,
                                        f_y,
                                        noise=torch.rand(2, 3, **tkwargs))
Code example #7
0
    def test_ModelListGP(self, cuda=False):
        """Exercise ModelListGP construction, fitting, posteriors, and
        conditioning on new observations, in float and double precision."""
        for double in (False, True):
            device = torch.device("cuda") if cuda else torch.device("cpu")
            dtype = torch.double if double else torch.float
            tkwargs = {"device": device, "dtype": dtype}
            model = _get_model(n=10, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            # Each sub-model: constant mean + scaled Matern w/ Gamma prior.
            for sub in model.models:
                self.assertIsInstance(sub.mean_module, ConstantMean)
                self.assertIsInstance(sub.covar_module, ScaleKernel)
                base = sub.covar_module.base_kernel
                self.assertIsInstance(base, MaternKernel)
                self.assertIsInstance(base.lengthscale_prior, GammaPrior)

            # The likelihood wrapper decomposes into per-model exact MLLs.
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for sub_mll in mll.mlls:
                self.assertIsInstance(sub_mll, ExactMarginalLogLikelihood)

            # Sequential fitting.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(
                    mll, options={"maxiter": 1}, max_retries=1)
            # Joint fitting.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(
                    mll, options={"maxiter": 1}, max_retries=1,
                    sequential=False)

            # Posterior without observation noise.
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # Posterior with observation noise.
            posterior = model.posterior(test_x, observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # Restricting to a single output yields a plain MVN.
            posterior = model.posterior(
                test_x, output_indices=[0], observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)

            # Conditioning on new observations (plain).
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(2, 2, **tkwargs)
            self.assertIsInstance(
                model.condition_on_observations(f_x, f_y), ModelListGP)

            # Conditioning on new observations (batched).
            f_x = torch.rand(3, 2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            self.assertIsInstance(
                model.condition_on_observations(f_x, f_y), ModelListGP)

            # Conditioning on new observations (fast fantasies).
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            self.assertIsInstance(
                model.condition_on_observations(f_x, f_y), ModelListGP)

            # Mismatched output dimension must raise.
            with self.assertRaises(BotorchTensorDimensionError):
                model.condition_on_observations(
                    f_x, torch.rand(3, 2, 3, **tkwargs))
Code example #8
0
    def test_ModelListGP_fixed_noise(self):
        """Exercise a fixed-noise ModelListGP over dtype x outcome-transform
        combinations: construction, fitting, posteriors (incl. transform
        un-transformation), and conditioning with explicit noise.
        """
        for dtype, use_octf in itertools.product((torch.float, torch.double),
                                                 (False, True)):
            tkwargs = {"device": self.device, "dtype": dtype}
            model = _get_model(fixed_noise=True, use_octf=use_octf, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            # Each sub-model: constant mean + scaled Matern w/ Gamma prior.
            for m in model.models:
                self.assertIsInstance(m.mean_module, ConstantMean)
                self.assertIsInstance(m.covar_module, ScaleKernel)
                matern_kernel = m.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior,
                                      GammaPrior)

            # test model fitting
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for mll_ in mll.mlls:
                self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll,
                                         options={"maxiter": 1},
                                         max_retries=1)

            # test posterior
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)
            if use_octf:
                # ensure un-transformation is applied: temporarily strip the
                # transform, get the raw posterior, then compare variances.
                submodel = model.models[0]
                p0 = submodel.posterior(test_x)
                tmp_tf = submodel.outcome_transform
                del submodel.outcome_transform
                p0_tf = submodel.posterior(test_x)
                submodel.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(p0_tf).variance
                self.assertTrue(torch.allclose(p0.variance, expected_var))

            # test output_indices
            posterior = model.posterior(test_x,
                                        output_indices=[0],
                                        observation_noise=True)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultivariateNormal)

            # test condition_on_observations
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand_like(f_y)
            cm = model.condition_on_observations(f_x, f_y, noise=noise)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations batched
            f_x = torch.rand(3, 2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand_like(f_y)
            cm = model.condition_on_observations(f_x, f_y, noise=noise)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations batched (fast fantasies)
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand(2, 2, **tkwargs)
            cm = model.condition_on_observations(f_x, f_y, noise=noise)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations (incorrect input shape error)
            with self.assertRaises(BotorchTensorDimensionError):
                model.condition_on_observations(f_x,
                                                torch.rand(3, 2, 3, **tkwargs),
                                                noise=noise)
            # test condition_on_observations (incorrect noise shape error)
            f_y = torch.rand(2, 2, **tkwargs)
            with self.assertRaises(BotorchTensorDimensionError):
                model.condition_on_observations(f_x,
                                                f_y,
                                                noise=torch.rand(
                                                    2, 3, **tkwargs))
Code example #9
0
    def test_ModelListGP_fixed_noise(self):
        """Exercise a fixed-noise ModelListGP over float/double: construction,
        fitting, posteriors, and conditioning with explicit noise (plain,
        batched, fast-fantasy, and shape-error cases).
        """
        for double in (False, True):
            tkwargs = {
                "device": self.device,
                "dtype": torch.double if double else torch.float,
            }
            model = _get_model(n=10, fixed_noise=True, **tkwargs)
            self.assertIsInstance(model, ModelListGP)
            self.assertIsInstance(model.likelihood, LikelihoodList)
            # Each sub-model: constant mean + scaled Matern w/ Gamma prior.
            for m in model.models:
                self.assertIsInstance(m.mean_module, ConstantMean)
                self.assertIsInstance(m.covar_module, ScaleKernel)
                matern_kernel = m.covar_module.base_kernel
                self.assertIsInstance(matern_kernel, MaternKernel)
                self.assertIsInstance(matern_kernel.lengthscale_prior,
                                      GammaPrior)

            # test model fitting
            mll = SumMarginalLogLikelihood(model.likelihood, model)
            for mll_ in mll.mlls:
                self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll,
                                         options={"maxiter": 1},
                                         max_retries=1)

            # test posterior
            test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
            posterior = model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)

            # TODO: Add test back in once gpytorch > 0.3.5 is released
            # # test output_indices
            # posterior = model.posterior(
            #     test_x, output_indices=[0], observation_noise=True
            # )
            # self.assertIsInstance(posterior, GPyTorchPosterior)
            # self.assertIsInstance(posterior.mvn, MultivariateNormal)

            # test condition_on_observations
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand_like(f_y)
            cm = model.condition_on_observations(f_x, f_y, noise=noise)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations batched
            f_x = torch.rand(3, 2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand_like(f_y)
            cm = model.condition_on_observations(f_x, f_y, noise=noise)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations batched (fast fantasies)
            f_x = torch.rand(2, 1, **tkwargs)
            f_y = torch.rand(3, 2, 2, **tkwargs)
            noise = 0.1 + 0.1 * torch.rand(2, 2, **tkwargs)
            cm = model.condition_on_observations(f_x, f_y, noise=noise)
            self.assertIsInstance(cm, ModelListGP)

            # test condition_on_observations (incorrect input shape error)
            with self.assertRaises(BotorchTensorDimensionError):
                model.condition_on_observations(f_x,
                                                torch.rand(3, 2, 3, **tkwargs),
                                                noise=noise)
            # test condition_on_observations (incorrect noise shape error)
            f_y = torch.rand(2, 2, **tkwargs)
            with self.assertRaises(BotorchTensorDimensionError):
                model.condition_on_observations(f_x,
                                                f_y,
                                                noise=torch.rand(
                                                    2, 3, **tkwargs))