def test_pyro_sampling(self):
    """Fully Bayesian GP smoke test: draw hyperparameter samples with NUTS,
    load them into the model, and check batched predictions against test_y.

    Skipped silently when pyro is not installed.
    """
    try:
        import pyro  # noqa
        from pyro.infer.mcmc import NUTS, MCMC
    except ImportError:
        return
    train_x, test_x, train_y, test_y = self._get_data(cuda=False)
    likelihood = GaussianLikelihood(noise_constraint=gpytorch.constraints.Positive())
    gp_model = ExactGPModel(train_x, train_y, likelihood)

    # Attach ordinary GPyTorch priors to every hyperparameter we sample over.
    gp_model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1), "constant")
    gp_model.covar_module.base_kernel.register_prior(
        "lengthscale_prior", UniformPrior(0.01, 0.5), "lengthscale")
    gp_model.covar_module.register_prior("outputscale_prior", UniformPrior(1, 2), "outputscale")
    likelihood.register_prior("noise_prior", UniformPrior(0.05, 0.3), "noise")

    def pyro_model(x, y):
        # Exact (non-approximate) computations so the likelihood is well-defined
        # for the sampler.
        with gpytorch.settings.fast_computations(False, False, False):
            sampled_model = gp_model.pyro_sample_from_prior()
            output = sampled_model.likelihood(sampled_model(x))
            pyro.sample("obs", output, obs=y)
        return y

    kernel = NUTS(pyro_model, adapt_step_size=True)
    run = MCMC(kernel, num_samples=3, warmup_steps=20, disable_progbar=True)
    run.run(train_x, train_y)

    gp_model.pyro_load_from_samples(run.get_samples())
    gp_model.eval()

    # One leading batch dimension per posterior sample.
    expanded_test_x = test_x.unsqueeze(-1).repeat(3, 1, 1)
    output = gp_model(expanded_test_x)
    self.assertEqual(output.mean.size(0), 3)

    # All 3 samples should do reasonably well on a noiseless dataset.
    for sample_idx in range(3):
        self.assertLess(
            torch.norm(output.mean[sample_idx] - test_y) / test_y.norm(), 0.2)
def test_pyro_sampling(self):
    """Fully Bayesian GP smoke test using the marginal-log-likelihood pyro
    factor: sample hyperparameters with NUTS, load them into the model, and
    check batched predictions against test_y.

    Skipped silently when pyro is not installed.
    """
    try:
        import pyro  # noqa
        from pyro.infer.mcmc import NUTS, MCMC
    # Fixed: was a bare `except:`, which also swallowed unrelated errors
    # (typos inside the try, KeyboardInterrupt, ...). Only a missing pyro
    # should skip the test.
    except ImportError:
        return
    train_x, test_x, train_y, test_y = self._get_data(cuda=False)
    likelihood = GaussianLikelihood(noise_constraint=gpytorch.constraints.Positive())
    gp_model = ExactGPModel(train_x, train_y, likelihood)

    # Attach ordinary GPyTorch priors to every hyperparameter we sample over.
    gp_model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1), "constant")
    gp_model.covar_module.base_kernel.register_prior(
        "lengthscale_prior", UniformPrior(0.01, 0.2), "lengthscale")
    gp_model.covar_module.register_prior("outputscale_prior", UniformPrior(1, 2), "outputscale")
    likelihood.register_prior("noise_prior", LogNormalPrior(-1.5, 0.1), "noise")
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

    def pyro_model(x, y):
        gp_model.pyro_sample_from_prior()
        output = gp_model(x)
        # Called for its side effect (registers the marginal log likelihood
        # as a pyro factor); the dead `loss = ...` binding was removed.
        mll.pyro_factor(output, y)
        return y

    nuts_kernel = NUTS(pyro_model, adapt_step_size=True)
    # disable_progbar=True for consistency with the sibling sampling test and
    # to keep CI logs clean.
    mcmc_run = MCMC(nuts_kernel, num_samples=3, warmup_steps=20, disable_progbar=True)
    mcmc_run.run(train_x, train_y)

    gp_model.pyro_load_from_samples(mcmc_run.get_samples())
    gp_model.eval()

    # One leading batch dimension per posterior sample.
    expanded_test_x = test_x.unsqueeze(-1).repeat(3, 1, 1)
    output = gp_model(expanded_test_x)
    self.assertEqual(output.mean.size(0), 3)

    # All 3 samples should do reasonably well on a noiseless dataset.
    self.assertLess(
        torch.norm(output.mean[0] - test_y) / test_y.norm(), 0.2)
    self.assertLess(
        torch.norm(output.mean[1] - test_y) / test_y.norm(), 0.2)
    self.assertLess(
        torch.norm(output.mean[2] - test_y) / test_y.norm(), 0.2)