Example #1
import torch
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.mlls import VariationalELBO
from botorch.fit import fit_gpytorch_model
# MixedDerivativeVariationalGP ships with AEPsych; the import path is assumed.
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP

def testMixedDerivativeVariationalGP(self):
    # Two-column inputs: a value dimension plus a derivative-indicator dimension.
    train_x = torch.cat(
        (torch.tensor([1.0, 2.0, 3.0, 4.0]).unsqueeze(1), torch.zeros(4, 1)),
        dim=1,
    )
    train_y = torch.tensor([1.0, 2.0, 3.0, 4.0])
    m = MixedDerivativeVariationalGP(
        train_x=train_x,
        train_y=train_y,
        inducing_points=train_x,
        fixed_prior_mean=0.5,
    )
    self.assertEqual(m.mean_module.constant.item(), 0.5)
    self.assertEqual(
        m.covar_module.base_kernel.raw_lengthscale.shape, torch.Size([1, 1])
    )
    mll = VariationalELBO(
        likelihood=BernoulliLikelihood(), model=m, num_data=train_y.numel()
    )
    mll = fit_gpytorch_model(mll)
    test_x = torch.tensor([[1.0, 0.0], [3.0, 1.0]])
    m(test_x)
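After fitting, the model can be queried for choice probabilities by pushing the latent posterior through the Bernoulli likelihood. A minimal sketch, assuming the objects from the example above (m, test_x) are still in scope:

m.eval()
with torch.no_grad():
    latent = m(test_x)                          # latent GP posterior at the test points
    probs = BernoulliLikelihood()(latent).mean  # P(y = 1) under the probit link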
Example #2

def fit(self, X, y, lengthscale=None):
    if self.sparse:
        # Subsample 50 random points to use as inducing points.
        perm = torch.randperm(X.size(0))
        idx = perm[:50]
        # Tensor.sort() is not in-place; keep the returned sorted values.
        self.inducing_points = X[idx].sort(dim=0).values
        class_model = VSGPClassificationModel
    else:
        self.inducing_points = X
        class_model = GPClassificationModel

    self.X = X
    self.y = y

    self.model = class_model(self.inducing_points, lengthscale)
    # Use the Adam optimizer
    self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.1)

    # "Loss" for GPs: the variational ELBO, a lower bound on the marginal
    # log likelihood. num_data is the number of training points.
    self.mll = VariationalELBO(self.likelihood, self.model, self.y.numel())

    self.train()
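A hypothetical predict counterpart to fit, shown only as a sketch: the surrounding class and its self.model and self.likelihood attributes are assumed from the snippet above, and no such method appears in the original.

def predict(self, X):
    # Evaluation mode: posterior predictions rather than training behavior.
    self.model.eval()
    self.likelihood.eval()
    with torch.no_grad():
        latent = self.model(X)                # latent GP posterior
        probs = self.likelihood(latent).mean  # Bernoulli mean = P(y = 1)
    return probs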
Example #3

import torch
import gpytorch
from gpytorch.mlls.variational_elbo import VariationalELBO

model = GPClassificationModel(train_x)
likelihood = gpytorch.likelihoods.BernoulliLikelihood()

# Find optimal model hyperparameters
model.train()
likelihood.train()

# Use the Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# "Loss" for GPs: the variational ELBO, a lower bound on the marginal
# log likelihood. num_data is the number of training points.
mll = VariationalELBO(likelihood, model, train_y.numel())
with torch.cuda.device(0):
    training_iter = 100
    for i in range(training_iter):
        # Process the training set in six minibatches of 1000 points each.
        for j in range(6):
            # Zero backpropped gradients from the previous iteration
            optimizer.zero_grad()
            # Get the latent model output for this minibatch
            output = model(train_x[1000 * j:1000 * (j + 1)])
            # Calc loss and backprop gradients
            loss = -mll(output, train_y[1000 * j:1000 * (j + 1)])
            loss.backward()
            print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iter, loss.item()))
            optimizer.step()
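Once the loop finishes, evaluation follows the usual GPyTorch pattern. A minimal sketch, assuming held-out test_x / test_y tensors shaped like the training data (not defined in the original):

model.eval()
likelihood.eval()
with torch.no_grad():
    pred = likelihood(model(test_x))  # Bernoulli distribution over test points
    labels = pred.mean.round()        # threshold probabilities at 0.5
    accuracy = (labels == test_y).float().mean().item()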