Example #1
def test():
    model.eval()
    likelihood.eval()

    correct = 0
    with torch.no_grad(), num_likelihood_samples(16):
        for data, target in test_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            output = likelihood(model(data))  # This gives us 16 samples from the predictive distribution
            pred = output.probs.mean(0).argmax(-1)  # Taking the mean over all of the samples we've drawn
            correct += pred.eq(target.view_as(pred)).cpu().sum()
    print('Test set: Accuracy: {}/{} ({}%)'.format(
        correct, len(test_loader.dataset), 100. * correct / float(len(test_loader.dataset))
    ))
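For context (not part of the original example): num_likelihood_samples is the gpytorch.settings context manager that fixes how many Monte Carlo samples the likelihood draws. Below is a small self-contained sketch, with arbitrary toy shapes, of what likelihood(model(data)) returns and why .probs.mean(0).argmax(-1) yields class predictions.

import torch
from gpytorch.distributions import MultitaskMultivariateNormal
from gpytorch.likelihoods import SoftmaxLikelihood
from gpytorch.settings import num_likelihood_samples

likelihood = SoftmaxLikelihood(num_features=3, num_classes=4)
likelihood.eval()

# Stand-in for `model(data)`: a latent distribution over 5 points x 3 features.
f_dist = MultitaskMultivariateNormal(torch.zeros(5, 3), torch.eye(5 * 3))

with torch.no_grad(), num_likelihood_samples(16):
    output = likelihood(f_dist)             # Categorical built from 16 function samples
    print(output.probs.shape)               # torch.Size([16, 5, 4])
    pred = output.probs.mean(0).argmax(-1)  # average over samples, then pick the class
    print(pred.shape)                       # torch.Size([5])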
Example #2
def train(epoch):
    model.train()
    likelihood.train()

    minibatch_iter = tqdm(train_loader, desc=f"(Epoch {epoch}) Minibatch")
    with num_likelihood_samples(8):
        for data, target in minibatch_iter:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = model(data)
            loss = -mll(output, target)
            loss.backward()
            optimizer.step()
            minibatch_iter.set_postfix(loss=loss.item())
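The loop above uses an optimizer and an mll objective created elsewhere. One plausible setup, reusing the model, likelihood, and train_loader from this example (the learning rate and the choice of VariationalELBO are assumptions, not taken from the original snippet):

import torch
import gpytorch

# Optimize the model and the likelihood parameters jointly.
optimizer = torch.optim.Adam([
    {"params": model.parameters()},
    {"params": likelihood.parameters()},
], lr=0.01)

# Variational ELBO used as `mll`; pass the module that actually holds the
# variational GP (assumed here to be `model` itself).
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_loader.dataset))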
Example #3
    def get_predictive_distribution(self, X, full_covariance=False):
        """
        Get distribution for novel input.
        """
        X = self.X_scaler(X)
        f_dist = self(X)

        with torch.no_grad(), settings.num_likelihood_samples(1):
            y_dist = self.likelihood(f_dist)

        no_scaling = (torch.all(self.Y_scaler.means == 0)
                      and torch.all(self.Y_scaler.stds == 1))

        if no_scaling:
            return y_dist
        elif isinstance(y_dist, MultivariateNormal):
            mean = self.Y_scaler.inverse_transform(y_dist.mean)
            variance = y_dist.variance * self.Y_scaler.stds**2
            if full_covariance:
                return MultivariateNormal(mean, torch.diag(variance))
            else:
                return Normal(mean, variance.sqrt())
        else:
            raise NotImplementedError
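The X_scaler and Y_scaler above are project-specific objects; the method only needs them to be callable and to expose means, stds, and inverse_transform. A hypothetical minimal standardizer with that interface:

import torch

class Standardizer:
    """Hypothetical stand-in for the X_scaler / Y_scaler used above."""

    def __init__(self, data):
        self.means = data.mean(dim=0)
        self.stds = data.std(dim=0)

    def __call__(self, x):
        # Map inputs to zero mean and unit variance.
        return (x - self.means) / self.stds

    def inverse_transform(self, y):
        # Undo the scaling for quantities in the original units.
        return y * self.stds + self.means

With such a scaler, the no_scaling branch simply detects the identity transform (means all zero, stds all one) and skips the rescaling.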
Example #4
# For a deep GP, we need to handle the objective function (e.g. the ELBO) in a slightly different way.
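# Context assumed by this example (not shown above): `model` is a deep GP, e.g.
# a gpytorch.models.deep_gps.DeepGP subclass exposing its own `likelihood`, and
# `train_loader` is a mini-batch loader over (x_train, y_train); the batch size
# below is an arbitrary choice.
import torch
from torch.utils.data import TensorDataset, DataLoader
from gpytorch.mlls import DeepApproximateMLL, VariationalELBO
from gpytorch.settings import num_likelihood_samples

train_loader = DataLoader(TensorDataset(x_train, y_train), batch_size=1024, shuffle=True)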
num_samples = 10

optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1)
# DeepApproximateMLL simply sums the ELBO losses of the individual layers.

marginal_loglikelihood = DeepApproximateMLL(
    VariationalELBO(model.likelihood, model, x_train.shape[-2]))

n_epochs = 100
for i in range(n_epochs):

    for x_batch, y_batch in train_loader:
        with num_likelihood_samples(num_samples):
            optimizer.zero_grad()
            output = model(x_batch)
            loss = -marginal_loglikelihood(output, y_batch)
            loss.backward()
            optimizer.step()

    print(f"epochs {i}, loss {loss.item()}")

## test and evaluate the model

model.eval()
predictive_means, predictive_variances, test_loglikelihoods = model.predict(
    test_loader)

rmse = torch.mean(torch.pow(predictive_means.mean(0) - y_test, 2)).sqrt()
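The model.predict(test_loader) helper used above is not shown in this example; in deep GP code it typically loops over the test loader and stacks predictive means, variances, and per-point log likelihoods, each with a leading sample dimension (hence the .mean(0) in the RMSE). A sketch of one plausible implementation, written as a method of the model:

    def predict(self, test_loader):
        # Collect predictive statistics batch by batch; every tensor keeps a
        # leading sample dimension, which is why the caller averages with .mean(0).
        with torch.no_grad():
            means, variances, lls = [], [], []
            for x_batch, y_batch in test_loader:
                f_dist = self(x_batch)
                y_dist = self.likelihood(f_dist)
                means.append(y_dist.mean.cpu())
                variances.append(y_dist.variance.cpu())
                lls.append(self.likelihood.log_marginal(y_batch, f_dist).cpu())
        return torch.cat(means, dim=-1), torch.cat(variances, dim=-1), torch.cat(lls, dim=-1)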
Example #5
from gpytorch.likelihoods import GaussianLikelihood, BernoulliLikelihood, SoftmaxLikelihood
import torch
import numpy as np
from gpytorch.settings import num_likelihood_samples
from torch.distributions import MultivariateNormal

#likelihood = GaussianLikelihood()
#likelihood = BernoulliLikelihood()
likelihood = SoftmaxLikelihood(num_classes=5, num_features=2)

observations = torch.from_numpy(np.array([1.0, 1.0])).type(torch.float32)
# A 2-dimensional latent mean with an identity covariance (matching num_features=2).
mean = torch.from_numpy(np.array([1.0, 2.0])).type(torch.float32)
covar = torch.from_numpy(np.array([[1.0, 0.0], [0.0, 1.0]])).type(torch.float32)
multivariate_normal = MultivariateNormal(mean, covar)
with num_likelihood_samples(8000):
    explog = likelihood.expected_log_prob(observations, multivariate_normal)

print(explog)
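As an aside, num_likelihood_samples only affects likelihoods whose expected log probability must be estimated by Monte Carlo sampling, such as the SoftmaxLikelihood above (more samples give a lower-variance estimate). A GaussianLikelihood computes expected_log_prob in closed form, so the setting has no effect there; a small illustrative comparison with toy tensors:

from gpytorch.distributions import MultivariateNormal as GPyTorchMVN
from gpytorch.likelihoods import GaussianLikelihood

gaussian_likelihood = GaussianLikelihood()
f_dist = GPyTorchMVN(torch.zeros(3), torch.eye(3))
y = torch.tensor([0.5, -0.2, 1.0])

with num_likelihood_samples(8000):
    # Closed-form result; unchanged no matter how many samples are configured.
    print(gaussian_likelihood.expected_log_prob(y, f_dist))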