Example #1
    def test_forward(self):
        a = nn.Parameter(torch.Tensor([5]))
        b = Variable(torch.ones(3, 3))
        output = gpytorch.add_diag(b, a)

        actual = torch.Tensor([[6, 1, 1], [1, 6, 1], [1, 1, 6]])
        self.assertLess(torch.norm(output.data - actual), 1e-7)
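For reference, gpytorch.add_diag(b, a) here returns b + a * I, which is why the expected result has 6 on the diagonal and 1 elsewhere. A minimal sketch of the same computation in plain PyTorch (the helper name add_diag_reference is ours, not gpytorch API):

import torch

def add_diag_reference(matrix, diag_value):
    # Add diag_value to every entry on the main diagonal.
    return matrix + diag_value * torch.eye(matrix.size(0))

b = torch.ones(3, 3)
print(add_diag_reference(b, 5.0))
# tensor([[6., 1., 1.],
#         [1., 6., 1.],
#         [1., 1., 6.]])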
Example #2
    def test_add_diag(self):
        lv = InvQuadLazyVariable(self.base_mat_var, self.left_mat_var,
                                 self.right_mat_var, self.diag_var)
        ev = lv.evaluate()

        res = lv.add_diag(torch.Tensor([0.5])).evaluate()
        actual = gpytorch.add_diag(ev, torch.Tensor([0.5]))
        assert ((res - actual).norm() / actual.norm()).item() < 1e-3
Example #3
def test_backward():
    grad = torch.randn(3, 3)

    a = nn.Parameter(torch.Tensor([3]))
    b = Variable(torch.ones(3, 3), requires_grad=True)
    output = gpytorch.add_diag(b, a)
    output.backward(gradient=grad)

    assert (math.fabs(a.grad.data[0] - grad.trace()) < 1e-6)
    assert (torch.norm(b.grad.data - grad) < 1e-6)
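Since the output is b + a * I, the gradients asserted above follow directly: the gradient w.r.t. the scalar a collects the diagonal of the incoming gradient (its trace), while the gradient w.r.t. b passes through unchanged. The same identities can be checked without gpytorch; this sketch uses the current PyTorch tensor API in place of the deprecated Variable:

import torch

grad = torch.randn(3, 3)
a = torch.tensor([3.0], requires_grad=True)
b = torch.ones(3, 3, requires_grad=True)

output = b + a * torch.eye(3)
output.backward(gradient=grad)

assert torch.isclose(a.grad[0], grad.trace(), atol=1e-6)
assert torch.allclose(b.grad, grad, atol=1e-6)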
Example #4
    def add_diag(self, diag):
        return NonLazyVariable(gpytorch.add_diag(self.tensor, diag))
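This is a delegation pattern: the lazy wrapper does nothing structure-specific here, it simply calls the dense gpytorch.add_diag and re-wraps the result. A minimal sketch of the pattern with a hypothetical class name:

import torch

class DenseMatrixSketch:
    """Hypothetical stand-in for NonLazyVariable: a thin wrapper
    around an explicitly evaluated tensor."""

    def __init__(self, tensor):
        self.tensor = tensor

    def add_diag(self, diag_value):
        # Fall back to dense arithmetic and re-wrap the result.
        return DenseMatrixSketch(
            self.tensor + diag_value * torch.eye(self.tensor.size(0)))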
Example #5
    def forward(self, input):
        assert isinstance(input, GaussianRandomVariable)
        mean, covar = input.representation()
        noise = gpytorch.add_diag(covar, self.log_noise.exp())
        return GaussianRandomVariable(mean, noise)
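This is the standard Gaussian-likelihood idiom: a learned log-noise parameter is exponentiated (guaranteeing a positive noise variance) and added to the covariance diagonal. A self-contained sketch of the same idea, with plain tensors in place of GPyTorch's random-variable types and a hypothetical class name:

import torch
from torch import nn

class GaussianNoiseSketch(nn.Module):
    def __init__(self):
        super().__init__()
        # Learn log-noise so that exp(log_noise) is always positive.
        self.log_noise = nn.Parameter(torch.zeros(1))

    def forward(self, mean, covar):
        noisy_covar = covar + self.log_noise.exp() * torch.eye(covar.size(0))
        return mean, noisy_covar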
Example #6
    def __call__(self, *args, **kwargs):
        output = None

        # Posterior mode
        if self.posterior:
            train_xs = self.train_inputs
            train_y = self.train_target
            if all([
                    torch.equal(train_x.data, input.data)
                    for train_x, input in zip(train_xs, args)
            ]):
                logging.warning('The input matches the stored training data. '
                                'Did you forget to call model.train()?')

            # Exact inference
            if self.exact_inference:
                n_train = len(train_xs[0])
                full_inputs = [
                    torch.cat([train_x, input])
                    for train_x, input in zip(train_xs, args)
                ]
                full_output = super(GPModel,
                                    self).__call__(*full_inputs, **kwargs)
                full_mean, full_covar = full_output.representation()

                train_mean = full_mean[:n_train]
                test_mean = full_mean[n_train:]
                train_train_covar = gpytorch.add_diag(
                    full_covar[:n_train, :n_train],
                    self.likelihood.log_noise.exp())
                train_test_covar = full_covar[:n_train, n_train:]
                test_train_covar = full_covar[n_train:, :n_train]
                test_test_covar = full_covar[n_train:, n_train:]

                # Calculate posterior components
                if not self.has_computed_alpha[0]:
                    alpha_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    alpha = alpha_strategy.exact_posterior_alpha(
                        train_mean, train_y)
                    self.alpha.copy_(alpha.data)
                    self.has_computed_alpha.fill_(1)
                else:
                    alpha = Variable(self.alpha)

                if (not self.has_computed_lanczos[0]
                        and gpytorch.functions.fast_pred_var):
                    lanczos_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    q_mat, t_mat = lanczos_strategy.exact_posterior_lanczos()
                    self.lanczos_q_mat[:, :q_mat.size(1)].copy_(q_mat)
                    self.lanczos_t_mat[:t_mat.size(0), :t_mat.size(1)].copy_(
                        t_mat)
                    self.has_computed_lanczos.fill_(1)

                mean_strategy = gpytorch.posterior_strategy(test_train_covar)
                test_mean = mean_strategy.exact_posterior_mean(
                    test_mean, alpha)
                if gpytorch.functions.fast_pred_var:
                    covar_strategy = gpytorch.posterior_strategy(full_covar)
                    test_covar = covar_strategy.exact_posterior_covar_fast(
                        Variable(self.lanczos_q_mat),
                        Variable(self.lanczos_t_mat))
                else:
                    covar_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    test_covar = covar_strategy.exact_posterior_covar(
                        test_train_covar, train_test_covar, test_test_covar)
                output = GaussianRandomVariable(test_mean, test_covar)

            # Approximate inference
            else:
                output = super(GPModel, self).__call__(*args, **kwargs)

        # Training or Prior mode
        else:
            output = super(GPModel, self).__call__(*args, **kwargs)
            if self.conditioning:
                # Reset alpha cache
                _, covar = output.representation()
                self.has_computed_alpha.fill_(0)
                self.alpha.resize_(
                    gpytorch.posterior_strategy(covar).alpha_size())
                self.has_computed_lanczos.fill_(0)
                lanczos_q_size, lanczos_t_size = gpytorch.posterior_strategy(
                    covar).lanczos_size()
                self.lanczos_q_mat.resize_(lanczos_q_size).zero_()
                lanczos_t_mat_init = torch.eye(*lanczos_t_size).type_as(
                    self.lanczos_t_mat)
                self.lanczos_t_mat.resize_(lanczos_t_size).copy_(
                    lanczos_t_mat_init)

        # Don't go through the likelihood if we're training a variational inference model
        if self.training and not self.exact_inference:
            return output

        # Now go through the likelihood
        if isinstance(output, (Variable, RandomVariable, LazyVariable)):
            output = (output, )
        return self.likelihood(*output)
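The exact-inference branch above is a cached, strategy-based implementation of the standard GP posterior equations. A dense, uncached sketch of the same math (function and argument names here are illustrative, not GPyTorch API):

import torch

def exact_gp_posterior(train_y, train_mean, test_mean,
                       train_train_covar, test_train_covar,
                       test_test_covar):
    # alpha = (K_XX + sigma^2 I)^{-1} (y - mu_X); the noise term is
    # assumed to be folded into train_train_covar already, which is
    # exactly what the add_diag call above arranges.
    alpha = torch.linalg.solve(train_train_covar,
                               (train_y - train_mean).unsqueeze(-1))
    posterior_mean = test_mean + (test_train_covar @ alpha).squeeze(-1)
    # Posterior covariance: K_** - K_*X (K_XX + sigma^2 I)^{-1} K_X*.
    solve = torch.linalg.solve(train_train_covar,
                               test_train_covar.transpose(-1, -2))
    posterior_covar = test_test_covar - test_train_covar @ solve
    return posterior_mean, posterior_covar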
Example #7
    def __call__(self, *args, **kwargs):
        output = None

        # Posterior mode
        if self.posterior:
            train_xs = self.train_inputs
            train_y = self.train_target
            if all([
                    torch.equal(train_x.data, input.data)
                    for train_x, input in zip(train_xs, args)
            ]):
                logging.warning('The input matches the stored training data. '
                                'Did you forget to call model.train()?')

            # Exact inference
            if self.exact_inference:
                n_train = len(train_xs[0])
                full_inputs = [
                    torch.cat([train_x, input])
                    for train_x, input in zip(train_xs, args)
                ]
                full_output = super(GPModel,
                                    self).__call__(*full_inputs, **kwargs)
                full_mean, full_covar = full_output.representation()

                train_mean = full_mean[:n_train]
                test_mean = full_mean[n_train:]
                train_train_covar = gpytorch.add_diag(
                    full_covar[:n_train, :n_train],
                    self.likelihood.log_noise.exp())
                train_test_covar = full_covar[:n_train, n_train:]
                test_train_covar = full_covar[n_train:, :n_train]
                test_test_covar = full_covar[n_train:, n_train:]

                # Calculate posterior components
                if not self.has_computed_alpha[0]:
                    alpha_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    alpha = alpha_strategy.exact_posterior_alpha(
                        train_mean, train_y)
                    self.alpha.copy_(alpha.data)
                    self.has_computed_alpha.fill_(1)
                else:
                    alpha = Variable(self.alpha)
                mean_strategy = gpytorch.posterior_strategy(test_train_covar)
                test_mean = mean_strategy.exact_posterior_mean(
                    test_mean, alpha)
                covar_strategy = gpytorch.posterior_strategy(train_train_covar)
                test_covar = covar_strategy.exact_posterior_covar(
                    test_train_covar, train_test_covar, test_test_covar)
                output = GaussianRandomVariable(test_mean, test_covar)

            # Approximate inference
            else:
                # Ensure variational parameters have been initialized
                if not self.variational_mean.numel():
                    raise RuntimeError(
                        'Variational parameters have not been initialized. '
                        'Condition on data.')

                # Get inducing points
                if hasattr(self, 'inducing_points'):
                    inducing_points = Variable(self.inducing_points)
                else:
                    inducing_points = train_xs[0]

                n_induc = len(inducing_points)
                full_input = torch.cat([inducing_points, args[0]])
                full_output = super(GPModel,
                                    self).__call__(full_input, **kwargs)
                full_mean, full_covar = full_output.representation()

                test_mean = full_mean[n_induc:]
                induc_induc_covar = full_covar[:n_induc, :n_induc]
                induc_test_covar = full_covar[:n_induc, n_induc:]
                test_induc_covar = full_covar[n_induc:, :n_induc]
                test_test_covar = full_covar[n_induc:, n_induc:]

                # Calculate posterior components
                if not self.has_computed_alpha[0]:
                    alpha_strategy = gpytorch.posterior_strategy(
                        induc_induc_covar)
                    alpha = alpha_strategy.variational_posterior_alpha(
                        self.variational_mean)
                    self.alpha.copy_(alpha.data)
                    self.has_computed_alpha.fill_(1)
                else:
                    alpha = Variable(self.alpha)
                mean_strategy = gpytorch.posterior_strategy(test_induc_covar)
                test_mean = mean_strategy.variational_posterior_mean(alpha)
                covar_strategy = gpytorch.posterior_strategy(test_induc_covar)
                test_covar = covar_strategy.variational_posterior_covar(
                    induc_test_covar, self.chol_variational_covar,
                    test_test_covar, induc_induc_covar)
                output = GaussianRandomVariable(test_mean, test_covar)

        # Training or Prior mode
        else:
            output = super(GPModel, self).__call__(*args, **kwargs)
            # Add some jitter
            if not self.exact_inference:
                mean, covar = output.representation()
                covar = gpytorch.add_jitter(covar)
                output = GaussianRandomVariable(mean, covar)

            if self.conditioning:
                # Reset alpha cache
                _, covar = output.representation()
                self.has_computed_alpha.fill_(0)
                self.alpha.resize_(
                    gpytorch.posterior_strategy(covar).alpha_size())

        # Now go through the likelihood
        if isinstance(output, (Variable, RandomVariable, LazyVariable)):
            output = (output, )
        return self.likelihood(*output)
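The gpytorch.add_jitter call in the approximate branch uses the same diagonal trick for numerical stability rather than for modeling noise: a small constant on the diagonal keeps the covariance positive definite for downstream factorizations. A sketch, with an assumed jitter value:

import torch

def add_jitter_sketch(covar, jitter=1e-4):
    # The 1e-4 default is illustrative; the exact constant this
    # gpytorch version used is an assumption here.
    return covar + jitter * torch.eye(covar.size(0), dtype=covar.dtype)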