# These tests use the pre-0.2 gpytorch / torch.autograd.Variable API; the
# lazy-variable import path below follows that era's layout (an assumption,
# since these classes were later renamed and moved).
import torch
from torch.autograd import Variable

import gpytorch
from gpytorch.lazy import (InterpolatedLazyVariable,
                           KroneckerProductLazyVariable, ToeplitzLazyVariable)


def test_exact_posterior():
    train_mean = Variable(torch.randn(4))
    train_y = Variable(torch.randn(4))
    test_mean = Variable(torch.randn(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([6, 0, 1, -1]), requires_grad=True)
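    # Identity interpolation: each of the four rows selects a single entry
    # with weight 1, so the InterpolatedLazyVariable wraps the underlying
    # Toeplitz matrix unchanged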
    indices = Variable(torch.arange(0, 4).long().view(4, 1))
    values = Variable(torch.ones(4).view(4, 1))
    toeplitz_1 = InterpolatedLazyVariable(ToeplitzLazyVariable(c1_var),
                                          indices, values, indices, values)
    toeplitz_2 = InterpolatedLazyVariable(ToeplitzLazyVariable(c2_var),
                                          indices, values, indices, values)
    sum_lv = toeplitz_1 + toeplitz_2
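    # sum_lv lazily represents T(c1_var) + T(c2_var) without forming either
    # dense matrix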

    # Dense reference
    actual = sum_lv.evaluate()

    # Test forward
    actual_alpha = gpytorch.posterior_strategy(actual).exact_posterior_alpha(
        train_mean, train_y)
    actual_mean = gpytorch.posterior_strategy(actual).exact_posterior_mean(
        test_mean, actual_alpha)
    sum_lv_alpha = sum_lv.posterior_strategy().exact_posterior_alpha(
        train_mean, train_y)
    sum_lv_mean = sum_lv.posterior_strategy().exact_posterior_mean(
        test_mean, sum_lv_alpha)
    assert (torch.norm(actual_mean.data - sum_lv_mean.data) < 1e-4)
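
# For reference, the dense form of what the two strategy calls compute
# (a minimal sketch of the standard GP identities; dense_posterior_mean is
# illustrative and not part of the gpytorch API, and this test reuses one
# matrix as both the train/train and test/train blocks):
def dense_posterior_mean(train_mean, train_y, test_mean, covar):
    # alpha = K^{-1} (y - mu_train); posterior mean = mu_test + K_{*,train} alpha
    alpha = torch.inverse(covar).mv(train_y - train_mean)
    return test_mean + covar.mv(alpha)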
Example #2
def test_exact_posterior():
    train_mean = Variable(torch.randn(4))
    train_y = Variable(torch.randn(4))
    test_mean = Variable(torch.randn(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
    c3_var = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
    indices_1 = torch.arange(0, 4).long().view(4, 1)
    values_1 = torch.ones(4).view(4, 1)
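    # Batched identity interpolation for the two 2 x 2 Kronecker factors
    # (their Kronecker product gives the 4 x 4 matrix under test)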
    indices_2 = torch.arange(0, 2).expand(4, 2).long().view(2, 4, 1)
    values_2 = torch.ones(8).view(2, 4, 1)
    indices_3 = torch.arange(0, 4).long().view(4, 1)
    values_3 = torch.ones(4).view(4, 1)
    toeplitz_1 = InterpolatedLazyVariable(ToeplitzLazyVariable(c1_var),
                                          Variable(indices_1),
                                          Variable(values_1),
                                          Variable(indices_1),
                                          Variable(values_1))
    kronecker_product = KroneckerProductLazyVariable(c2_var, indices_2,
                                                     values_2, indices_2,
                                                     values_2)
    toeplitz_2 = InterpolatedLazyVariable(ToeplitzLazyVariable(c3_var),
                                          Variable(indices_3),
                                          Variable(values_3),
                                          Variable(indices_3),
                                          Variable(values_3))
    mul_lv = toeplitz_1 * kronecker_product * toeplitz_2
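    # mul_lv lazily represents the elementwise (Hadamard) product of the
    # three 4 x 4 matrices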

    # Dense reference
    actual = mul_lv.evaluate()
    # Test forward
    actual_alpha = gpytorch.posterior_strategy(actual).exact_posterior_alpha(
        train_mean, train_y)
    actual_mean = gpytorch.posterior_strategy(actual).exact_posterior_mean(
        test_mean, actual_alpha)
    mul_lv_alpha = mul_lv.posterior_strategy().exact_posterior_alpha(
        train_mean, train_y)
    mul_lv_mean = mul_lv.posterior_strategy().exact_posterior_mean(
        test_mean, mul_lv_alpha)
    assert (torch.norm(actual_mean.data - mul_lv_mean.data) < 1e-3)
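
# A sketch of the equivalence being tested: the dense reference is just the
# Hadamard product of the densely evaluated components (dense_hadamard is
# illustrative, not part of the gpytorch API):
def dense_hadamard(*lazy_vars):
    # Elementwise product of the evaluated components, e.g.
    # dense_hadamard(toeplitz_1, kronecker_product, toeplitz_2)
    out = lazy_vars[0].evaluate()
    for lazy_var in lazy_vars[1:]:
        out = out * lazy_var.evaluate()
    return out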
Example #3
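    # (Method body from a GPModel subclass in the pre-0.2 gpytorch API; it
    # assumes attributes set up elsewhere on the model: train_inputs,
    # train_target, the alpha / Lanczos caches, and self.likelihood.)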
    def __call__(self, *args, **kwargs):
        output = None

        # Posterior mode
        if self.posterior:
            train_xs = self.train_inputs
            train_y = self.train_target
            if all(torch.equal(train_x.data, input.data)
                   for train_x, input in zip(train_xs, args)):
                logging.warning('The input matches the stored training data. '
                                'Did you forget to call model.train()?')

            # Exact inference
            if self.exact_inference:
                n_train = len(train_xs[0])
                full_inputs = [
                    torch.cat([train_x, input])
                    for train_x, input in zip(train_xs, args)
                ]
                full_output = super(GPModel,
                                    self).__call__(*full_inputs, **kwargs)
                full_mean, full_covar = full_output.representation()

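                # Partition the joint prior over [train, test] into blocks;
                # observation noise is added to the train/train block only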
                train_mean = full_mean[:n_train]
                test_mean = full_mean[n_train:]
                train_train_covar = gpytorch.add_diag(
                    full_covar[:n_train, :n_train],
                    self.likelihood.log_noise.exp())
                train_test_covar = full_covar[:n_train, n_train:]
                test_train_covar = full_covar[n_train:, :n_train]
                test_test_covar = full_covar[n_train:, n_train:]

                # Calculate posterior components
                if not self.has_computed_alpha[0]:
                    alpha_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    alpha = alpha_strategy.exact_posterior_alpha(
                        train_mean, train_y)
                    self.alpha.copy_(alpha.data)
                    self.has_computed_alpha.fill_(1)
                else:
                    alpha = Variable(self.alpha)

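                # Cache a low-rank Lanczos decomposition (Q, T) of the
                # train/train covariance for the fast predictive variance
                # path below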
                if (not self.has_computed_lanczos[0]
                        and gpytorch.functions.fast_pred_var):
                    lanczos_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    q_mat, t_mat = lanczos_strategy.exact_posterior_lanczos()
                    self.lanczos_q_mat[:, :q_mat.size(1)].copy_(q_mat)
                    self.lanczos_t_mat[:t_mat.size(0), :t_mat.size(1)].copy_(
                        t_mat)
                    self.has_computed_lanczos.fill_(1)

                mean_strategy = gpytorch.posterior_strategy(test_train_covar)
                test_mean = mean_strategy.exact_posterior_mean(
                    test_mean, alpha)
                if gpytorch.functions.fast_pred_var:
                    covar_strategy = gpytorch.posterior_strategy(full_covar)
                    test_covar = covar_strategy.exact_posterior_covar_fast(
                        Variable(self.lanczos_q_mat),
                        Variable(self.lanczos_t_mat))
                else:
                    covar_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    test_covar = covar_strategy.exact_posterior_covar(
                        test_train_covar, train_test_covar, test_test_covar)
                output = GaussianRandomVariable(test_mean, test_covar)

            # Approximate inference
            else:
                output = super(GPModel, self).__call__(*args, **kwargs)

        # Training or Prior mode
        else:
            output = super(GPModel, self).__call__(*args, **kwargs)
            if self.conditioning:
                # Reset and resize the alpha and Lanczos caches to match the
                # new covariance's posterior strategy
                _, covar = output.representation()
                self.has_computed_alpha.fill_(0)
                self.alpha.resize_(
                    gpytorch.posterior_strategy(covar).alpha_size())
                self.has_computed_lanczos.fill_(0)
                lanczos_q_size, lanczos_t_size = gpytorch.posterior_strategy(
                    covar).lanczos_size()
                self.lanczos_q_mat.resize_(lanczos_q_size).zero_()
                lanczos_t_mat_init = torch.eye(*lanczos_t_size).type_as(
                    self.lanczos_t_mat)
                self.lanczos_t_mat.resize_(lanczos_t_size).copy_(
                    lanczos_t_mat_init)

        # Don't pass the output through the likelihood if we're training a
        # variational inference model
        if self.training and not self.exact_inference:
            return output

        # Now go through the likelihood
        if isinstance(output, (Variable, RandomVariable, LazyVariable)):
            output = (output, )
        return self.likelihood(*output)
Example #4
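    # (A variant of the same GPModel.__call__ that also implements the
    # variational / approximate-inference posterior path.)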
    def __call__(self, *args, **kwargs):
        output = None

        # Posterior mode
        if self.posterior:
            train_xs = self.train_inputs
            train_y = self.train_target
            if all(torch.equal(train_x.data, input.data)
                   for train_x, input in zip(train_xs, args)):
                logging.warning('The input matches the stored training data. '
                                'Did you forget to call model.train()?')

            # Exact inference
            if self.exact_inference:
                n_train = len(train_xs[0])
                full_inputs = [
                    torch.cat([train_x, input])
                    for train_x, input in zip(train_xs, args)
                ]
                full_output = super(GPModel,
                                    self).__call__(*full_inputs, **kwargs)
                full_mean, full_covar = full_output.representation()

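                # Partition into train/test blocks, with observation noise on
                # the train/train block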
                train_mean = full_mean[:n_train]
                test_mean = full_mean[n_train:]
                train_train_covar = gpytorch.add_diag(
                    full_covar[:n_train, :n_train],
                    self.likelihood.log_noise.exp())
                train_test_covar = full_covar[:n_train, n_train:]
                test_train_covar = full_covar[n_train:, :n_train]
                test_test_covar = full_covar[n_train:, n_train:]

                # Calculate posterior components
                if not self.has_computed_alpha[0]:
                    alpha_strategy = gpytorch.posterior_strategy(
                        train_train_covar)
                    alpha = alpha_strategy.exact_posterior_alpha(
                        train_mean, train_y)
                    self.alpha.copy_(alpha.data)
                    self.has_computed_alpha.fill_(1)
                else:
                    alpha = Variable(self.alpha)
                mean_strategy = gpytorch.posterior_strategy(test_train_covar)
                test_mean = mean_strategy.exact_posterior_mean(
                    test_mean, alpha)
                covar_strategy = gpytorch.posterior_strategy(train_train_covar)
                test_covar = covar_strategy.exact_posterior_covar(
                    test_train_covar, train_test_covar, test_test_covar)
                output = GaussianRandomVariable(test_mean, test_covar)

            # Approximate inference
            else:
                # Ensure variational parameters have been initialized
                if not self.variational_mean.numel():
                    raise RuntimeError(
                        'Variational parameters have not been initialized. '
                        'Condition on data.')

                # Get inducing points
                if hasattr(self, 'inducing_points'):
                    inducing_points = Variable(self.inducing_points)
                else:
                    inducing_points = train_xs[0]

                n_induc = len(inducing_points)
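                # Joint prior over [inducing, test] points, partitioned below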
                full_input = torch.cat([inducing_points, args[0]])
                full_output = super(GPModel,
                                    self).__call__(full_input, **kwargs)
                full_mean, full_covar = full_output.representation()

                test_mean = full_mean[n_induc:]
                induc_induc_covar = full_covar[:n_induc, :n_induc]
                induc_test_covar = full_covar[:n_induc, n_induc:]
                test_induc_covar = full_covar[n_induc:, :n_induc]
                test_test_covar = full_covar[n_induc:, n_induc:]

                # Calculate posterior components
                if not self.has_computed_alpha[0]:
                    alpha_strategy = gpytorch.posterior_strategy(
                        induc_induc_covar)
                    alpha = alpha_strategy.variational_posterior_alpha(
                        self.variational_mean)
                    self.alpha.copy_(alpha.data)
                    self.has_computed_alpha.fill_(1)
                else:
                    alpha = Variable(self.alpha)
                mean_strategy = gpytorch.posterior_strategy(test_induc_covar)
                test_mean = mean_strategy.variational_posterior_mean(alpha)
                covar_strategy = gpytorch.posterior_strategy(test_induc_covar)
                test_covar = covar_strategy.variational_posterior_covar(
                    induc_test_covar, self.chol_variational_covar,
                    test_test_covar, induc_induc_covar)
                output = GaussianRandomVariable(test_mean, test_covar)

        # Training or Prior mode
        else:
            output = super(GPModel, self).__call__(*args, **kwargs)
            # Add diagonal jitter for numerical stability
            if not self.exact_inference:
                mean, covar = output.representation()
                covar = gpytorch.add_jitter(covar)
                output = GaussianRandomVariable(mean, covar)

            if self.conditioning:
                # Reset alpha cache
                _, covar = output.representation()
                self.has_computed_alpha.fill_(0)
                self.alpha.resize_(
                    gpytorch.posterior_strategy(covar).alpha_size())

        # Now go through the likelihood
        if isinstance(output, (Variable, RandomVariable, LazyVariable)):
            output = (output, )
        return self.likelihood(*output)