Example No. 1
import torch
import gpytorch
from torch.autograd import Variable


def test_normal_gp_mll_backward():
    covar = torch.Tensor([
        [5, -3, 0],
        [-3, 5, 0],
        [0, 0, 2],
    ])
    y = torch.randn(3)

    actual_mat_grad = torch.ger(covar.inverse().mv(y), covar.inverse().mv(y))
    actual_mat_grad -= covar.inverse()
    actual_mat_grad *= 0.5
    actual_mat_grad *= 3  # For grad output

    actual_y_grad = -covar.inverse().mv(y)
    actual_y_grad *= 3  # For grad output

    covarvar = Variable(covar, requires_grad=True)
    yvar = Variable(y, requires_grad=True)
    # Number of samples for the stochastic trace estimates in the fast path
    gpytorch.functions.num_trace_samples = 1000
    output = gpytorch.exact_gp_marginal_log_likelihood(covarvar, yvar) * 3
    output.backward()

    # Loose tolerance on the covariance gradient: the fast path uses
    # stochastic trace estimation, so this gradient is only approximate
    assert torch.norm(actual_mat_grad - covarvar.grad.data) < 1e-1
    assert torch.norm(actual_y_grad - yvar.grad.data) < 1e-4

    # Repeat the check with the fast path disabled
    gpytorch.functions.fastest = False
    covarvar = Variable(covar, requires_grad=True)
    yvar = Variable(y, requires_grad=True)
    output = gpytorch.exact_gp_marginal_log_likelihood(covarvar, yvar) * 3
    output.backward()

    assert torch.norm(actual_mat_grad - covarvar.grad.data) < 1e-1
    assert torch.norm(actual_y_grad - yvar.grad.data) < 1e-4
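For reference, the analytic gradients checked above follow from differentiating the Gaussian marginal log likelihood \(\mathcal{L} = \log p(\mathbf{y} \mid K)\); the extra factor of 3 comes from the `* 3` grad-output scaling:

\[
\frac{\partial \mathcal{L}}{\partial K} = \frac{1}{2}\left(K^{-1}\mathbf{y}\mathbf{y}^\top K^{-1} - K^{-1}\right),
\qquad
\frac{\partial \mathcal{L}}{\partial \mathbf{y}} = -K^{-1}\mathbf{y}
\]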
Example No. 2
    def marginal_log_likelihood(self, output, target, n_data=None):
        """
        Returns the marginal log likelihood of the data

        Args:
        - output: (GaussianRandomVariable) - the output of the model
        - target: (Variable) - target
        - n_data: (int) - total number of data points in the set (required only for SGD)
        """
        if n_data is None:
            n_data = len(target)
        n_batch = output.mean().size(0)

        # Exact inference
        if self.exact_inference:
            mean, covar = output.representation()
            return gpytorch.exact_gp_marginal_log_likelihood(
                covar, target - mean).div(n_data)

        # Approximate inference
        else:
            samples = output._variational_strategy.variational_samples(output)
            n_samples = samples.size(1)
            # Evaluate the likelihood at every variational sample; the targets
            # are repeated so they line up with the flattened samples
            log_likelihood = self.likelihood.log_probability(
                samples.view(-1),
                target.unsqueeze(1).repeat(1, n_samples).view(-1))
            log_likelihood = log_likelihood.div(n_samples).div(n_batch)
            kl_divergence = output._variational_strategy.mvn_kl_divergence().div(n_data)

            res = log_likelihood - kl_divergence
            return res
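A minimal training-step sketch using this method as the loss (hypothetical `model`, `optimizer`, `train_x`, and `train_y`; assumes the Variable-era gpytorch API used throughout these examples):

output = model(train_x)  # a GaussianRandomVariable
# Maximize the marginal log likelihood / ELBO by minimizing its negative
loss = -model.marginal_log_likelihood(output, train_y, n_data=len(train_y))
optimizer.zero_grad()
loss.backward()
optimizer.step()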
Example No. 3
import math

import numpy as np
import torch
import gpytorch
from torch.autograd import Variable


# Assumes module-level `covar` and `y` fixtures; Example No. 7 shows the
# same test with them defined inline.
def test_forward():
    actual = y.dot(covar.inverse().mv(y))
    actual += math.log(np.linalg.det(covar.numpy()))
    actual += math.log(2 * math.pi) * len(y)
    actual *= -0.5

    covarvar = Variable(covar)
    yvar = Variable(y)

    res = gpytorch.exact_gp_marginal_log_likelihood(covarvar, yvar)
    # Take abs of the quotient so the relative-error check is meaningful
    # when res.data is negative (as a log likelihood typically is)
    assert all(torch.abs((actual - res.data) / res.data) < 0.1)
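The closed form checked here is the standard log marginal likelihood of a zero-mean Gaussian:

\[
\log p(\mathbf{y} \mid K) = -\frac{1}{2}\left(\mathbf{y}^\top K^{-1}\mathbf{y} + \log\det K + n\log 2\pi\right)
\]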
Example No. 4
    def marginal_log_likelihood(self, output, target):
        """
        Returns the marginal log likelihood of the data

        Args:
        - output: (GaussianRandomVariable) - the output of the model
        - target: (Variable) - target
        """
        mean, covar = output.representation()

        # Exact inference
        if self.exact_inference:
            return gpytorch.exact_gp_marginal_log_likelihood(
                covar, target - mean)

        # Approximate inference
        else:
            # Get inducing points
            if not hasattr(self, 'train_inputs'):
                raise RuntimeError('Must condition on data.')

            train_x = self.train_inputs[0]
            if hasattr(self, 'inducing_points'):
                inducing_points = Variable(self.inducing_points)
            else:
                inducing_points = train_x

            chol_var_covar = self.chol_variational_covar.triu()
            # Negate any row whose diagonal entry is negative: a valid
            # Cholesky factor must have a strictly positive diagonal.
            inside = chol_var_covar.diag().sign().unsqueeze(1).expand_as(
                chol_var_covar).triu()
            chol_var_covar = chol_var_covar.mul(inside)

            train_covar = covar  # same covariance already extracted above
            inducing_output = super(GPModel, self).__call__(inducing_points)
            inducing_mean, inducing_covar = inducing_output.representation()

            train_covar = gpytorch.add_jitter(train_covar)
            log_likelihood = gpytorch.monte_carlo_log_likelihood(
                self.likelihood.log_probability, target, self.variational_mean,
                chol_var_covar, train_covar)

            inducing_covar = gpytorch.add_jitter(inducing_covar)
            kl_divergence = gpytorch.mvn_kl_divergence(self.variational_mean,
                                                       chol_var_covar,
                                                       inducing_mean,
                                                       inducing_covar)

            res = log_likelihood.squeeze() - kl_divergence
            return res
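The quantity returned by the approximate branch is the standard variational evidence lower bound (ELBO): a Monte Carlo estimate of the expected log likelihood under the variational distribution, minus the KL divergence from the variational distribution over the inducing values to their prior:

\[
\mathcal{L}_{\text{ELBO}} = \mathbb{E}_{q(\mathbf{f})}\!\left[\log p(\mathbf{y} \mid \mathbf{f})\right] - \mathrm{KL}\!\left(q(\mathbf{u}) \,\|\, p(\mathbf{u})\right)
\]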
Example No. 5
import torch
import gpytorch
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel, GridInterpolationKernel


def test_kp_toeplitz_gp_marginal_log_likelihood_forward():
    x = torch.cat([Variable(torch.linspace(0, 1, 2)).unsqueeze(1)] * 3, 1)
    y = torch.randn(2)
    rbf_module = RBFKernel()
    rbf_module.initialize(log_lengthscale=-2)
    covar_module = GridInterpolationKernel(rbf_module)
    covar_module.eval()
    covar_module.initialize_interpolation_grid(5, [(0, 1), (0, 1), (0, 1)])

    kronecker_var = covar_module.forward(x, x)
    kronecker_var_eval = kronecker_var.evaluate()
    res = kronecker_var.exact_gp_marginal_log_likelihood(Variable(y)).data
    actual = gpytorch.exact_gp_marginal_log_likelihood(kronecker_var_eval,
                                                       Variable(y)).data
    assert all(torch.abs((res - actual) / actual) < 0.05)
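The comparison pattern above (structured result versus dense evaluation, element-wise relative error) recurs in these tests and can be factored into a small helper. A minimal sketch, with `assert_relative_close` being a hypothetical name:

def assert_relative_close(res, actual, tol=0.05):
    # Element-wise relative error against the dense reference values
    assert all(torch.abs((res - actual) / actual) < tol)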
Example No. 6
import torch
import gpytorch
from torch.autograd import Variable


# Assumes module-level `covar` and `y` fixtures, as in Example No. 1.
def test_backward():
    actual_mat_grad = torch.ger(covar.inverse().mv(y), covar.inverse().mv(y))
    actual_mat_grad -= covar.inverse()
    actual_mat_grad *= 0.5
    actual_mat_grad *= 3  # For grad output

    actual_y_grad = -covar.inverse().mv(y)
    actual_y_grad *= 3  # For grad output

    covarvar = Variable(covar, requires_grad=True)
    yvar = Variable(y, requires_grad=True)
    output = gpytorch.exact_gp_marginal_log_likelihood(covarvar, yvar) * 3
    output.backward()

    assert torch.norm(actual_mat_grad - covarvar.grad.data) < 1e-4
    assert torch.norm(actual_y_grad - yvar.grad.data) < 1e-4
Example No. 7
import math

import numpy as np
import torch
import gpytorch
from torch.autograd import Variable


def test_normal_gp_mll_forward():
    covar = torch.Tensor([
        [5, -3, 0],
        [-3, 5, 0],
        [0, 0, 2],
    ])
    y = torch.randn(3)

    actual = y.dot(covar.inverse().mv(y))
    actual += math.log(np.linalg.det(covar.numpy()))
    actual += math.log(2 * math.pi) * len(y)
    actual *= -0.5

    covarvar = Variable(covar)
    yvar = Variable(y)

    res = gpytorch.exact_gp_marginal_log_likelihood(covarvar, yvar)
    # Take abs of the quotient so the relative-error check is meaningful
    # when res.data is negative (as a log likelihood typically is)
    assert all(torch.abs((actual - res.data) / res.data) < 0.1)