def test_log_det_only(self):
    """Forward/backward check of log_det against a dense reference matrix."""
    # Forward: use many trace samples so the stochastic estimate is tight
    # enough for a places=1 comparison.
    with gpytorch.settings.num_trace_samples(1000):
        estimated = NonLazyVariable(self.mat_var).log_det()
    reference = self.mat_var_clone.det().log()
    self.assertAlmostEqual(estimated.item(), reference.item(), places=1)

    # Backward: matrix gradients should agree to within a loose tolerance,
    # since the estimated log_det gradient is also stochastic.
    reference.backward()
    estimated.backward()
    self.assertTrue(
        approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1)
    )
def test_log_det_only(self):
    """Check log_det forward value and the analytic matrix gradient.

    The gradient of log|A| w.r.t. A is A^{-1}, so backpropagating a scalar
    grad_output g should leave grad = g * A^{-1} on the matrix.
    """
    # Forward pass against the precomputed fixture value.
    with gpytorch.settings.num_trace_samples(1000):
        estimated = NonLazyVariable(self.mat_var).log_det()
    self.assert_scalar_almost_equal(estimated, self.log_det, places=1)

    # Backward pass with a non-unit upstream gradient.
    grad_output = torch.Tensor([3])
    expected_mat_grad = self.mat_var_clone.data.inverse().mul(grad_output)
    estimated.backward(gradient=grad_output)
    self.assertTrue(
        approx_equal(expected_mat_grad, self.mat_var.grad.data, epsilon=1e-1)
    )
def test_inv_quad_only_many_vectors(self):
    """Compare inv_quad (sum of v^T A^{-1} v terms) to a dense computation."""
    # Forward: dense reference computes A^{-1} V elementwise-multiplied by V,
    # summed over everything — the same scalar inv_quad returns.
    estimated = NonLazyVariable(self.mat_var).inv_quad(self.vecs_var)
    solve = self.mat_var_clone.inverse().matmul(self.vecs_var_clone)
    reference = solve.mul(self.vecs_var_clone).sum()
    self.assertAlmostEqual(estimated.item(), reference.item(), places=1)

    # Backward: both scalars backprop an implicit gradient of 1.
    reference.backward()
    estimated.backward()
    self.assertTrue(
        approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1)
    )
    self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
def test_matmul_multiple_vecs(self):
    """Check lazy matmul against dense matmul, forward and backward.

    Bug fix: the original used ``torch.Tensor(3, 4)`` for the upstream
    gradient, which allocates *uninitialized* memory — it can contain
    garbage (even NaN/inf), making the gradient comparison flaky.
    Use ``torch.randn(3, 4)`` instead, consistent with the vector variant
    of this test.
    """
    # Forward
    res = NonLazyVariable(self.mat_var).matmul(self.vecs_var)
    actual = self.mat_var_clone.matmul(self.vecs_var_clone)
    self.assertTrue(approx_equal(res, actual))

    # Backward: same (well-defined) upstream gradient through both paths.
    grad_output = torch.randn(3, 4)
    res.backward(gradient=grad_output)
    actual.backward(gradient=grad_output)
    self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data))
    self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
def test_inv_matmul_vec(self):
    """Check inv_matmul on a single vector against an explicit dense solve."""
    # Forward: A^{-1} v via the lazy variable vs. an explicit inverse.
    lazy_solve = NonLazyVariable(self.mat_var).inv_matmul(self.vec_var)
    dense_solve = self.mat_var_clone.inverse().matmul(self.vec_var_clone)
    self.assertTrue(approx_equal(lazy_solve, dense_solve))

    # Backward: push the same random upstream gradient through both paths.
    grad_output = torch.randn(3)
    lazy_solve.backward(gradient=grad_output)
    dense_solve.backward(gradient=grad_output)
    self.assertTrue(approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data))
    self.assertTrue(approx_equal(self.vec_var_clone.grad.data, self.vec_var.grad.data))
def test_inv_quad_only_many_vectors(self):
    """Check inv_quad value and gradients with a non-unit upstream gradient."""
    # Forward: dense reference for the scalar sum of v^T A^{-1} v terms.
    estimated = NonLazyVariable(self.mat_var).inv_quad(self.vecs_var)
    solve = self.mat_var_clone.inverse().matmul(self.vecs_var_clone)
    reference = solve.mul(self.vecs_var_clone).sum()
    self.assert_scalar_almost_equal(estimated, reference, places=1)

    # Backward: scale both paths by the same random scalar gradient.
    inv_quad_grad_output = torch.randn(1)
    reference.backward(gradient=inv_quad_grad_output)
    estimated.backward(gradient=inv_quad_grad_output)
    self.assertTrue(
        approx_equal(self.mat_var_clone.grad.data, self.mat_var.grad.data, epsilon=1e-1)
    )
    self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
def test_log_det_only(self):
    """Batch log_det: compare per-matrix values and gradients to dense refs."""
    # Forward: stack the two dense per-matrix log-determinants into a batch.
    with gpytorch.settings.num_trace_samples(1000):
        estimated = NonLazyVariable(self.mats_var).log_det()
    per_matrix = [
        self.mats_var_clone[0].det().log().unsqueeze(0),
        self.mats_var_clone[1].det().log().unsqueeze(0),
    ]
    reference = torch.cat(per_matrix)
    self.assertTrue(approx_equal(estimated.data, reference.data, epsilon=1e-1))

    # Backward: distinct upstream gradients per batch element.
    grad_output = torch.Tensor([3, 4])
    reference.backward(gradient=grad_output)
    estimated.backward(gradient=grad_output)
    self.assertTrue(
        approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1)
    )
def test_log_det_only(self):
    """Batch log_det against fixture values, plus the analytic gradient.

    d(log|A_i|)/dA_i = A_i^{-1}, so each batch element's gradient is its
    inverse scaled by that element's upstream gradient.
    """
    # Forward: each batch element matches its precomputed log-determinant.
    with gpytorch.settings.num_trace_samples(1000):
        estimated = NonLazyVariable(self.mats_var).log_det()
    for i in range(self.mats_var.size(0)):
        self.assert_scalar_almost_equal(estimated.data[i], self.log_dets[i], places=1)

    # Backward: build the expected gradient analytically, one slice per matrix.
    grad_output = torch.Tensor([3, 4])
    expected_mat_grad = torch.cat([
        self.mats_var_clone[i].data.inverse().mul(grad_output[i]).unsqueeze(0)
        for i in range(2)
    ])
    estimated.backward(gradient=grad_output)
    self.assertTrue(
        approx_equal(expected_mat_grad, self.mats_var.grad.data, epsilon=1e-1)
    )
def test_inv_matmul_multiple_vecs(self):
    """Batch inv_matmul on multiple vectors vs. explicit batched inverses."""
    # Forward: stack the two dense inverses and apply them to the vectors.
    lazy_solve = NonLazyVariable(self.mats_var).inv_matmul(self.vecs_var)
    batched_inverse = torch.cat([
        self.mats_var_clone[0].inverse().unsqueeze(0),
        self.mats_var_clone[1].inverse().unsqueeze(0),
    ])
    dense_solve = batched_inverse.matmul(self.vecs_var_clone)
    self.assertTrue(approx_equal(lazy_solve, dense_solve))

    # Backward: identical random upstream gradient through both paths.
    grad_output = torch.randn(2, 3, 4)
    lazy_solve.backward(gradient=grad_output)
    dense_solve.backward(gradient=grad_output)
    self.assertTrue(approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data))
    self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
def test_inv_quad_only_many_vectors(self):
    """Summed batch inv_quad vs. a dense batched computation, with gradients."""
    # Forward: total inv_quad over the whole batch as a single scalar.
    estimated = NonLazyVariable(self.mats_var).inv_quad(self.vecs_var).sum()
    batched_inverse = torch.cat([
        self.mats_var_clone[0].inverse().unsqueeze(0),
        self.mats_var_clone[1].inverse().unsqueeze(0),
    ])
    per_batch = batched_inverse.matmul(self.vecs_var_clone).mul(self.vecs_var_clone).sum(2).sum(1)
    reference = per_batch.sum()
    self.assertTrue(approx_equal(estimated.data, reference.data, epsilon=1e-1))

    # Backward: both scalars backprop an implicit gradient of 1.
    reference.backward()
    estimated.backward()
    self.assertTrue(
        approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1)
    )
    self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))
def test_inv_quad_only_many_vectors(self):
    """Per-batch-element inv_quad values and gradients vs. dense references."""
    # Forward: one inv_quad scalar per batch element.
    estimated = NonLazyVariable(self.mats_var).inv_quad(self.vecs_var)
    batched_inverse = torch.cat([
        self.mats_var_clone[0].inverse().unsqueeze(0),
        self.mats_var_clone[1].inverse().unsqueeze(0),
    ])
    reference = batched_inverse.matmul(self.vecs_var_clone).mul(self.vecs_var_clone).sum(2).sum(1)
    for i in range(self.mats_var.size(0)):
        self.assert_scalar_almost_equal(estimated.data[i], reference.data[i], places=1)

    # Backward: distinct random upstream gradient per batch element.
    inv_quad_grad_output = torch.randn(2)
    reference.backward(gradient=inv_quad_grad_output)
    estimated.backward(gradient=inv_quad_grad_output)
    self.assertTrue(
        approx_equal(self.mats_var_clone.grad.data, self.mats_var.grad.data, epsilon=1e-1)
    )
    self.assertTrue(approx_equal(self.vecs_var_clone.grad.data, self.vecs_var.grad.data))