def test_norm_to_lognorm(self):
    """Verify the normal -> lognormal moment-conversion helpers.

    Covers three scenarios for both float32 and float64:
      1. joint distribution with an independent (diagonal) covariance,
      2. joint distribution with correlated off-diagonal entries,
      3. the marginal mean/variance helpers.
    """
    for dtype in (torch.float, torch.double):
        tkwargs = {"device": self.device, "dtype": dtype}
        # --- joint, independent (diagonal covariance in log-space) ---
        expmu = torch.tensor([1.0, 2.0, 3.0], **tkwargs)
        expdiag = torch.tensor([1.5, 2.0, 3], **tkwargs)
        mu = torch.log(expmu)
        diag = torch.log(expdiag)
        Cov = torch.diag_embed(diag)
        mu_ln, Cov_ln = norm_to_lognorm(mu, Cov)
        # Closed-form lognormal mean: exp(mu + sigma^2 / 2).
        mu_ln_expected = expmu * torch.exp(0.5 * diag)
        diag_ln_expected = torch.tensor([0.75, 8.0, 54.0], **tkwargs)
        Cov_ln_expected = torch.diag_embed(diag_ln_expected)
        self.assertTrue(torch.allclose(Cov_ln, Cov_ln_expected))
        self.assertTrue(torch.allclose(mu_ln, mu_ln_expected))
        # --- joint, correlated (add a symmetric off-diagonal term) ---
        Cov[0, 2] = 0.1
        Cov[2, 0] = 0.1
        mu_ln, Cov_ln = norm_to_lognorm(mu, Cov)
        Cov_ln_expected[0, 2] = 0.669304
        Cov_ln_expected[2, 0] = 0.669304
        self.assertTrue(torch.allclose(Cov_ln, Cov_ln_expected))
        self.assertTrue(torch.allclose(mu_ln, mu_ln_expected))
        # --- marginal mean/variance helpers ---
        mu = torch.tensor([-1.0, 0.0, 1.0], **tkwargs)
        v = torch.tensor([1.0, 2.0, 3.0], **tkwargs)
        # Pick var so that the expected lognormal mean equals v exactly.
        var = 2 * (torch.log(v) - mu)
        mu_ln = norm_to_lognorm_mean(mu, var)
        var_ln = norm_to_lognorm_variance(mu, var)
        mu_ln_expected = torch.tensor([1.0, 2.0, 3.0], **tkwargs)
        var_ln_expected = (torch.exp(var) - 1) * mu_ln_expected**2
        self.assertTrue(torch.allclose(mu_ln, mu_ln_expected))
        self.assertTrue(torch.allclose(var_ln, var_ln_expected))
def test_log(self):
    """Test the ``Log`` outcome transform.

    Exercises transform/untransform round trips (with and without
    observation noise), ``subset_output``, ``untransform_posterior``
    shape/moment behavior, and the partial-outcome (``outputs=...``) path.
    """
    ms = (1, 2)
    batch_shapes = (torch.Size(), torch.Size([2]))
    dtypes = (torch.float, torch.double)
    # test transform and untransform
    for m, batch_shape, dtype in itertools.product(ms, batch_shapes, dtypes):
        # test init
        tf = Log()
        self.assertTrue(tf.training)
        self.assertIsNone(tf._outputs)
        # no observation noise
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Y_tf, Yvar_tf = tf(Y, None)
        self.assertTrue(tf.training)
        self.assertTrue(torch.allclose(Y_tf, torch.log(Y)))
        self.assertIsNone(Yvar_tf)
        tf.eval()
        self.assertFalse(tf.training)
        Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
        # BUGFIX: the allclose result was previously discarded, so the
        # round-trip was never actually checked; wrap it in assertTrue.
        self.assertTrue(torch.allclose(Y_utf, Y))
        self.assertIsNone(Yvar_utf)
        # subset_output
        tf_subset = tf.subset_output(idcs=[0])
        Y_tf_subset, Yvar_tf_subset = tf_subset(Y[..., [0]])
        self.assertTrue(torch.equal(Y_tf[..., [0]], Y_tf_subset))
        self.assertIsNone(Yvar_tf_subset)
        # test error if observation noise present
        tf = Log()
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Yvar = 1e-8 + torch.rand(
            *batch_shape, 3, m, device=self.device, dtype=dtype
        )
        with self.assertRaises(NotImplementedError):
            tf(Y, Yvar)
        tf.eval()
        with self.assertRaises(NotImplementedError):
            tf.untransform(Y, Yvar)
        # untransform_posterior
        tf = Log()
        Y_tf, Yvar_tf = tf(Y, None)
        tf.eval()
        shape = batch_shape + torch.Size([3, m])
        posterior = _get_test_posterior(shape, device=self.device, dtype=dtype)
        p_utf = tf.untransform_posterior(posterior)
        self.assertIsInstance(p_utf, TransformedPosterior)
        self.assertEqual(p_utf.device.type, self.device.type)
        self.assertTrue(p_utf.dtype == dtype)
        self.assertTrue(p_utf._sample_transform == torch.exp)
        # Moments of the exp-transformed posterior must match the
        # closed-form normal -> lognormal conversion.
        mean_expected = norm_to_lognorm_mean(posterior.mean, posterior.variance)
        variance_expected = norm_to_lognorm_variance(
            posterior.mean, posterior.variance
        )
        self.assertTrue(torch.allclose(p_utf.mean, mean_expected))
        self.assertTrue(torch.allclose(p_utf.variance, variance_expected))
        samples = p_utf.rsample()
        self.assertEqual(samples.shape, torch.Size([1]) + shape)
        samples = p_utf.rsample(sample_shape=torch.Size([4]))
        self.assertEqual(samples.shape, torch.Size([4]) + shape)
        samples2 = p_utf.rsample(sample_shape=torch.Size([4, 2]))
        self.assertEqual(samples2.shape, torch.Size([4, 2]) + shape)
    # test transforming a subset of outcomes
    for batch_shape, dtype in itertools.product(batch_shapes, dtypes):
        m = 2
        outputs = [-1]
        # test init
        tf = Log(outputs=outputs)
        self.assertTrue(tf.training)
        # cannot normalize indices b/c we don't know dimension yet
        self.assertEqual(tf._outputs, [-1])
        # no observation noise
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Y_tf, Yvar_tf = tf(Y, None)
        self.assertTrue(tf.training)
        # only the selected (last) outcome is log-transformed
        self.assertTrue(torch.allclose(Y_tf[..., 1], torch.log(Y[..., 1])))
        self.assertTrue(torch.allclose(Y_tf[..., 0], Y[..., 0]))
        self.assertIsNone(Yvar_tf)
        tf.eval()
        self.assertFalse(tf.training)
        Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
        # BUGFIX: assert the round-trip result rather than discarding it.
        self.assertTrue(torch.allclose(Y_utf, Y))
        self.assertIsNone(Yvar_utf)
        # subset_output is unsupported when explicit outputs are given
        # (the previously-assigned `tf_subset` local was unused: the call
        # raises before the assignment completes)
        with self.assertRaises(NotImplementedError):
            tf.subset_output(idcs=[0])
        # with observation noise
        tf = Log(outputs=outputs)
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Yvar = 1e-8 + torch.rand(
            *batch_shape, 3, m, device=self.device, dtype=dtype
        )
        with self.assertRaises(NotImplementedError):
            tf(Y, Yvar)
        # error on untransform_posterior
        with self.assertRaises(NotImplementedError):
            tf.untransform_posterior(None)
        # test subset_output with positive on subset of outcomes (pos. index)
        tf = Log(outputs=[0])
        Y_tf, Yvar_tf = tf(Y, None)
        tf_subset = tf.subset_output(idcs=[0])
        Y_tf_subset, Yvar_tf_subset = tf_subset(Y[..., [0]], None)
        self.assertTrue(torch.equal(Y_tf_subset, Y_tf[..., [0]]))
        self.assertIsNone(Yvar_tf_subset)