def test_batched_to_model_list(self):
    for dtype in (torch.float, torch.double):
        # test SingleTaskGP
        train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
        train_Y1 = train_X.sum(dim=-1)
        train_Y2 = train_X[:, 0] - train_X[:, 1]
        train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
        batch_gp = SingleTaskGP(train_X, train_Y)
        list_gp = batched_to_model_list(batch_gp)
        self.assertIsInstance(list_gp, ModelListGP)
        # test FixedNoiseGP
        batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
        list_gp = batched_to_model_list(batch_gp)
        self.assertIsInstance(list_gp, ModelListGP)
        # test SingleTaskMultiFidelityGP
        for lin_trunc in (False, True):
            batch_gp = SingleTaskMultiFidelityGP(
                train_X, train_Y, iteration_fidelity=1, linear_truncated=lin_trunc
            )
            list_gp = batched_to_model_list(batch_gp)
            self.assertIsInstance(list_gp, ModelListGP)
        # test HeteroskedasticSingleTaskGP
        batch_gp = HeteroskedasticSingleTaskGP(
            train_X, train_Y, torch.rand_like(train_Y)
        )
        with self.assertRaises(NotImplementedError):
            batched_to_model_list(batch_gp)
        # test with transforms
        input_tf = Normalize(
            d=2,
            bounds=torch.tensor(
                [[0.0, 0.0], [1.0, 1.0]], device=self.device, dtype=dtype
            ),
        )
        octf = Standardize(m=2)
        batch_gp = SingleTaskGP(
            train_X, train_Y, outcome_transform=octf, input_transform=input_tf
        )
        list_gp = batched_to_model_list(batch_gp)
        for i, m in enumerate(list_gp.models):
            self.assertIsInstance(m.input_transform, Normalize)
            self.assertTrue(torch.equal(m.input_transform.bounds, input_tf.bounds))
            self.assertIsInstance(m.outcome_transform, Standardize)
            self.assertEqual(m.outcome_transform._m, 1)
            expected_octf = octf.subset_output(idcs=[i])
            for attr_name in ["means", "stdvs", "_stdvs_sq"]:
                self.assertTrue(
                    torch.equal(
                        getattr(m.outcome_transform, attr_name),
                        getattr(expected_octf, attr_name),
                    )
                )
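# A minimal usage sketch of the conversion exercised above (illustrative only,
# not part of the test suite, assuming the standard BoTorch import paths):
# `batched_to_model_list` splits a batched multi-output model into a
# `ModelListGP` holding one single-output model per outcome, so each column
# of `train_Y` gets its own GP.
#
#     import torch
#     from botorch.models import SingleTaskGP
#     from botorch.models.converter import batched_to_model_list
#
#     train_X = torch.rand(10, 2)
#     train_Y = torch.rand(10, 2)                  # two outcomes
#     batch_gp = SingleTaskGP(train_X, train_Y)
#     list_gp = batched_to_model_list(batch_gp)    # ModelListGP with 2 models
#     assert len(list_gp.models) == train_Y.shape[-1]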
def test_standardize(self):
    # test error on incompatible dim
    tf = Standardize(m=1)
    with self.assertRaises(RuntimeError):
        tf(torch.zeros(3, 2, device=self.device), None)
    # test error on incompatible batch shape
    with self.assertRaises(RuntimeError):
        tf(torch.zeros(2, 3, 1, device=self.device), None)

    ms = (1, 2)
    batch_shapes = (torch.Size(), torch.Size([2]))
    dtypes = (torch.float, torch.double)

    # test transform, untransform, untransform_posterior
    for m, batch_shape, dtype in itertools.product(ms, batch_shapes, dtypes):
        # test init
        tf = Standardize(m=m, batch_shape=batch_shape)
        self.assertTrue(tf.training)
        self.assertEqual(tf._m, m)
        self.assertIsNone(tf._outputs)
        self.assertEqual(tf._batch_shape, batch_shape)
        self.assertEqual(tf._min_stdv, 1e-8)

        # no observation noise
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Y_tf, Yvar_tf = tf(Y, None)
        self.assertTrue(tf.training)
        self.assertTrue(torch.all(Y_tf.mean(dim=-2).abs() < 1e-4))
        self.assertIsNone(Yvar_tf)
        tf.eval()
        self.assertFalse(tf.training)
        Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
        self.assertTrue(torch.allclose(Y_utf, Y))
        self.assertIsNone(Yvar_utf)

        # subset_output
        tf_subset = tf.subset_output(idcs=[0])
        Y_tf_subset, Yvar_tf_subset = tf_subset(Y[..., [0]])
        self.assertTrue(torch.equal(Y_tf[..., [0]], Y_tf_subset))
        self.assertIsNone(Yvar_tf_subset)
        with self.assertRaises(RuntimeError):
            tf.subset_output(idcs=[0, 1, 2])

        # with observation noise
        tf = Standardize(m=m, batch_shape=batch_shape)
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Yvar = 1e-8 + torch.rand(
            *batch_shape, 3, m, device=self.device, dtype=dtype
        )
        Y_tf, Yvar_tf = tf(Y, Yvar)
        self.assertTrue(tf.training)
        self.assertTrue(torch.all(Y_tf.mean(dim=-2).abs() < 1e-4))
        Yvar_tf_expected = Yvar / Y.std(dim=-2, keepdim=True) ** 2
        self.assertTrue(torch.allclose(Yvar_tf, Yvar_tf_expected))
        tf.eval()
        self.assertFalse(tf.training)
        Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
        self.assertTrue(torch.allclose(Y_utf, Y))
        self.assertTrue(torch.allclose(Yvar_utf, Yvar))

        # untransform_posterior
        for interleaved, lazy in itertools.product((True, False), (True, False)):
            if m == 1 and interleaved:
                # interleaving has no meaning for m=1
                continue
            shape = batch_shape + torch.Size([3, m])
            posterior = _get_test_posterior(
                shape,
                device=self.device,
                dtype=dtype,
                interleaved=interleaved,
                lazy=lazy,
            )
            p_utf = tf.untransform_posterior(posterior)
            self.assertEqual(p_utf.device.type, self.device.type)
            self.assertEqual(p_utf.dtype, dtype)
            mean_expected = tf.means + tf.stdvs * posterior.mean
            variance_expected = tf.stdvs ** 2 * posterior.variance
            self.assertTrue(torch.allclose(p_utf.mean, mean_expected))
            self.assertTrue(torch.allclose(p_utf.variance, variance_expected))
            samples = p_utf.rsample()
            self.assertEqual(samples.shape, torch.Size([1]) + shape)
            samples = p_utf.rsample(sample_shape=torch.Size([4]))
            self.assertEqual(samples.shape, torch.Size([4]) + shape)
            samples2 = p_utf.rsample(sample_shape=torch.Size([4, 2]))
            self.assertEqual(samples2.shape, torch.Size([4, 2]) + shape)
            # TODO: Test expected covar (both interleaved and non-interleaved)

        # untransform_posterior for a non-GPyTorch posterior
        posterior2 = TransformedPosterior(
            posterior=posterior,
            sample_transform=lambda s: s,
            mean_transform=lambda m, v: m,
            variance_transform=lambda m, v: v,
        )
        p_utf2 = tf.untransform_posterior(posterior2)
        self.assertEqual(p_utf2.device.type, self.device.type)
        self.assertEqual(p_utf2.dtype, dtype)
        mean_expected = tf.means + tf.stdvs * posterior.mean
        variance_expected = tf.stdvs ** 2 * posterior.variance
        self.assertTrue(torch.allclose(p_utf2.mean, mean_expected))
        self.assertTrue(torch.allclose(p_utf2.variance, variance_expected))
        # TODO: Test expected covar (both interleaved and non-interleaved)
        samples = p_utf2.rsample()
        self.assertEqual(samples.shape, torch.Size([1]) + shape)
        samples = p_utf2.rsample(sample_shape=torch.Size([4]))
        self.assertEqual(samples.shape, torch.Size([4]) + shape)
        samples2 = p_utf2.rsample(sample_shape=torch.Size([4, 2]))
        self.assertEqual(samples2.shape, torch.Size([4, 2]) + shape)

        # test error on incompatible output dimension
        tf_big = Standardize(m=4).eval()
        with self.assertRaises(RuntimeError):
            tf_big.untransform_posterior(posterior2)

    # test transforming a subset of outcomes
    for batch_shape, dtype in itertools.product(batch_shapes, dtypes):
        m = 2
        outputs = [-1]

        # test init
        tf = Standardize(m=m, outputs=outputs, batch_shape=batch_shape)
        self.assertTrue(tf.training)
        self.assertEqual(tf._m, m)
        # negative output indices are normalized to positive ones
        self.assertEqual(tf._outputs, [1])
        self.assertEqual(tf._batch_shape, batch_shape)
        self.assertEqual(tf._min_stdv, 1e-8)

        # no observation noise
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Y_tf, Yvar_tf = tf(Y, None)
        self.assertTrue(tf.training)
        Y_tf_mean = Y_tf.mean(dim=-2)
        # only the last output is standardized; the first passes through
        self.assertTrue(torch.all(Y_tf_mean[..., 1].abs() < 1e-4))
        self.assertTrue(torch.allclose(Y_tf_mean[..., 0], Y.mean(dim=-2)[..., 0]))
        self.assertIsNone(Yvar_tf)
        tf.eval()
        self.assertFalse(tf.training)
        Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
        self.assertTrue(torch.allclose(Y_utf, Y))
        self.assertIsNone(Yvar_utf)

        # subset_output
        tf_subset = tf.subset_output(idcs=[0])
        Y_tf_subset, Yvar_tf_subset = tf_subset(Y[..., [0]])
        self.assertTrue(torch.equal(Y_tf[..., [0]], Y_tf_subset))
        self.assertIsNone(Yvar_tf_subset)
        with self.assertRaises(RuntimeError):
            tf.subset_output(idcs=[0, 1, 2])

        # with observation noise
        tf = Standardize(m=m, outputs=outputs, batch_shape=batch_shape)
        Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
        Yvar = 1e-8 + torch.rand(
            *batch_shape, 3, m, device=self.device, dtype=dtype
        )
        Y_tf, Yvar_tf = tf(Y, Yvar)
        self.assertTrue(tf.training)
        Y_tf_mean = Y_tf.mean(dim=-2)
        self.assertTrue(torch.all(Y_tf_mean[..., 1].abs() < 1e-4))
        self.assertTrue(torch.allclose(Y_tf_mean[..., 0], Y.mean(dim=-2)[..., 0]))
        Yvar_tf_expected = Yvar / Y.std(dim=-2, keepdim=True) ** 2
        self.assertTrue(torch.allclose(Yvar_tf[..., 1], Yvar_tf_expected[..., 1]))
        self.assertTrue(torch.allclose(Yvar_tf[..., 0], Yvar[..., 0]))
        tf.eval()
        self.assertFalse(tf.training)
        Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
        self.assertTrue(torch.allclose(Y_utf, Y))
        self.assertTrue(torch.allclose(Yvar_utf, Yvar))

        # error on untransform_posterior when transforming only a subset
        with self.assertRaises(NotImplementedError):
            tf.untransform_posterior(None)
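# For reference, the arithmetic the assertions above rely on (a sketch of the
# expected behavior, not library code): in training mode `Standardize` maps
# each transformed output to zero mean and unit variance over the sample
# dimension,
#
#     Y_tf = (Y - Y.mean(dim=-2)) / Y.std(dim=-2)
#     Yvar_tf = Yvar / Y.std(dim=-2) ** 2
#
# so untransforming a posterior rescales its moments back,
#
#     mean_utf = tf.means + tf.stdvs * posterior.mean
#     var_utf = tf.stdvs ** 2 * posterior.variance
#
# which is exactly what the `mean_expected` / `variance_expected` checks assert.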