Example #1
 def test_batched_to_model_list(self):
     for dtype in (torch.float, torch.double):
         # test SingleTaskGP
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1)
         train_Y2 = train_X[:, 0] - train_X[:, 1]
         train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
         batch_gp = SingleTaskGP(train_X, train_Y)
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test FixedNoiseGP
         batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test SingleTaskMultiFidelityGP
         for lin_trunc in (False, True):
             batch_gp = SingleTaskMultiFidelityGP(
                 train_X, train_Y, iteration_fidelity=1, linear_truncated=lin_trunc
             )
             list_gp = batched_to_model_list(batch_gp)
             self.assertIsInstance(list_gp, ModelListGP)
         # test HeteroskedasticSingleTaskGP
         batch_gp = HeteroskedasticSingleTaskGP(
             train_X, train_Y, torch.rand_like(train_Y)
         )
         with self.assertRaises(NotImplementedError):
             batched_to_model_list(batch_gp)
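For reference outside the test class, here is a minimal standalone sketch of the same conversion; the converter import path (botorch.models.converter) is assumed, and everything else mirrors the snippet above.

    import torch
    from botorch.models import ModelListGP, SingleTaskGP
    from botorch.models.converter import batched_to_model_list  # assumed import path

    # Two-output training data, as in the test above.
    train_X = torch.rand(10, 2)
    train_Y = torch.stack(
        [train_X.sum(dim=-1), train_X[:, 0] - train_X[:, 1]], dim=-1
    )

    # One batched GP over both outputs, split into a ModelListGP with one
    # single-output model per outcome column.
    batch_gp = SingleTaskGP(train_X, train_Y)
    list_gp = batched_to_model_list(batch_gp)
    assert isinstance(list_gp, ModelListGP)
    assert len(list_gp.models) == 2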
Example #2
 def _getBatchedModel(
     self, kind="SingleTaskGP", double=False, outcome_transform=False
 ):
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
         -1
     )
     noise = torch.tensor(NOISE, device=self.device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     kwargs = {}
     if outcome_transform:
         kwargs["outcome_transform"] = Standardize(m=2)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y, **kwargs)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(
             train_x, train_y, 0.1 * torch.ones_like(train_y), **kwargs
         )
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=self.device, dtype=dtype)

 def test_is_noiseless(self):
     x = torch.zeros(1, 1)
     y = torch.zeros(1, 1)
     se = torch.zeros(1, 1)
     model = SingleTaskGP(x, y)
     self.assertTrue(is_noiseless(model))
     model = HeteroskedasticSingleTaskGP(x, y, se)
     self.assertFalse(is_noiseless(model))
     with self.assertRaises(ModelError):
         is_noiseless(ModelListGP())
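A condensed standalone version of the helper above, with the NOISE constant (not shown in the snippet) replaced by a random stand-in; the outcome transform and MLL construction are unchanged.

    import math

    import torch
    from botorch.models import FixedNoiseGP
    from botorch.models.transforms.outcome import Standardize
    from gpytorch.mlls import ExactMarginalLogLikelihood

    train_x = torch.linspace(0, 1, 10).unsqueeze(-1)
    noise = 0.1 * torch.randn(10, 1)  # stand-in for the NOISE constant
    train_y = torch.cat(
        [torch.sin(train_x * (2 * math.pi)) + noise] * 2, dim=-1
    )

    # Fixed observation noise plus a Standardize transform over both outputs.
    model = FixedNoiseGP(
        train_x,
        train_y,
        0.1 * torch.ones_like(train_y),
        outcome_transform=Standardize(m=2),
    )
    mll = ExactMarginalLogLikelihood(model.likelihood, model)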
Example #4
 def test_batched_to_model_list(self):
     for dtype in (torch.float, torch.double):
         # test SingleTaskGP
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1)
         train_Y2 = train_X[:, 0] - train_X[:, 1]
         train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
         batch_gp = SingleTaskGP(train_X, train_Y)
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test FixedNoiseGP
         batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test SingleTaskMultiFidelityGP
         for lin_trunc in (False, True):
             batch_gp = SingleTaskMultiFidelityGP(
                 train_X,
                 train_Y,
                 iteration_fidelity=1,
                 linear_truncated=lin_trunc)
             list_gp = batched_to_model_list(batch_gp)
             self.assertIsInstance(list_gp, ModelListGP)
         # test HeteroskedasticSingleTaskGP
         batch_gp = HeteroskedasticSingleTaskGP(train_X, train_Y,
                                                torch.rand_like(train_Y))
         with self.assertRaises(NotImplementedError):
             batched_to_model_list(batch_gp)
         # test with transforms
         input_tf = Normalize(
             d=2,
             bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]],
                                 device=self.device,
                                 dtype=dtype),
         )
         octf = Standardize(m=2)
         batch_gp = SingleTaskGP(train_X,
                                 train_Y,
                                 outcome_transform=octf,
                                 input_transform=input_tf)
         list_gp = batched_to_model_list(batch_gp)
         for i, m in enumerate(list_gp.models):
             self.assertIsInstance(m.input_transform, Normalize)
             self.assertTrue(
                 torch.equal(m.input_transform.bounds, input_tf.bounds))
             self.assertIsInstance(m.outcome_transform, Standardize)
             self.assertEqual(m.outcome_transform._m, 1)
             expected_octf = octf.subset_output(idcs=[i])
             for attr_name in ["means", "stdvs", "_stdvs_sq"]:
                 self.assertTrue(
                     torch.equal(
                         m.outcome_transform.__getattr__(attr_name),
                         expected_octf.__getattr__(attr_name),
                     ))
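The per-output transform assertions in the loop above hinge on Standardize.subset_output; a minimal sketch of that method in isolation, assuming the transform has already been fit by a forward pass in train mode.

    import torch
    from botorch.models.transforms.outcome import Standardize

    octf = Standardize(m=2)
    octf(torch.randn(10, 2))  # forward pass in train mode fits means/stdvs

    # Subsetting to a single output keeps only that column's statistics.
    octf0 = octf.subset_output(idcs=[0])
    assert octf0._m == 1
    assert octf0.means.shape[-1] == 1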
Example #5
 def testSubsetModel(self):
     x = torch.zeros(1, 1)
     y = torch.zeros(1, 2)
     model = SingleTaskGP(x, y)
     self.assertEqual(model.num_outputs, 2)
     # basic test, can subset
     obj_weights = torch.tensor([1.0, 0.0])
     model_sub, obj_weights_sub, ocs_sub = subset_model(model, obj_weights)
     self.assertIsNone(ocs_sub)
     self.assertEqual(model_sub.num_outputs, 1)
     self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
     # basic test, cannot subset
     obj_weights = torch.tensor([1.0, 2.0])
     model_sub, obj_weights_sub, ocs_sub = subset_model(model, obj_weights)
     self.assertIsNone(ocs_sub)
     self.assertIs(model_sub, model)  # check identity
     self.assertIs(obj_weights_sub, obj_weights)  # check identity
     # test w/ outcome constraints, can subset
     obj_weights = torch.tensor([1.0, 0.0])
     ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
     model_sub, obj_weights_sub, ocs_sub = subset_model(
         model, obj_weights, ocs)
     self.assertEqual(model_sub.num_outputs, 1)
     self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
     self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
     self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
     # test w/ outcome constraints, cannot subset
     obj_weights = torch.tensor([1.0, 0.0])
     ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
     model_sub, obj_weights_sub, ocs_sub = subset_model(
         model, obj_weights, ocs)
     self.assertIs(model_sub, model)  # check identity
     self.assertIs(obj_weights_sub, obj_weights)  # check identity
     self.assertIs(ocs_sub, ocs)  # check identity
     # test unsupported
     yvar = torch.ones(1, 2)
     model = HeteroskedasticSingleTaskGP(x, y, yvar)
     model_sub, obj_weights_sub, ocs = subset_model(model, obj_weights)
     self.assertIsNone(ocs)
     self.assertIs(model_sub, model)  # check identity
     self.assertIs(obj_weights_sub, obj_weights)  # check identity
     # test error on size inconsistency
     obj_weights = torch.ones(3)
     with self.assertRaises(RuntimeError):
         subset_model(model, obj_weights)
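For orientation, a standalone sketch of the subset_model call pattern exercised above. The import path is an assumption (the snippet does not show where subset_model comes from), and the three-tuple return follows the usage in the test.

    import torch
    from botorch.models import SingleTaskGP

    # Assumed import path; adjust to wherever subset_model is actually defined.
    from ax.models.torch.utils import subset_model

    model = SingleTaskGP(torch.zeros(1, 1), torch.zeros(1, 2))
    obj_weights = torch.tensor([1.0, 0.0])  # only the first output matters

    # With a zero weight on output 1, the model can be subset to one output.
    model_sub, obj_weights_sub, ocs_sub = subset_model(model, obj_weights)
    assert model_sub.num_outputs == 1
    assert ocs_sub is None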
Example #6
 def test_batched_to_model_list(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # test SingleTaskGP
         train_X = torch.rand(10, 2, device=device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1)
         train_Y2 = train_X[:, 0] - train_X[:, 1]
         train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
         batch_gp = SingleTaskGP(train_X, train_Y)
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test FixedNoiseGP
         batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test HeteroskedasticSingleTaskGP
         batch_gp = HeteroskedasticSingleTaskGP(train_X, train_Y,
                                                torch.rand_like(train_Y))
         with self.assertRaises(NotImplementedError):
             batched_to_model_list(batch_gp)
Example #7
 def test_batched_to_model_list(self):
     for dtype in (torch.float, torch.double):
         # test SingleTaskGP
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1)
         train_Y2 = train_X[:, 0] - train_X[:, 1]
         train_Y = torch.stack([train_Y1, train_Y2], dim=-1)
         batch_gp = SingleTaskGP(train_X, train_Y)
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test FixedNoiseGP
         batch_gp = FixedNoiseGP(train_X, train_Y, torch.rand_like(train_Y))
         list_gp = batched_to_model_list(batch_gp)
         self.assertIsInstance(list_gp, ModelListGP)
         # test SingleTaskMultiFidelityGP
         for lin_trunc in (False, True):
             batch_gp = SingleTaskMultiFidelityGP(
                 train_X,
                 train_Y,
                 iteration_fidelity=1,
                 linear_truncated=lin_trunc)
             list_gp = batched_to_model_list(batch_gp)
             self.assertIsInstance(list_gp, ModelListGP)
         # test HeteroskedasticSingleTaskGP
         batch_gp = HeteroskedasticSingleTaskGP(train_X, train_Y,
                                                torch.rand_like(train_Y))
         with self.assertRaises(NotImplementedError):
             batched_to_model_list(batch_gp)
         # test input transform
         input_tf = Normalize(
             d=2,
             bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]],
                                 device=self.device,
                                 dtype=dtype),
         )
         batch_gp = SingleTaskGP(train_X, train_Y, input_transform=input_tf)
         list_gp = batched_to_model_list(batch_gp)
         for m in list_gp.models:
             self.assertIsInstance(m.input_transform, Normalize)
             self.assertTrue(
                 torch.equal(m.input_transform.bounds, input_tf.bounds))
Example #8
 def _getBatchedModel(self, kind="SingleTaskGP", double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device,
                              dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y1 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y2 = torch.sin(train_x * (2 * math.pi)) + noise
     train_y = torch.cat([train_y1, train_y2], dim=-1)
     if kind == "SingleTaskGP":
         model = SingleTaskGP(train_x, train_y)
     elif kind == "FixedNoiseGP":
         model = FixedNoiseGP(train_x, train_y,
                              0.1 * torch.ones_like(train_y))
     elif kind == "HeteroskedasticSingleTaskGP":
         model = HeteroskedasticSingleTaskGP(train_x, train_y,
                                             0.1 * torch.ones_like(train_y))
     else:
         raise NotImplementedError
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example #9
    def test_model_list_to_batched(self):
        for dtype in (torch.float, torch.double):
            # basic test
            train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
            train_Y1 = train_X.sum(dim=-1, keepdim=True)
            train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
            gp1 = SingleTaskGP(train_X, train_Y1)
            gp2 = SingleTaskGP(train_X, train_Y2)
            list_gp = ModelListGP(gp1, gp2)
            batch_gp = model_list_to_batched(list_gp)
            self.assertIsInstance(batch_gp, SingleTaskGP)
            # test degenerate (single model)
            batch_gp = model_list_to_batched(ModelListGP(gp1))
            self.assertEqual(batch_gp._num_outputs, 1)
            # test different model classes
            gp2 = FixedNoiseGP(train_X, train_Y1, torch.ones_like(train_Y1))
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # test non-batched models
            gp1_ = SimpleGPyTorchModel(train_X, train_Y1)
            gp2_ = SimpleGPyTorchModel(train_X, train_Y2)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1_, gp2_))
            # test list of multi-output models
            train_Y = torch.cat([train_Y1, train_Y2], dim=-1)
            gp2 = SingleTaskGP(train_X, train_Y)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # test different training inputs
            gp2 = SingleTaskGP(2 * train_X, train_Y2)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # check scalar agreement
            gp2 = SingleTaskGP(train_X, train_Y2)
            gp2.likelihood.noise_covar.noise_prior.rate.fill_(1.0)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # check tensor shape agreement
            gp2 = SingleTaskGP(train_X, train_Y2)
            gp2.covar_module.raw_outputscale = torch.nn.Parameter(
                torch.tensor([0.0], device=self.device, dtype=dtype))
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # test HeteroskedasticSingleTaskGP
            gp2 = HeteroskedasticSingleTaskGP(train_X, train_Y1,
                                              torch.ones_like(train_Y1))
            with self.assertRaises(NotImplementedError):
                model_list_to_batched(ModelListGP(gp2))
            # test custom likelihood
            gp2 = SingleTaskGP(train_X,
                               train_Y2,
                               likelihood=GaussianLikelihood())
            with self.assertRaises(NotImplementedError):
                model_list_to_batched(ModelListGP(gp2))
            # test FixedNoiseGP
            train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
            train_Y1 = train_X.sum(dim=-1, keepdim=True)
            train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
            gp1_ = FixedNoiseGP(train_X, train_Y1, torch.rand_like(train_Y1))
            gp2_ = FixedNoiseGP(train_X, train_Y2, torch.rand_like(train_Y2))
            list_gp = ModelListGP(gp1_, gp2_)
            batch_gp = model_list_to_batched(list_gp)
            # test SingleTaskMultiFidelityGP
            gp1_ = SingleTaskMultiFidelityGP(train_X,
                                             train_Y1,
                                             iteration_fidelity=1)
            gp2_ = SingleTaskMultiFidelityGP(train_X,
                                             train_Y2,
                                             iteration_fidelity=1)
            list_gp = ModelListGP(gp1_, gp2_)
            batch_gp = model_list_to_batched(list_gp)
            gp2_ = SingleTaskMultiFidelityGP(train_X,
                                             train_Y2,
                                             iteration_fidelity=2)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)
            # test input transform
            input_tf = Normalize(
                d=2,
                bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
            )
            gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf)
            gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf)
            list_gp = ModelListGP(gp1_, gp2_)
            batch_gp = model_list_to_batched(list_gp)
            self.assertIsInstance(batch_gp.input_transform, Normalize)
            self.assertTrue(
                torch.equal(batch_gp.input_transform.bounds, input_tf.bounds))
            # test different input transforms
            input_tf2 = Normalize(
                d=2,
                bounds=torch.tensor([[-1.0, -1.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
            )
            gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf)
            gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf2)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)

            # test batched input transform
            input_tf2 = Normalize(
                d=2,
                bounds=torch.tensor([[-1.0, -1.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
                batch_shape=torch.Size([3]),
            )
            gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf2)
            gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf2)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)

            # test outcome transform
            octf = Standardize(m=1)
            gp1_ = SingleTaskGP(train_X, train_Y1, outcome_transform=octf)
            gp2_ = SingleTaskGP(train_X, train_Y2, outcome_transform=octf)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)
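Going the other way, a minimal standalone sketch of model_list_to_batched (same assumed converter import path). The test above spells out its preconditions: identical model classes, identical training inputs, one output per model, matching hyperparameter shapes, and no outcome transforms.

    import torch
    from botorch.models import ModelListGP, SingleTaskGP
    from botorch.models.converter import model_list_to_batched  # assumed import path

    train_X = torch.rand(10, 2)
    gp1 = SingleTaskGP(train_X, train_X.sum(dim=-1, keepdim=True))
    gp2 = SingleTaskGP(train_X, (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1))

    # Two single-output models over the same inputs collapse into one batched GP.
    batch_gp = model_list_to_batched(ModelListGP(gp1, gp2))
    assert isinstance(batch_gp, SingleTaskGP)
    assert batch_gp.num_outputs == 2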
Example #10
    def test_batched_multi_output_to_single_output(self):
        for dtype in (torch.float, torch.double):
            # basic test
            train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
            train_Y = torch.stack(
                [
                    train_X.sum(dim=-1),
                    (train_X[:, 0] - train_X[:, 1]),
                ],
                dim=1,
            )
            batched_mo_model = SingleTaskGP(train_X, train_Y)
            batched_so_model = batched_multi_output_to_single_output(
                batched_mo_model)
            self.assertIsInstance(batched_so_model, SingleTaskGP)
            self.assertEqual(batched_so_model.num_outputs, 1)
            # test non-batched models
            non_batch_model = SimpleGPyTorchModel(train_X, train_Y[:, :1])
            with self.assertRaises(UnsupportedError):
                batched_multi_output_to_single_output(non_batch_model)
            gp2 = HeteroskedasticSingleTaskGP(train_X, train_Y,
                                              torch.ones_like(train_Y))
            with self.assertRaises(NotImplementedError):
                batched_multi_output_to_single_output(gp2)
            # test custom likelihood
            gp2 = SingleTaskGP(train_X,
                               train_Y,
                               likelihood=GaussianLikelihood())
            with self.assertRaises(NotImplementedError):
                batched_multi_output_to_single_output(gp2)
            # test FixedNoiseGP
            train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
            batched_mo_model = FixedNoiseGP(train_X, train_Y,
                                            torch.rand_like(train_Y))
            batched_so_model = batched_multi_output_to_single_output(
                batched_mo_model)
            self.assertIsInstance(batched_so_model, FixedNoiseGP)
            self.assertEqual(batched_so_model.num_outputs, 1)
            # test SingleTaskMultiFidelityGP
            batched_mo_model = SingleTaskMultiFidelityGP(train_X,
                                                         train_Y,
                                                         iteration_fidelity=1)
            batched_so_model = batched_multi_output_to_single_output(
                batched_mo_model)
            self.assertIsInstance(batched_so_model, SingleTaskMultiFidelityGP)
            self.assertEqual(batched_so_model.num_outputs, 1)
            # test input transform
            input_tf = Normalize(
                d=2,
                bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
            )
            batched_mo_model = SingleTaskGP(train_X,
                                            train_Y,
                                            input_transform=input_tf)
            batch_so_model = batched_multi_output_to_single_output(
                batched_mo_model)
            self.assertIsInstance(batch_so_model.input_transform, Normalize)
            self.assertTrue(
                torch.equal(batch_so_model.input_transform.bounds,
                            input_tf.bounds))

            # test batched input transform
            input_tf2 = Normalize(
                d=2,
                bounds=torch.tensor([[-1.0, -1.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
                batch_shape=torch.Size([2]),
            )
            batched_mo_model = SingleTaskGP(train_X,
                                            train_Y,
                                            input_transform=input_tf2)
            batched_so_model = batched_multi_output_to_single_output(
                batched_mo_model)
            self.assertIsInstance(batch_so_model.input_transform, Normalize)
            self.assertTrue(
                torch.equal(batch_so_model.input_transform.bounds,
                            input_tf.bounds))
            # test outcome transform
            batched_mo_model = SingleTaskGP(train_X,
                                            train_Y,
                                            outcome_transform=Standardize(m=2))
            with self.assertRaises(NotImplementedError):
                batched_multi_output_to_single_output(batched_mo_model)
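A standalone sketch of batched_multi_output_to_single_output as well (same assumed import path); per the assertions above, the converted model keeps its class but reports a single output.

    import torch
    from botorch.models import SingleTaskGP
    from botorch.models.converter import (  # assumed import path
        batched_multi_output_to_single_output,
    )

    train_X = torch.rand(10, 2)
    train_Y = torch.stack(
        [train_X.sum(dim=-1), train_X[:, 0] - train_X[:, 1]], dim=-1
    )

    batched_mo_model = SingleTaskGP(train_X, train_Y)
    batched_so_model = batched_multi_output_to_single_output(batched_mo_model)
    assert isinstance(batched_so_model, SingleTaskGP)
    assert batched_so_model.num_outputs == 1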
Example #11
 def test_model_list_to_batched(self):
     for dtype in (torch.float, torch.double):
         # basic test
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1, keepdim=True)
         train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
         gp1 = SingleTaskGP(train_X, train_Y1)
         gp2 = SingleTaskGP(train_X, train_Y2)
         list_gp = ModelListGP(gp1, gp2)
         batch_gp = model_list_to_batched(list_gp)
         self.assertIsInstance(batch_gp, SingleTaskGP)
         # test degenerate (single model)
         batch_gp = model_list_to_batched(ModelListGP(gp1))
         self.assertEqual(batch_gp._num_outputs, 1)
         # test different model classes
         gp2 = FixedNoiseGP(train_X, train_Y1, torch.ones_like(train_Y1))
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # test non-batched models
         gp1_ = SimpleGPyTorchModel(train_X, train_Y1)
         gp2_ = SimpleGPyTorchModel(train_X, train_Y2)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1_, gp2_))
         # test list of multi-output models
         train_Y = torch.cat([train_Y1, train_Y2], dim=-1)
         gp2 = SingleTaskGP(train_X, train_Y)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # test different training inputs
         gp2 = SingleTaskGP(2 * train_X, train_Y2)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # check scalar agreement
         gp2 = SingleTaskGP(train_X, train_Y2)
         gp2.likelihood.noise_covar.noise_prior.rate.fill_(1.0)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # check tensor shape agreement
         gp2 = SingleTaskGP(train_X, train_Y2)
         gp2.covar_module.raw_outputscale = torch.nn.Parameter(
             torch.tensor([0.0], device=self.device, dtype=dtype)
         )
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # test HeteroskedasticSingleTaskGP
         gp2 = HeteroskedasticSingleTaskGP(
             train_X, train_Y1, torch.ones_like(train_Y1)
         )
         with self.assertRaises(NotImplementedError):
             model_list_to_batched(ModelListGP(gp2))
         # test custom likelihood
         gp2 = SingleTaskGP(train_X, train_Y2, likelihood=GaussianLikelihood())
         with self.assertRaises(NotImplementedError):
             model_list_to_batched(ModelListGP(gp2))
         # test FixedNoiseGP
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1, keepdim=True)
         train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
         gp1_ = FixedNoiseGP(train_X, train_Y1, torch.rand_like(train_Y1))
         gp2_ = FixedNoiseGP(train_X, train_Y2, torch.rand_like(train_Y2))
         list_gp = ModelListGP(gp1_, gp2_)
         batch_gp = model_list_to_batched(list_gp)
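Finally, the two converters compose into a round trip, sketched here under the same import assumption as above.

    import torch
    from botorch.models import ModelListGP, SingleTaskGP
    from botorch.models.converter import (  # assumed import path
        batched_to_model_list,
        model_list_to_batched,
    )

    train_X = torch.rand(10, 2)
    train_Y = torch.stack(
        [train_X.sum(dim=-1), train_X[:, 0] - train_X[:, 1]], dim=-1
    )

    batch_gp = SingleTaskGP(train_X, train_Y)
    list_gp = batched_to_model_list(batch_gp)    # batched -> ModelListGP
    round_trip = model_list_to_batched(list_gp)  # ModelListGP -> batched
    assert isinstance(list_gp, ModelListGP)
    assert isinstance(round_trip, SingleTaskGP)
    assert round_trip.num_outputs == 2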