def _get_model_and_data(
    self,
    iteration_fidelity,
    data_fidelity,
    batch_shape,
    m,
    lin_truncated,
    outcome_transform=None,
    input_transform=None,
    **tkwargs,
):
    """Construct a ``FixedNoiseMultiFidelityGP`` and return it with its kwargs.

    Random training data with the requested number of fidelity dimensions is
    generated via ``_get_random_data_with_fidelity``; observation noise is a
    constant 0.01. The optional outcome/input transforms are forwarded only
    when provided. Returns ``(model, model_kwargs)`` so tests can re-create
    or introspect the model.
    """
    # One fidelity dimension per non-None fidelity argument.
    fidelity_dims = sum(
        fid is not None for fid in (iteration_fidelity, data_fidelity)
    )
    train_X, train_Y = _get_random_data_with_fidelity(
        batch_shape=batch_shape, m=m, n_fidelity=fidelity_dims, **tkwargs
    )
    model_kwargs = {
        "train_X": train_X,
        "train_Y": train_Y,
        # Fixed homoskedastic noise level for all observations.
        "train_Yvar": torch.full_like(train_Y, 0.01),
        "iteration_fidelity": iteration_fidelity,
        "data_fidelity": data_fidelity,
        "linear_truncated": lin_truncated,
    }
    # Only pass the transforms through when the caller supplied them.
    for key, transform in (
        ("outcome_transform", outcome_transform),
        ("input_transform", input_transform),
    ):
        if transform is not None:
            model_kwargs[key] = transform
    model = FixedNoiseMultiFidelityGP(**model_kwargs)
    return model, model_kwargs
 def test_init_error(self):
     """Constructing the model with no fidelity dimension must raise.

     ``FixedNoiseMultiFidelityGP`` requires at least one of
     ``iteration_fidelity``/``data_fidelity``; with neither supplied it
     should raise ``UnsupportedError`` for both truncated-kernel settings.
     """
     train_X = torch.rand(2, 2, device=self.device)
     # Keep train_Y (and thus train_Yvar) on the same device as train_X so
     # that on a CUDA run the only failure mode exercised is the missing
     # fidelity specification, not a tensor device mismatch.
     train_Y = torch.rand(2, 1, device=self.device)
     train_Yvar = torch.full_like(train_Y, 0.01)
     for lin_truncated in (True, False):
         with self.assertRaises(UnsupportedError):
             FixedNoiseMultiFidelityGP(
                 train_X, train_Y, train_Yvar, linear_truncated=lin_truncated
             )