Example #1
 def setUp(self):
     qNEI_input_constructor = get_acqf_input_constructor(qNoisyExpectedImprovement)
     self.mock_input_constructor = mock.MagicMock(
         qNEI_input_constructor, side_effect=qNEI_input_constructor
     )
     # Wrap the constructor so that calls can be counted and arguments inspected.
     _register_acqf_input_constructor(
         acqf_cls=DummyACQFClass,
         input_constructor=self.mock_input_constructor,
     )
     self.botorch_model_class = SingleTaskGP
     self.surrogate = Surrogate(botorch_model_class=self.botorch_model_class)
     self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
     self.Y = torch.tensor([[3.0], [4.0]])
     self.Yvar = torch.tensor([[0.0], [2.0]])
     self.training_data = TrainingData.from_block_design(
         X=self.X, Y=self.Y, Yvar=self.Yvar
     )
     self.fidelity_features = [2]
     self.surrogate.construct(
         training_data=self.training_data, fidelity_features=self.fidelity_features
     )
     self.search_space_digest = SearchSpaceDigest(
         feature_names=["a", "b", "c"],
         bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
         target_fidelities={2: 1.0},
     )
     self.botorch_acqf_class = DummyACQFClass
     self.objective_weights = torch.tensor([1.0])
     self.objective_thresholds = None
     self.pending_observations = [torch.tensor([[1.0, 3.0, 4.0]])]
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = None
     self.fixed_features = {1: 2.0}
     self.options = {"best_f": 0.0}
     self.acquisition = Acquisition(
         botorch_acqf_class=self.botorch_acqf_class,
         surrogate=self.surrogate,
         search_space_digest=self.search_space_digest,
         objective_weights=self.objective_weights,
         objective_thresholds=self.objective_thresholds,
         pending_observations=self.pending_observations,
         outcome_constraints=self.outcome_constraints,
         linear_constraints=self.linear_constraints,
         fixed_features=self.fixed_features,
         options=self.options,
     )
     self.inequality_constraints = [
         (torch.tensor([0, 1]), torch.tensor([-1.0, 1.0]), 1)
     ]
     self.rounding_func = lambda x: x
     self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}
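
The `setUp` above wraps the registered qNEI input constructor in a `mock.MagicMock` whose `side_effect` is the original callable, so the constructor still runs unchanged while the test can count calls and inspect arguments. Below is a minimal, standard-library-only sketch of that wrapping pattern; `build_inputs` is a hypothetical stand-in and not part of BoTorch:

    from unittest import mock

    def build_inputs(model, training_data, **kwargs):
        # Hypothetical stand-in for an acquisition-function input constructor.
        return {"model": model, "X_baseline": training_data}

    # Wrap the real callable: `side_effect` delegates to it, so behavior is
    # unchanged, while the MagicMock records every call for later inspection.
    mock_build_inputs = mock.MagicMock(build_inputs, side_effect=build_inputs)

    result = mock_build_inputs("my_model", "my_data")
    assert result == {"model": "my_model", "X_baseline": "my_data"}
    assert mock_build_inputs.call_count == 1
    mock_build_inputs.assert_called_once_with("my_model", "my_data")
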
Example #2
 def test_construct_inputs(self):
     for batch_shape, dtype in itertools.product(
         (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
         tkwargs = {"device": self.device, "dtype": dtype}
         model, model_kwargs = self._get_model_and_data(
             batch_shape=batch_shape, m=2, **tkwargs)
         training_data = TrainingData.from_block_design(
             X=model_kwargs["train_X"], Y=model_kwargs["train_Y"])
         data_dict = model.construct_inputs(training_data)
         self.assertTrue(
             torch.equal(data_dict["train_X"], model_kwargs["train_X"]))
         self.assertTrue(
             torch.equal(data_dict["train_Y"], model_kwargs["train_Y"]))
Example #3
 def test_FixedNoiseMultiTaskGP_construct_inputs(self):
     for dtype in (torch.float, torch.double):
         tkwargs = {"device": self.device, "dtype": dtype}
         (
             model,
             train_X,
             train_Y,
             train_Yvar,
         ) = _get_fixed_noise_model_and_training_data(**tkwargs)
         td_no_Yvar = TrainingData.from_block_design(X=train_X, Y=train_Y)
         # Test that Yvar is required.
         with self.assertRaisesRegex(ValueError, "Yvar required"):
             model.construct_inputs(td_no_Yvar)
         training_data = TrainingData.from_block_design(
             X=train_X, Y=train_Y, Yvar=train_Yvar
         )
         # Test that task features are required.
         with self.assertRaisesRegex(ValueError, "`task_features` required"):
             model.construct_inputs(training_data)
         # Validate prior config.
         with self.assertRaisesRegex(
             ValueError, ".* only config for LKJ prior is supported"
         ):
             data_dict = model.construct_inputs(
                 training_data,
                 task_features=[0],
                 prior_config={"use_LKJ_prior": False},
             )
         data_dict = model.construct_inputs(
             training_data,
             task_features=[0],
             prior_config={"use_LKJ_prior": True, "eta": 0.6},
         )
         self.assertTrue(torch.equal(data_dict["train_X"], train_X))
         self.assertTrue(torch.equal(data_dict["train_Y"], train_Y))
         self.assertTrue(torch.equal(data_dict["train_Yvar"], train_Yvar))
         self.assertEqual(data_dict["task_feature"], 0)
         self.assertIsInstance(data_dict["task_covar_prior"], LKJCovariancePrior)
Example #4
 def test_construct_inputs(self):
     d, m = 3, 1
     for batch_shape, ncat, dtype in itertools.product(
         (torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
     ):
         tkwargs = {"device": self.device, "dtype": dtype}
         train_X, train_Y = _get_random_data(
             batch_shape=batch_shape, m=m, d=d, **tkwargs
         )
         cat_dims = list(range(ncat))
         training_data = TrainingData.from_block_design(X=train_X, Y=train_Y)
         kwarg_dict = MixedSingleTaskGP.construct_inputs(
             training_data, categorical_features=cat_dims
         )
         self.assertTrue(torch.equal(kwarg_dict["train_X"], train_X))
         self.assertTrue(torch.equal(kwarg_dict["train_Y"], train_Y))
         self.assertEqual(kwarg_dict["cat_dims"], cat_dims)
         self.assertIsNone(kwarg_dict["likelihood"])
Example #5
 def test_MultiTaskGP_construct_inputs(self):
     for dtype in (torch.float, torch.double):
         tkwargs = {"device": self.device, "dtype": dtype}
         model, train_X, train_Y = _get_model_and_training_data(**tkwargs)
         training_data = TrainingData.from_block_design(X=train_X, Y=train_Y)
         # Test that task features are required.
         with self.assertRaisesRegex(ValueError, "`task_features` required"):
             model.construct_inputs(training_data)
         # Validate prior config.
         with self.assertRaisesRegex(
             ValueError, ".* only config for LKJ prior is supported"
         ):
             data_dict = model.construct_inputs(
                 training_data,
                 task_features=[0],
                 prior_config={"use_LKJ_prior": False},
             )
         # Validate eta.
         with self.assertRaisesRegex(ValueError, "eta must be a real number"):
             data_dict = model.construct_inputs(
                 training_data,
                 task_features=[0],
                 prior_config={"use_LKJ_prior": True, "eta": "not_number"},
             )
         # Test that passing both `prior` and `prior_config` kwargs at the
         # same time raises an error.
         with self.assertRaisesRegex(ValueError, ".* one of `prior` and `prior_"):
             data_dict = model.construct_inputs(
                 training_data,
                 task_features=[0],
                 task_covar_prior=1,
                 prior_config={"use_LKJ_prior": True, "eta": "not_number"},
             )
         data_dict = model.construct_inputs(
             training_data,
             task_features=[0],
             prior_config={"use_LKJ_prior": True, "eta": 0.6},
         )
         self.assertTrue(torch.equal(data_dict["train_X"], train_X))
         self.assertTrue(torch.equal(data_dict["train_Y"], train_Y))
         self.assertEqual(data_dict["task_feature"], 0)
         self.assertIsInstance(data_dict["task_covar_prior"], LKJCovariancePrior)
Example #6
 def test_construct_inputs(self):
     for infer_noise, dtype in itertools.product(
         (True, False), (torch.float, torch.double)
     ):
         tkwargs = {"device": self.device, "dtype": dtype}
         train_X, train_Y, train_Yvar, model = self._get_data_and_model(
             infer_noise=infer_noise, **tkwargs
         )
         training_data = TrainingData.from_block_design(
             X=train_X,
             Y=train_Y,
             Yvar=train_Yvar,
         )
         data_dict = model.construct_inputs(training_data)
         if infer_noise:
             self.assertTrue("train_Yvar" not in data_dict)
         else:
             self.assertTrue(torch.equal(data_dict["train_Yvar"], train_Yvar))
         self.assertTrue(torch.equal(data_dict["train_X"], train_X))
         self.assertTrue(torch.equal(data_dict["train_Y"], train_Y))
Example #7
 def test_construct_inputs(self):
     for (iteration_fidelity, data_fidelity) in self.FIDELITY_TEST_PAIRS:
         for batch_shape, m, dtype, lin_trunc in itertools.product(
             (torch.Size(), torch.Size([2])),
             (1, 2),
             (torch.float, torch.double),
             (False, True),
         ):
             tkwargs = {"device": self.device, "dtype": dtype}
             model, model_kwargs = self._get_model_and_data(
                 iteration_fidelity=iteration_fidelity,
                 data_fidelity=data_fidelity,
                 batch_shape=batch_shape,
                 m=m,
                 lin_truncated=lin_trunc,
                 **tkwargs,
             )
             # len(Xs) == len(Ys) == 1
             training_data = TrainingData.from_block_design(
                 X=model_kwargs["train_X"],
                 Y=model_kwargs["train_Y"],
                 Yvar=torch.full_like(model_kwargs["train_Y"], 0.01),
             )
             # missing fidelity features
             with self.assertRaises(ValueError):
                 model.construct_inputs(training_data)
             data_dict = model.construct_inputs(training_data,
                                                fidelity_features=[1])
             self.assertTrue("train_Yvar" not in data_dict)
             self.assertTrue("data_fidelity" in data_dict)
             self.assertEqual(data_dict["data_fidelity"], 1)
             data_dict = model.construct_inputs(training_data,
                                                fidelity_features=[1])
             self.assertTrue(
                 torch.equal(data_dict["train_X"], model_kwargs["train_X"]))
             self.assertTrue(
                 torch.equal(data_dict["train_Y"], model_kwargs["train_Y"]))
Example #8
    def test_TrainingData(self):

        # block design, without variance observations
        X_bd = torch.rand(2, 4, 3)
        Y_bd = torch.rand(2, 4, 2)
        training_data_bd = TrainingData.from_block_design(X_bd, Y_bd)
        self.assertTrue(training_data_bd.is_block_design)
        self.assertTrue(torch.equal(training_data_bd.X, X_bd))
        self.assertTrue(torch.equal(training_data_bd.Y, Y_bd))
        self.assertIsNone(training_data_bd.Yvar)
        self.assertTrue(
            all(torch.equal(Xi, X_bd) for Xi in training_data_bd.Xs))
        self.assertTrue(torch.equal(training_data_bd.Ys[0], Y_bd[..., :1]))
        self.assertTrue(torch.equal(training_data_bd.Ys[1], Y_bd[..., 1:]))
        self.assertIsNone(training_data_bd.Yvars)
        # test equality check with null Yvars and one-element Xs and Ys
        self.assertEqual(
            training_data_bd,
            TrainingData(Xs=[X_bd] * 2, Ys=list(torch.split(Y_bd, 1, dim=-1))),
        )

        # block design, with variance observations
        Yvar_bd = torch.rand(2, 4, 2)
        training_data_bd = TrainingData.from_block_design(X_bd, Y_bd, Yvar_bd)
        self.assertTrue(training_data_bd.is_block_design)
        self.assertTrue(torch.equal(training_data_bd.X, X_bd))
        self.assertTrue(torch.equal(training_data_bd.Y, Y_bd))
        self.assertTrue(torch.equal(training_data_bd.Yvar, Yvar_bd))
        self.assertTrue(
            all(torch.equal(Xi, X_bd) for Xi in training_data_bd.Xs))
        self.assertTrue(torch.equal(training_data_bd.Ys[0], Y_bd[..., :1]))
        self.assertTrue(torch.equal(training_data_bd.Ys[1], Y_bd[..., 1:]))
        self.assertTrue(
            torch.equal(training_data_bd.Yvars[0], Yvar_bd[..., :1]))
        self.assertTrue(
            torch.equal(training_data_bd.Yvars[1], Yvar_bd[..., 1:]))

        # test equality check with non-null Yvars and one-element Xs and Ys
        self.assertEqual(
            training_data_bd,
            TrainingData(
                Xs=[X_bd] * 2,
                Ys=list(torch.split(Y_bd, 1, dim=-1)),
                Yvars=list(torch.split(Yvar_bd, 1, dim=-1)),
            ),
        )

        # non-block design, without variance observations
        Xs = [torch.rand(2, 4, 3), torch.rand(2, 3, 3)]
        Ys = [torch.rand(2, 4, 2), torch.rand(2, 3, 2)]
        training_data_nbd = TrainingData(Xs, Ys)
        self.assertFalse(training_data_nbd.is_block_design)
        self.assertTrue(torch.equal(training_data_nbd.Xs[0], Xs[0]))
        self.assertTrue(torch.equal(training_data_nbd.Xs[1], Xs[1]))
        self.assertTrue(torch.equal(training_data_nbd.Ys[0], Ys[0]))
        self.assertTrue(torch.equal(training_data_nbd.Ys[1], Ys[1]))
        self.assertIsNone(training_data_nbd.Yvars)
        with self.assertRaises(UnsupportedError):
            training_data_nbd.X
        with self.assertRaises(UnsupportedError):
            training_data_nbd.Y
        self.assertIsNone(training_data_nbd.Yvar)

        # test equality check between two training data objects with
        # differently-sized Xs and Ys, where only one includes non-null Yvars
        self.assertNotEqual(training_data_nbd, training_data_bd)
        # test equality of two training data objects with different-length Xs/Ys
        training_data_nbd_X = TrainingData(
            Xs=Xs + [torch.rand(2, 2, 3)],
            Ys=Ys,
        )
        self.assertNotEqual(training_data_nbd, training_data_nbd_X)
        training_data_nbd_Y = TrainingData(
            Xs=Xs,
            Ys=Ys + [torch.rand(2, 2, 2)],
        )
        self.assertNotEqual(training_data_nbd, training_data_nbd_Y)

        # non-block design, with variance observations
        Yvars = [torch.rand(2, 4, 2), torch.rand(2, 3, 2)]
        training_data_nbd_yvar = TrainingData(Xs, Ys, Yvars)
        self.assertFalse(training_data_nbd_yvar.is_block_design)
        self.assertTrue(torch.equal(training_data_nbd_yvar.Xs[0], Xs[0]))
        self.assertTrue(torch.equal(training_data_nbd_yvar.Xs[1], Xs[1]))
        self.assertTrue(torch.equal(training_data_nbd_yvar.Ys[0], Ys[0]))
        self.assertTrue(torch.equal(training_data_nbd_yvar.Ys[1], Ys[1]))
        self.assertTrue(torch.equal(training_data_nbd_yvar.Yvars[0], Yvars[0]))
        self.assertTrue(torch.equal(training_data_nbd_yvar.Yvars[1], Yvars[1]))
        with self.assertRaises(UnsupportedError):
            training_data_nbd_yvar.X
        with self.assertRaises(UnsupportedError):
            training_data_nbd_yvar.Y
        with self.assertRaises(UnsupportedError):
            training_data_nbd_yvar.Yvar

        # test equality check with same-length Xs and Ys in the two training
        # data objects, but with variance observations only in one
        self.assertNotEqual(training_data_nbd, training_data_nbd_yvar)
        # test equality check with differently-sized Xs and Ys in the two
        # training data objects
        self.assertNotEqual(training_data_nbd_yvar, training_data_bd)

        # implicit block design, without variance observations
        X = torch.rand(2, 4, 3)
        Xs = [X] * 2
        Ys = [torch.rand(2, 4, 2), torch.rand(2, 4, 2)]
        training_data = TrainingData(Xs, Ys)
        self.assertTrue(training_data.is_block_design)
        self.assertTrue(torch.equal(training_data.X, X))
        self.assertTrue(torch.equal(training_data.Y, torch.cat(Ys, dim=-1)))
        self.assertIsNone(training_data.Yvar)
        self.assertTrue(torch.equal(training_data.Xs[0], X))
        self.assertTrue(torch.equal(training_data.Xs[1], X))
        self.assertTrue(torch.equal(training_data.Ys[0], Ys[0]))
        self.assertTrue(torch.equal(training_data.Ys[1], Ys[1]))
        self.assertIsNone(training_data.Yvars)

        # implicit block design, with variance observations
        Yvars = [torch.rand(2, 4, 2), torch.rand(2, 4, 2)]
        training_data = TrainingData(Xs, Ys, Yvars)
        self.assertTrue(training_data.is_block_design)
        self.assertTrue(torch.equal(training_data.X, X))
        self.assertTrue(torch.equal(training_data.Y, torch.cat(Ys, dim=-1)))
        self.assertTrue(
            torch.equal(training_data.Yvar, torch.cat(Yvars, dim=-1)))
        self.assertTrue(torch.equal(training_data.Xs[0], X))
        self.assertTrue(torch.equal(training_data.Xs[1], X))
        self.assertTrue(torch.equal(training_data.Ys[0], Ys[0]))
        self.assertTrue(torch.equal(training_data.Ys[1], Ys[1]))
        self.assertTrue(torch.equal(training_data.Yvars[0], Yvars[0]))
        self.assertTrue(torch.equal(training_data.Yvars[1], Yvars[1]))

        # test equality with same Xs and Ys but different-length Yvars
        self.assertNotEqual(
            TrainingData(Xs, Ys, Yvars),
            TrainingData(Xs, Ys, Yvars[:1]),
        )
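
For reference, here is a condensed sketch of the two construction paths the test above exercises. The import path and tensor shapes below are assumptions for illustration only:

    import torch
    # Assumed import location for the BoTorch versions these tests target.
    from botorch.utils.containers import TrainingData

    X = torch.rand(4, 3)     # 4 points, 3 features
    Y = torch.rand(4, 2)     # 2 outcomes observed at the same X
    Yvar = torch.rand(4, 2)  # optional observation-noise variances

    # Block design: every outcome shares the same X, so the aggregate
    # `X`/`Y`/`Yvar` properties are available.
    bd = TrainingData.from_block_design(X=X, Y=Y, Yvar=Yvar)
    assert bd.is_block_design
    assert torch.equal(bd.Y, Y)

    # Non-block design: per-outcome Xs/Ys of different sizes; `is_block_design`
    # is False and accessing `X`/`Y`/`Yvar` raises `UnsupportedError`.
    nbd = TrainingData(
        Xs=[torch.rand(4, 3), torch.rand(3, 3)],
        Ys=[torch.rand(4, 2), torch.rand(3, 2)],
    )
    assert not nbd.is_block_design
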
Example #9
    def test_update(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        # Check that correct arguments are passed to `fit`.
        with patch(f"{SURROGATE_PATH}.Surrogate.fit") as mock_fit:
            # Call `fit` by default
            self.surrogate.update(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                refit=self.refit,
                state_dict={"key": "val"},
            )
            mock_fit.assert_called_with(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                candidate_metadata=None,
                refit=self.refit,
                state_dict={"key": "val"},
            )

        # Check that the training data is correctly passed through to the
        # BoTorch `Model`.
        Xs, Ys, Yvars, bounds, _, _, _ = get_torch_test_data(dtype=self.dtype,
                                                             offset=1.0)
        training_data = TrainingData.from_block_design(X=Xs[0],
                                                       Y=Ys[0],
                                                       Yvar=Yvars[0])
        surrogate_kwargs = self.botorch_model_class.construct_inputs(
            training_data)
        self.surrogate.update(
            training_data=training_data,
            search_space_digest=self.search_space_digest,
            metric_names=self.metric_names,
            refit=self.refit,
            state_dict={"key": "val"},
        )
        self.assertTrue(
            torch.equal(
                self.surrogate.model.train_inputs[0],
                surrogate_kwargs.get("train_X"),
            ))
        self.assertTrue(
            torch.equal(
                self.surrogate.model.train_targets,
                surrogate_kwargs.get("train_Y").squeeze(1),
            ))

        # If the surrogate should not be reconstructed, check that an error
        # is raised.
        self.surrogate._constructed_manually = True
        with self.assertRaisesRegex(NotImplementedError,
                                    ".* constructed manually"):
            self.surrogate.update(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                refit=self.refit,
            )