def get_list_surrogate() -> Surrogate:
    return ListSurrogate(
        botorch_submodel_class_per_outcome={"m": get_model_type()},
        submodel_options_per_outcome={"m": {"some_option": "some_value"}},
        submodel_options={"shared_option": "shared_option_value"},
        mll_class=get_mll_type(),
    )
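# NOTE: `get_model_type` and `get_mll_type` are referenced above but not defined in
# this snippet. The sketch below is purely illustrative of what such helpers might
# return (any BoTorch `Model` subclass and any GPyTorch MLL class would do); it is
# not the actual test utility code.
from typing import Type

from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.model import Model
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.marginal_log_likelihood import MarginalLogLikelihood


def get_model_type() -> Type[Model]:
    # Hypothetical choice; the fixture only needs some BoTorch model class.
    return FixedNoiseGP


def get_mll_type() -> Type[MarginalLogLikelihood]:
    # Hypothetical choice; the fixture only needs some MLL class.
    return ExactMarginalLogLikelihood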
def test_construct_shared_shortcut_options(self, mock_construct_inputs):
    surrogate = ListSurrogate(
        botorch_submodel_class=self.botorch_submodel_class_per_outcome[
            self.outcomes[0]
        ],
        submodel_options={"shared_option": True},
        submodel_options_per_outcome={
            outcome: {"individual_option": f"val_{idx}"}
            for idx, outcome in enumerate(self.outcomes)
        },
    )
    surrogate.construct(
        training_data=self.training_data,
        task_features=self.task_features,
        metric_names=self.outcomes,
    )
    # 2 submodels should've been constructed, both of type `botorch_submodel_class`.
    self.assertEqual(len(mock_construct_inputs.call_args_list), 2)
    first_call_args, second_call_args = mock_construct_inputs.call_args_list
    for idx in range(len(mock_construct_inputs.call_args_list)):
        self.assertEqual(
            mock_construct_inputs.call_args_list[idx][1],
            {
                "fidelity_features": [],
                "individual_option": f"val_{idx}",
                "shared_option": True,
                "task_features": [0],
                "training_data": self.training_data[idx],
            },
        )
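# `mock_construct_inputs` above is supplied by a patch decorator that is not shown
# in this snippet. One plausible arrangement is sketched below; the exact patch
# target in the real test module may differ. It records calls to
# `Model.construct_inputs` while delegating to the original classmethod.
from unittest import mock

from botorch.models.model import Model


@mock.patch.object(Model, "construct_inputs", wraps=Model.construct_inputs)
def test_construct_shared_shortcut_options(self, mock_construct_inputs):
    ...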
def test_extract_training_data(self):
    # Base `Surrogate` case.
    self.assertEqual(
        self.acquisition._extract_training_data(surrogate=self.surrogate),
        self.training_data,
    )
    # `ListSurrogate` case.
    list_surrogate = ListSurrogate(botorch_submodel_class=self.botorch_model_class)
    list_surrogate._training_data_per_outcome = {"a": self.training_data}
    self.assertEqual(
        self.acquisition._extract_training_data(surrogate=list_surrogate),
        list_surrogate._training_data_per_outcome,
    )
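# The test above implies that `_extract_training_data` dispatches on the surrogate
# type. A minimal sketch of that dispatch, assuming the attributes exercised in
# this snippet (`training_data`, `training_data_per_outcome`); the actual method
# may differ.
from typing import Dict, Union


def _extract_training_data(
    self, surrogate: Surrogate
) -> Union[TrainingData, Dict[str, TrainingData]]:
    # `ListSurrogate` keeps one `TrainingData` per outcome, while a plain
    # `Surrogate` holds a single `TrainingData`.
    if isinstance(surrogate, ListSurrogate):
        return surrogate.training_data_per_outcome
    return surrogate.training_data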
def setUp(self):
    self.outcomes = ["outcome_1", "outcome_2"]
    self.mll_class = SumMarginalLogLikelihood
    self.dtype = torch.float
    self.search_space_digest = SearchSpaceDigest(
        feature_names=[], bounds=[], task_features=[0]
    )
    self.task_features = [0]
    Xs1, Ys1, Yvars1, bounds, _, _, _ = get_torch_test_data(
        dtype=self.dtype, task_features=self.search_space_digest.task_features
    )
    Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(
        dtype=self.dtype, task_features=self.search_space_digest.task_features
    )
    self.botorch_submodel_class_per_outcome = {
        self.outcomes[0]: choose_model_class(
            Yvars=Yvars1, search_space_digest=self.search_space_digest
        ),
        self.outcomes[1]: choose_model_class(
            Yvars=Yvars2, search_space_digest=self.search_space_digest
        ),
    }
    self.expected_submodel_type = FixedNoiseMultiTaskGP
    for submodel_cls in self.botorch_submodel_class_per_outcome.values():
        self.assertEqual(submodel_cls, FixedNoiseMultiTaskGP)
    self.Xs = Xs1 + Xs2
    self.Ys = Ys1 + Ys2
    self.Yvars = Yvars1 + Yvars2
    self.training_data = [
        TrainingData(X=X, Y=Y, Yvar=Yvar)
        for X, Y, Yvar in zip(self.Xs, self.Ys, self.Yvars)
    ]
    self.submodel_options_per_outcome = {
        self.outcomes[0]: {RANK: 1},
        self.outcomes[1]: {RANK: 2},
    }
    self.surrogate = ListSurrogate(
        botorch_submodel_class_per_outcome=self.botorch_submodel_class_per_outcome,
        mll_class=self.mll_class,
        submodel_options_per_outcome=self.submodel_options_per_outcome,
    )
    self.bounds = [(0.0, 1.0), (1.0, 4.0)]
    self.feature_names = ["x1", "x2"]
def test_fit(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
    surrogate = ListSurrogate(
        botorch_submodel_class_per_outcome=self.botorch_submodel_class_per_outcome,
        mll_class=SumMarginalLogLikelihood,
    )
    # Checking that model is None before `fit` (and `construct`) calls.
    self.assertIsNone(surrogate._model)
    # Should instantiate mll and `fit_gpytorch_model` when `state_dict`
    # is `None`.
    surrogate.fit(
        training_data=self.training_data,
        search_space_digest=SearchSpaceDigest(
            feature_names=self.feature_names,
            bounds=self.bounds,
            task_features=self.task_features,
        ),
        metric_names=self.outcomes,
    )
    mock_state_dict.assert_not_called()
    mock_MLL.assert_called_once()
    mock_fit_gpytorch.assert_called_once()
    mock_state_dict.reset_mock()
    mock_MLL.reset_mock()
    mock_fit_gpytorch.reset_mock()
    # Should `load_state_dict` when `state_dict` is not `None`
    # and `refit` is `False`.
    state_dict = {"state_attribute": "value"}
    surrogate.fit(
        training_data=self.training_data,
        search_space_digest=SearchSpaceDigest(
            feature_names=self.feature_names,
            bounds=self.bounds,
            task_features=self.task_features,
        ),
        metric_names=self.outcomes,
        refit=False,
        state_dict=state_dict,
    )
    mock_state_dict.assert_called_once()
    mock_MLL.assert_not_called()
    mock_fit_gpytorch.assert_not_called()
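# The assertions above imply roughly the following fit logic: load the state dict
# whenever one is provided, and only build an MLL and call `fit_gpytorch_model`
# when there is no state dict or `refit=True`. This is a sketch using the `_model`
# and `mll_class` attributes seen elsewhere in this snippet, not the actual
# `ListSurrogate.fit`.
from botorch.fit import fit_gpytorch_model


def fit(
    self, training_data, search_space_digest, metric_names, refit=True, state_dict=None
):
    # (construction of `self._model` from `training_data` omitted)
    if state_dict is not None:
        # Re-use previously fitted parameters.
        self._model.load_state_dict(state_dict)
    if state_dict is None or refit:
        # Nothing to re-use (or an explicit refit was requested): optimize the
        # marginal log likelihood of the full model list.
        mll = self.mll_class(self._model.likelihood, self._model)
        fit_gpytorch_model(mll)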
def _autoset_surrogate(
    self,
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],
    task_features: List[int],
    fidelity_features: List[int],
    metric_names: List[str],
) -> None:
    """Sets a default surrogate on this model if one was not
    explicitly provided.
    """
    # To determine whether to use `ListSurrogate`, we need to check for
    # the batched multi-output case, so we first see which model would
    # be chosen given the Yvars and the properties of the data.
    botorch_model_class = choose_model_class(
        Yvars=Yvars,
        task_features=task_features,
        fidelity_features=fidelity_features,
    )
    if use_model_list(Xs=Xs, botorch_model_class=botorch_model_class):
        # If using `ListSurrogate` / `ModelListGP`, pick submodels for each
        # outcome.
        botorch_submodel_class_per_outcome = {
            metric_name: choose_model_class(
                Yvars=[Yvar],
                task_features=task_features,
                fidelity_features=fidelity_features,
            )
            for Yvar, metric_name in zip(Yvars, metric_names)
        }
        self._surrogate = ListSurrogate(
            botorch_submodel_class_per_outcome=botorch_submodel_class_per_outcome,
            **self.surrogate_options,
        )
    else:
        # Using a regular `Surrogate`, so the botorch model class picked at the
        # beginning of this function is the one we should use.
        self._surrogate = Surrogate(
            botorch_model_class=botorch_model_class, **self.surrogate_options
        )
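# `use_model_list` is not shown in this snippet. Based on how it is used above, it
# plausibly decides between a single (possibly batched multi-output) model and a
# `ModelListGP` of per-outcome submodels. A sketch under that assumption; the real
# helper may apply additional checks.
from typing import List, Type

import torch
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from botorch.models.model import Model
from torch import Tensor


def use_model_list(Xs: List[Tensor], botorch_model_class: Type[Model]) -> bool:
    if len(Xs) == 1:
        # Single outcome: a single model is always sufficient.
        return False
    if issubclass(botorch_model_class, BatchedMultiOutputGPyTorchModel) and all(
        torch.equal(Xs[0], X) for X in Xs[1:]
    ):
        # All outcomes share the same inputs and the chosen class supports batched
        # multi-output, so one batched model can cover all outcomes.
        return False
    # Otherwise fit one submodel per outcome and wrap them in `ModelListGP`.
    return True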
def test_construct_per_outcome_options_no_Yvar(self, _):
    surrogate = ListSurrogate(
        botorch_submodel_class=MultiTaskGP,
        mll_class=self.mll_class,
        submodel_options_per_outcome=self.submodel_options_per_outcome,
    )
    # Test that splitting the training data works correctly when Yvar is None.
    training_data_no_Yvar = TrainingData(Xs=self.Xs, Ys=self.Ys)
    surrogate.construct(
        training_data=training_data_no_Yvar,
        task_features=self.task_features,
        metric_names=self.outcomes,
    )
    self.assertTrue(
        all(
            trd.Yvar is None
            for trd in surrogate.training_data_per_outcome.values()
        )
    )
    self.assertEqual(len(surrogate.training_data_per_outcome), 2)