Example 1
 def setUp(self):
     self.botorch_model_class = SingleTaskGP
     self.surrogate = Surrogate(botorch_model_class=self.botorch_model_class)
     self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
     self.Y = torch.tensor([[3.0], [4.0]])
     self.Yvar = torch.tensor([[0.0], [2.0]])
     self.training_data = TrainingData.from_block_design(
         X=self.X, Y=self.Y, Yvar=self.Yvar
     )
     self.fidelity_features = [2]
     self.surrogate.construct(
         training_data=self.training_data, fidelity_features=self.fidelity_features
     )
     self.acquisition_options = {Keys.NUM_FANTASIES: 64}
     self.search_space_digest = SearchSpaceDigest(
         feature_names=["a", "b", "c"],
         bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
         target_fidelities={2: 1.0},
     )
     self.objective_weights = torch.tensor([1.0])
     self.pending_observations = [
         torch.tensor([[1.0, 3.0, 4.0]]),
         torch.tensor([[2.0, 6.0, 8.0]]),
     ]
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = None
     self.fixed_features = {1: 2.0}
     self.options = {
         Keys.FIDELITY_WEIGHTS: {2: 1.0},
         Keys.COST_INTERCEPT: 1.0,
         Keys.NUM_TRACE_OBSERVATIONS: 0,
     }
Example 2
 def setUp(self):
     self.botorch_model_class = SingleTaskGP
     self.mll_class = ExactMarginalLogLikelihood
     self.device = torch.device("cpu")
     self.dtype = torch.float
     self.Xs, self.Ys, self.Yvars, self.bounds, _, _, _ = get_torch_test_data(
         dtype=self.dtype)
     self.training_data = TrainingData(X=self.Xs[0],
                                       Y=self.Ys[0],
                                       Yvar=self.Yvars[0])
     self.surrogate_kwargs = self.botorch_model_class.construct_inputs(
         self.training_data)
     self.surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class,
         mll_class=self.mll_class)
     self.task_features = []
     self.feature_names = ["x1", "x2"]
     self.metric_names = ["y"]
     self.fidelity_features = []
     self.target_fidelities = {1: 1.0}
     self.fixed_features = {1: 2.0}
     self.refit = True
     self.objective_weights = torch.tensor([-1.0, 1.0],
                                           dtype=self.dtype,
                                           device=self.device)
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = (
         torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
         torch.tensor([[0.5], [1.0]]),
     )
     self.options = {}
Example 3
    def test_construct(self, mock_GP):
        with self.assertRaises(NotImplementedError):
            # Base `Model` does not implement `construct_inputs`.
            Surrogate(botorch_model_class=Model).construct(
                training_data=self.training_data,
                fidelity_features=self.search_space_digest.fidelity_features,
            )
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        mock_GP.assert_called_once()
        call_kwargs = mock_GP.call_args[1]
        self.assertTrue(torch.equal(call_kwargs["train_X"], self.Xs[0]))
        self.assertTrue(torch.equal(call_kwargs["train_Y"], self.Ys[0]))
        self.assertFalse(self.surrogate._constructed_manually)

        # Check that `model_options` passed to the `Surrogate` constructor are
        # properly propagated.
        with patch.object(
                SingleTaskGP,
                "construct_inputs",
                wraps=SingleTaskGP.construct_inputs) as mock_construct_inputs:
            surrogate = Surrogate(
                botorch_model_class=self.botorch_model_class,
                mll_class=self.mll_class,
                model_options={"some_option": "some_value"},
            )
            surrogate.construct(self.training_data)
            mock_construct_inputs.assert_called_with(
                training_data=self.training_data, some_option="some_value")
Example 4
 def setUp(self):
     qNEI_input_constructor = get_acqf_input_constructor(
         qNoisyExpectedImprovement)
     self.mock_input_constructor = mock.MagicMock(
         qNEI_input_constructor, side_effect=qNEI_input_constructor)
     # Adding wrapping here to be able to count calls and inspect arguments.
     _register_acqf_input_constructor(
         acqf_cls=DummyACQFClass,
         input_constructor=self.mock_input_constructor,
     )
     self.botorch_model_class = SingleTaskGP
     self.surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class)
     self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
     self.Y = torch.tensor([[3.0], [4.0]])
     self.Yvar = torch.tensor([[0.0], [2.0]])
     self.training_data = TrainingData.from_block_design(X=self.X,
                                                         Y=self.Y,
                                                         Yvar=self.Yvar)
     self.fidelity_features = [2]
     self.surrogate.construct(training_data=self.training_data,
                              fidelity_features=self.fidelity_features)
     self.search_space_digest = SearchSpaceDigest(
         feature_names=["a", "b", "c"],
         bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
         target_fidelities={2: 1.0},
     )
     self.botorch_acqf_class = DummyACQFClass
     self.objective_weights = torch.tensor([1.0])
     self.objective_thresholds = None
     self.pending_observations = [torch.tensor([[1.0, 3.0, 4.0]])]
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = None
     self.fixed_features = {1: 2.0}
     self.options = {"best_f": 0.0}
     self.acquisition = Acquisition(
         botorch_acqf_class=self.botorch_acqf_class,
         surrogate=self.surrogate,
         search_space_digest=self.search_space_digest,
         objective_weights=self.objective_weights,
         objective_thresholds=self.objective_thresholds,
         pending_observations=self.pending_observations,
         outcome_constraints=self.outcome_constraints,
         linear_constraints=self.linear_constraints,
         fixed_features=self.fixed_features,
         options=self.options,
     )
     self.inequality_constraints = [(torch.tensor([0, 1]),
                                     torch.tensor([-1.0, 1.0]), 1)]
     self.rounding_func = lambda x: x
     self.optimizer_options = {
         Keys.NUM_RESTARTS: 40,
         Keys.RAW_SAMPLES: 1024
     }
Example 5
 def test_construct(self, mock_GP):
     base_surrogate = Surrogate(botorch_model_class=Model)
     with self.assertRaisesRegex(TypeError,
                                 "Cannot construct an abstract model."):
         base_surrogate.construct(
             training_data=self.training_data,
             fidelity_features=self.fidelity_features,
         )
     self.surrogate.construct(training_data=self.training_data,
                              fidelity_features=self.fidelity_features)
     mock_GP.assert_called_with(train_X=self.X, train_Y=self.Y)
Example 6
 def test_init(self, mock_Likelihood, mock_Kernel):
     self.assertEqual(self.surrogate.botorch_model_class,
                      self.botorch_model_class)
     self.assertEqual(self.surrogate.mll_class, self.mll_class)
     with self.assertRaisesRegex(NotImplementedError,
                                 "Customizing likelihood"):
         Surrogate(botorch_model_class=self.botorch_model_class,
                   likelihood=Likelihood())
     with self.assertRaisesRegex(NotImplementedError, "Customizing kernel"):
         Surrogate(botorch_model_class=self.botorch_model_class,
                   kernel_class=Kernel())
Example 7
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0, 4.0, 2.0], [4.0, 3.0, 1.0]])
        self.Yvar = torch.tensor([[0.0, 2.0, 1.0], [2.0, 0.0, 1.0]])
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.fidelity_features = [2]
        self.surrogate.construct(training_data=self.training_data)

        self.bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
        self.botorch_acqf_class = DummyACQFClass
        self.objective_weights = torch.tensor([1.0, -1.0, 0.0])
        self.objective_thresholds = torch.tensor([2.0, 1.0, float("nan")])
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[1.0, 3.0, 4.0]]),
        ]
        self.outcome_constraints = (
            torch.tensor([[1.0, 0.5, 0.5]]),
            torch.tensor([[0.5]]),
        )
        self.con_tfs = get_outcome_constraint_transforms(
            self.outcome_constraints)
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.target_fidelities = {2: 1.0}
        self.options = {}
        self.acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        self.inequality_constraints = [(torch.tensor([0, 1]),
                                        torch.tensor([-1.0, 1.0]), 1)]
        self.rounding_func = lambda x: x
        self.optimizer_options = {
            Keys.NUM_RESTARTS: 40,
            Keys.RAW_SAMPLES: 1024
        }
Example 8
 def test_mll_options(self, _):
     mock_mll = MagicMock(self.mll_class)
     surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class,
         mll_class=mock_mll,
         mll_options={"some_option": "some_value"},
     )
     surrogate.fit(
         training_data=self.training_data,
         search_space_digest=self.search_space_digest,
         metric_names=self.metric_names,
         refit=self.refit,
     )
     self.assertEqual(mock_mll.call_args[1]["some_option"], "some_value")
Example 9
    def compute_model_dependencies(
        self,
        surrogate: Surrogate,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        target_fidelities: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        options: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        # Compute generic multi-fidelity dependencies first
        dependencies = super().compute_model_dependencies(
            surrogate=surrogate,
            bounds=bounds,
            objective_weights=objective_weights,
            pending_observations=pending_observations,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            options=options,
        )

        _, best_point_acqf_value = surrogate.best_in_sample_point(
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        dependencies.update({Keys.CURRENT_VALUE: best_point_acqf_value})
        return dependencies
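
Downstream, the dependency dict returned here is merged into the acquisition function's constructor kwargs; Example 21 mocks `compute_model_dependencies` to return `{"current_value": 1.2}` and then checks that value among the input-constructor arguments. A rough sketch of that flow (the `acquisition`, `surrogate`, `bounds`, and `options` objects stand in for whatever the caller already holds; this is an illustration, not the library's exact wiring):

deps = acquisition.compute_model_dependencies(
    surrogate=surrogate,
    bounds=bounds,
    objective_weights=torch.tensor([1.0]),
    target_fidelities={2: 1.0},
)
# Keys.CURRENT_VALUE then travels to the BoTorch acqf constructor, e.g. as the
# `current_value` argument of a knowledge-gradient acquisition function.
acqf_input_kwargs = {**options, **deps}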
Example 10
 def test_construct(self, mock_GP):
     with self.assertRaises(NotImplementedError):
         # Base `Model` does not implement `construct_inputs`.
         Surrogate(botorch_model_class=Model).construct(
             training_data=self.training_data,
             fidelity_features=self.fidelity_features,
         )
     self.surrogate.construct(training_data=self.training_data,
                              fidelity_features=self.fidelity_features)
     mock_GP.assert_called_with(train_X=self.Xs[0], train_Y=self.Ys[0])
Example 11
 def setUp(self):
     self.botorch_model_class = SingleTaskGP
     self.surrogate = Surrogate(botorch_model_class=self.botorch_model_class)
     self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
     self.Y = torch.tensor([[3.0], [4.0]])
     self.Yvar = torch.tensor([[0.0], [2.0]])
     self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
     self.fidelity_features = [2]
     self.surrogate.construct(
         training_data=self.training_data, fidelity_features=self.fidelity_features
     )
     self.search_space_digest = SearchSpaceDigest(
         feature_names=["a", "b", "c"],
         bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
         target_fidelities={2: 1.0},
     )
     self.botorch_acqf_class = DummyACQFClass
     self.objective_weights = torch.tensor([1.0])
     self.objective_thresholds = None
     self.pending_observations = [torch.tensor([[1.0, 3.0, 4.0]])]
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = None
     self.fixed_features = {1: 2.0}
     self.options = {"best_f": 0.0}
     self.acquisition = Acquisition(
         surrogate=self.surrogate,
         search_space_digest=self.search_space_digest,
         objective_weights=self.objective_weights,
         objective_thresholds=self.objective_thresholds,
         botorch_acqf_class=self.botorch_acqf_class,
         pending_observations=self.pending_observations,
         outcome_constraints=self.outcome_constraints,
         linear_constraints=self.linear_constraints,
         fixed_features=self.fixed_features,
         options=self.options,
     )
     self.inequality_constraints = [
         (torch.tensor([0, 1]), torch.tensor([-1.0, 1.0]), 1)
     ]
     self.rounding_func = lambda x: x
     self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}
Example 12
class AcquisitionSetUp:
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0], [4.0]])
        self.Yvar = torch.tensor([[0.0], [2.0]])
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.fidelity_features = [2]
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["a", "b", "c"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
            target_fidelities={2: 1.0},
        )
        self.botorch_acqf_class = qMaxValueEntropy
        self.objective_weights = torch.tensor([1.0])
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[2.0, 6.0, 8.0]]),
        ]
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.options = {
            Keys.FIDELITY_WEIGHTS: {2: 1.0},
            Keys.COST_INTERCEPT: 1.0,
            Keys.NUM_TRACE_OBSERVATIONS: 0,
        }
        self.optimizer_options = {
            Keys.NUM_RESTARTS: 40,
            Keys.RAW_SAMPLES: 1024,
            Keys.FRAC_RANDOM: 0.2,
        }
        self.inequality_constraints = [(torch.tensor([0, 1]),
                                        torch.tensor([-1.0, 1.0]), 1)]
Example 13
 def setUp(self):
     self.botorch_model_class = SingleTaskGP
     self.surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class)
     self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
     self.Y = torch.tensor([[3.0], [4.0]])
     self.Yvar = torch.tensor([[0.0], [2.0]])
     self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
     self.fidelity_features = [2]
     self.surrogate.construct(training_data=self.training_data,
                              fidelity_features=self.fidelity_features)
     self.acquisition_options = {Keys.NUM_FANTASIES: 64}
     self.search_space_digest = SearchSpaceDigest(
         feature_names=["a", "b", "c"],
         bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
         target_fidelities={2: 1.0},
     )
     self.objective_weights = torch.tensor([1.0])
     self.pending_observations = [
         torch.tensor([[1.0, 3.0, 4.0]]),
         torch.tensor([[2.0, 6.0, 8.0]]),
     ]
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = None
     self.fixed_features = {1: 2.0}
     self.options = {
         Keys.FIDELITY_WEIGHTS: {2: 1.0},
         Keys.COST_INTERCEPT: 1.0,
         Keys.NUM_TRACE_OBSERVATIONS: 0,
     }
     with patch(f"{MFKG_PATH}.__init__", return_value=None):
         # We don't actually need to instantiate the BoTorch acqf in these tests.
         self.acquisition = MultiFidelityAcquisition(
             surrogate=self.surrogate,
             search_space_digest=self.search_space_digest,
             objective_weights=self.objective_weights,
             botorch_acqf_class=qMultiFidelityKnowledgeGradient,
         )
Example 14
 def test_fit(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
     surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class,
         mll_class=ExactMarginalLogLikelihood,
     )
     # Checking that model is None before `fit` (and `construct`) calls.
     self.assertIsNone(surrogate._model)
     # Should instantiate the MLL and call `fit_gpytorch_model` when
     # `state_dict` is `None`.
     surrogate.fit(
         training_data=self.training_data,
         search_space_digest=self.search_space_digest,
         metric_names=self.metric_names,
         refit=self.refit,
     )
     mock_state_dict.assert_not_called()
     mock_MLL.assert_called_once()
     mock_fit_gpytorch.assert_called_once()
     mock_state_dict.reset_mock()
     mock_MLL.reset_mock()
     mock_fit_gpytorch.reset_mock()
     # Should `load_state_dict` when `state_dict` is not `None`
     # and `refit` is `False`.
     state_dict = {"state_attribute": "value"}
     surrogate.fit(
         training_data=self.training_data,
         search_space_digest=self.search_space_digest,
         metric_names=self.metric_names,
         refit=False,
         state_dict=state_dict,
     )
     mock_state_dict.assert_called_once()
     mock_MLL.assert_not_called()
     mock_fit_gpytorch.assert_not_called()
Example 15
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.acquisition_class = KnowledgeGradient
        self.botorch_acqf_class = qKnowledgeGradient
        self.acquisition_options = {Keys.NUM_FANTASIES: 64}
        self.model = BoTorchModel(
            surrogate=self.surrogate,
            acquisition_class=self.acquisition_class,
            acquisition_options=self.acquisition_options,
        )

        self.dtype = torch.float
        Xs1, Ys1, Yvars1, self.bounds, _, _, _ = get_torch_test_data(
            dtype=self.dtype)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=self.dtype,
                                                           offset=1.0)
        self.Xs = Xs1 + Xs2
        self.Ys = Ys1 + Ys2
        self.Yvars = Yvars1 + Yvars2
        self.X = Xs1[0]
        self.Y = Ys1[0]
        self.Yvar = Yvars1[0]
        self.X2 = Xs2[0]
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2", "x3"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
            task_features=[],
            fidelity_features=[2],
            target_fidelities={1: 1.0},
        )
        self.metric_names = ["y"]
        self.metric_names_for_list_surrogate = ["y1", "y2"]
        self.candidate_metadata = []
        self.optimizer_options = {
            Keys.NUM_RESTARTS: 40,
            Keys.RAW_SAMPLES: 1024
        }
        self.model_gen_options = {
            Keys.OPTIMIZER_KWARGS: self.optimizer_options
        }
        self.objective_weights = torch.tensor([1.0])
        self.objective_thresholds = None
        self.outcome_constraints = None
        self.linear_constraints = None
        self.fixed_features = None
        self.pending_observations = None
        self.rounding_func = "func"
Example 16
 def _autoset_surrogate(
     self,
     Xs: List[Tensor],
     Ys: List[Tensor],
     Yvars: List[Tensor],
     task_features: List[int],
     fidelity_features: List[int],
     metric_names: List[str],
 ) -> None:
     """Sets a default surrogate on this model if one was not explicitly
     provided.
     """
     # To determine whether to use `ListSurrogate`, we need to check for
     # the batched multi-output case, so we first see which model would
     # be chosen given the Yvars and the properties of data.
     botorch_model_class = choose_model_class(
         Yvars=Yvars,
         task_features=task_features,
         fidelity_features=fidelity_features,
     )
     if use_model_list(Xs=Xs, botorch_model_class=botorch_model_class):
         # If using `ListSurrogate` / `ModelListGP`, pick submodels for each
         # outcome.
         botorch_submodel_class_per_outcome = {
             metric_name: choose_model_class(
                 Yvars=[Yvar],
                 task_features=task_features,
                 fidelity_features=fidelity_features,
             )
             for Yvar, metric_name in zip(Yvars, metric_names)
         }
         self._surrogate = ListSurrogate(
              botorch_submodel_class_per_outcome=botorch_submodel_class_per_outcome,
             **self.surrogate_options,
         )
     else:
         # Using regular `Surrogate`, so botorch model picked at the beginning
         # of the function is the one we should use.
         self._surrogate = Surrogate(
             botorch_model_class=botorch_model_class,
             **self.surrogate_options)
Example 17
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(botorch_model_class=self.botorch_model_class)

        self.acquisition_options = {Keys.NUM_FANTASIES: 64}
        self.bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
        self.objective_weights = torch.tensor([1.0])
        self.target_fidelities = {2: 1.0}
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[2.0, 6.0, 8.0]]),
        ]
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.options = {
            Keys.FIDELITY_WEIGHTS: {2: 1.0},
            Keys.COST_INTERCEPT: 1.0,
            Keys.NUM_TRACE_OBSERVATIONS: 0,
        }
Example 18
 def test_fit(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
     surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class,
         mll_class=ExactMarginalLogLikelihood,
     )
     # Checking that model is None before `fit` (and `construct`) calls.
     self.assertIsNone(surrogate._model)
     # Should instantiate the MLL and call `fit_gpytorch_model` when
     # `state_dict` is `None`.
     surrogate.fit(
         training_data=self.training_data,
         search_space_digest=self.search_space_digest,
         metric_names=self.metric_names,
         refit=self.refit,
     )
     # Check that training data is correctly passed through to the
     # BoTorch `Model`.
     self.assertTrue(
         torch.equal(
             surrogate.model.train_inputs[0],
             self.surrogate_kwargs.get("train_X"),
         )
     )
     self.assertTrue(
         torch.equal(
             surrogate.model.train_targets,
             self.surrogate_kwargs.get("train_Y").squeeze(1),
         )
     )
     mock_state_dict.assert_not_called()
     mock_MLL.assert_called_once()
     mock_fit_gpytorch.assert_called_once()
     mock_state_dict.reset_mock()
     mock_MLL.reset_mock()
     mock_fit_gpytorch.reset_mock()
     # Should `load_state_dict` when `state_dict` is not `None`
     # and `refit` is `False`.
     state_dict = {"state_attribute": "value"}
     surrogate.fit(
         training_data=self.training_data,
         search_space_digest=self.search_space_digest,
         metric_names=self.metric_names,
         refit=False,
         state_dict=state_dict,
     )
     mock_state_dict.assert_called_once()
     mock_MLL.assert_not_called()
     mock_fit_gpytorch.assert_not_called()
Example 19
def get_surrogate() -> Surrogate:
    return Surrogate(
        botorch_model_class=get_model_type(),
        mll_class=get_mll_type(),
        model_options={"some_option": "some_value"},
    )
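
A hedged usage sketch for the surrogate returned above, mirroring the `fit` calls in Examples 8, 14, and 18; the tensors, feature names, and bounds below are invented purely for illustration:

surrogate = get_surrogate()
surrogate.fit(
    # Made-up block-design training data, as in the other examples.
    training_data=TrainingData.from_block_design(
        X=torch.rand(4, 2), Y=torch.rand(4, 1), Yvar=torch.full((4, 1), 0.1)
    ),
    search_space_digest=SearchSpaceDigest(
        feature_names=["x1", "x2"],
        bounds=[(0.0, 1.0), (0.0, 1.0)],
    ),
    metric_names=["y"],
    refit=True,
)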
Example 20
class MultiFidelityAcquisitionTest(TestCase):
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0], [4.0]])
        self.Yvar = torch.tensor([[0.0], [2.0]])
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.fidelity_features = [2]
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        self.acquisition_options = {Keys.NUM_FANTASIES: 64}
        self.bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
        self.objective_weights = torch.tensor([1.0])
        self.target_fidelities = {2: 1.0}
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[2.0, 6.0, 8.0]]),
        ]
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.options = {
            Keys.FIDELITY_WEIGHTS: {2: 1.0},
            Keys.COST_INTERCEPT: 1.0,
            Keys.NUM_TRACE_OBSERVATIONS: 0,
        }
        with patch(f"{MFKG_PATH}.__init__", return_value=None):
            # We don't actually need to instantiate the BoTorch acqf in these tests.
            self.acquisition = MultiFidelityAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                botorch_acqf_class=qMultiFidelityKnowledgeGradient,
                target_fidelities=self.target_fidelities,
            )

    @patch(f"{ACQUISITION_PATH}.Acquisition.__init__", return_value=None)
    @patch(f"{ACQUISITION_PATH}.Acquisition.optimize")
    def test_optimize(self, mock_Acquisition_optimize, mock_Acquisition_init):
        # `self.acquisition.optimize()` should call `Acquisition.optimize()`
        # once.
        self.acquisition.optimize(bounds=self.bounds, n=1)
        mock_Acquisition_optimize.assert_called_once()

    @patch(f"{ACQUISITION_PATH}.Acquisition.compute_model_dependencies",
           return_value={})
    @patch(f"{MULTI_FIDELITY_PATH}.AffineFidelityCostModel",
           return_value="cost_model")
    @patch(f"{MULTI_FIDELITY_PATH}.InverseCostWeightedUtility",
           return_value=None)
    @patch(f"{MULTI_FIDELITY_PATH}.project_to_target_fidelity",
           return_value=None)
    @patch(f"{MULTI_FIDELITY_PATH}.expand_trace_observations",
           return_value=None)
    def test_compute_model_dependencies(
        self,
        mock_expand,
        mock_project,
        mock_inverse_utility,
        mock_affine_model,
        mock_Acquisition_compute,
    ):
        # Raise an error if `fidelity_weights` and `target_fidelities` do
        # not align.
        with self.assertRaisesRegex(RuntimeError,
                                    "Must provide the same indices"):
            self.acquisition.compute_model_dependencies(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                target_fidelities={1: 5.0},
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                options=self.options,
            )
        # Make sure `fidelity_weights` are set when they are not passed in.
        self.acquisition.compute_model_dependencies(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            target_fidelities={2: 5.0, 3: 5.0},
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options={
                Keys.COST_INTERCEPT: 1.0,
                Keys.NUM_TRACE_OBSERVATIONS: 0
            },
        )
        mock_affine_model.assert_called_with(
            fidelity_weights={2: 1.0, 3: 1.0}, fixed_cost=1.0
        )
        # Usual case.
        dependencies = self.acquisition.compute_model_dependencies(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            target_fidelities=self.target_fidelities,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )
        mock_Acquisition_compute.assert_called_with(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            target_fidelities=self.target_fidelities,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )
        mock_affine_model.assert_called_with(
            fidelity_weights=self.options[Keys.FIDELITY_WEIGHTS],
            fixed_cost=self.options[Keys.COST_INTERCEPT],
        )
        mock_inverse_utility.assert_called_with(cost_model="cost_model")
        self.assertTrue(Keys.COST_AWARE_UTILITY in dependencies)
        self.assertTrue(Keys.PROJECT in dependencies)
        self.assertTrue(Keys.EXPAND in dependencies)
        # Check that `project` and `expand` are defined correctly.
        project = dependencies.get(Keys.PROJECT)
        project(torch.tensor([1.0]))
        mock_project.assert_called_with(
            X=torch.tensor([1.0]), target_fidelities=self.target_fidelities)
        expand = dependencies.get(Keys.EXPAND)
        expand(torch.tensor([1.0]))
        mock_expand.assert_called_with(
            X=torch.tensor([1.0]),
            fidelity_dims=sorted(self.target_fidelities),
            num_trace_obs=self.options.get(Keys.NUM_TRACE_OBSERVATIONS),
        )
Example 21
class AcquisitionTest(TestCase):
    def setUp(self):
        qNEI_input_constructor = get_acqf_input_constructor(qNoisyExpectedImprovement)
        self.mock_input_constructor = mock.MagicMock(
            qNEI_input_constructor, side_effect=qNEI_input_constructor
        )
        # Adding wrapping here to be able to count calls and inspect arguments.
        _register_acqf_input_constructor(
            acqf_cls=DummyACQFClass,
            input_constructor=self.mock_input_constructor,
        )
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0], [4.0]])
        self.Yvar = torch.tensor([[0.0], [2.0]])
        self.training_data = TrainingData.from_block_design(
            X=self.X, Y=self.Y, Yvar=self.Yvar
        )
        self.fidelity_features = [2]
        self.surrogate.construct(
            training_data=self.training_data, fidelity_features=self.fidelity_features
        )
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["a", "b", "c"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
            target_fidelities={2: 1.0},
        )
        self.botorch_acqf_class = DummyACQFClass
        self.objective_weights = torch.tensor([1.0])
        self.objective_thresholds = None
        self.pending_observations = [torch.tensor([[1.0, 3.0, 4.0]])]
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.options = {"best_f": 0.0}
        self.acquisition = Acquisition(
            botorch_acqf_class=self.botorch_acqf_class,
            surrogate=self.surrogate,
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )
        self.inequality_constraints = [
            (torch.tensor([0, 1]), torch.tensor([-1.0, 1.0]), 1)
        ]
        self.rounding_func = lambda x: x
        self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}

    def tearDown(self):
        # Avoid polluting the registry for other tests.
        ACQF_INPUT_CONSTRUCTOR_REGISTRY.pop(DummyACQFClass)

    @mock.patch(f"{ACQUISITION_PATH}._get_X_pending_and_observed")
    @mock.patch(
        f"{ACQUISITION_PATH}.subset_model",
        return_value=SubsetModelData(None, torch.ones(1), None, None, None),
    )
    @mock.patch(f"{ACQUISITION_PATH}.get_botorch_objective_and_transform")
    @mock.patch(
        f"{CURRENT_PATH}.Acquisition.compute_model_dependencies",
        return_value={"current_value": 1.2},
    )
    @mock.patch(
        f"{DummyACQFClass.__module__}.DummyACQFClass.__init__", return_value=None
    )
    def test_init(
        self,
        mock_botorch_acqf_class,
        mock_compute_model_deps,
        mock_get_objective_and_transform,
        mock_subset_model,
        mock_get_X,
    ):
        with self.assertRaisesRegex(TypeError, ".* missing .* 'botorch_acqf_class'"):
            Acquisition(
                surrogate=self.surrogate,
                search_space_digest=self.search_space_digest,
                objective_weights=self.objective_weights,
            )

        botorch_objective = LinearMCObjective(weights=torch.tensor([1.0]))
        mock_get_objective_and_transform.return_value = (botorch_objective, None)
        mock_get_X.return_value = (self.pending_observations[0], self.X[:1])
        acquisition = Acquisition(
            surrogate=self.surrogate,
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
            objective_thresholds=self.objective_thresholds,
        )

        # Check `_get_X_pending_and_observed` kwargs
        mock_get_X.assert_called_with(
            Xs=[self.training_data.X],
            pending_observations=self.pending_observations,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            bounds=self.search_space_digest.bounds,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
        )
        # Call `subset_model` only when needed
        mock_subset_model.assert_called_with(
            model=acquisition.surrogate.model,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            objective_thresholds=self.objective_thresholds,
        )
        mock_subset_model.reset_mock()
        mock_get_objective_and_transform.reset_mock()
        self.mock_input_constructor.reset_mock()
        mock_botorch_acqf_class.reset_mock()
        self.options[Keys.SUBSET_MODEL] = False
        acquisition = Acquisition(
            surrogate=self.surrogate,
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )
        mock_subset_model.assert_not_called()
        # Check `get_botorch_objective_and_transform` kwargs
        mock_get_objective_and_transform.assert_called_once()
        _, ckwargs = mock_get_objective_and_transform.call_args
        self.assertIs(ckwargs["model"], self.acquisition.surrogate.model)
        self.assertIs(ckwargs["objective_weights"], self.objective_weights)
        self.assertIs(ckwargs["outcome_constraints"], self.outcome_constraints)
        self.assertTrue(torch.equal(ckwargs["X_observed"], self.X[:1]))
        # Check final `acqf` creation
        model_deps = {Keys.CURRENT_VALUE: 1.2}
        self.mock_input_constructor.assert_called_once()
        mock_botorch_acqf_class.assert_called_once()
        _, ckwargs = self.mock_input_constructor.call_args
        self.assertIs(ckwargs["model"], self.acquisition.surrogate.model)
        self.assertIs(ckwargs["objective"], botorch_objective)
        self.assertTrue(torch.equal(ckwargs["X_pending"], self.pending_observations[0]))
        for k, v in chain(self.options.items(), model_deps.items()):
            self.assertEqual(ckwargs[k], v)

    @mock.patch(f"{ACQUISITION_PATH}.optimize_acqf")
    def test_optimize(self, mock_optimize_acqf):
        self.acquisition.optimize(
            n=3,
            search_space_digest=self.search_space_digest,
            inequality_constraints=self.inequality_constraints,
            fixed_features=self.fixed_features,
            rounding_func=self.rounding_func,
            optimizer_options=self.optimizer_options,
        )
        mock_optimize_acqf.assert_called_with(
            acq_function=self.acquisition.acqf,
            bounds=mock.ANY,
            q=3,
            inequality_constraints=self.inequality_constraints,
            fixed_features=self.fixed_features,
            post_processing_func=self.rounding_func,
            **self.optimizer_options,
        )
        # can't use assert_called_with on bounds due to ambiguous bool comparison
        expected_bounds = torch.tensor(
            self.search_space_digest.bounds,
            dtype=self.acquisition.dtype,
            device=self.acquisition.device,
        ).transpose(0, 1)
        self.assertTrue(
            torch.equal(mock_optimize_acqf.call_args[1]["bounds"], expected_bounds)
        )

    @mock.patch(f"{ACQUISITION_PATH}.optimize_acqf_discrete")
    def test_optimize_discrete(self, mock_optimize_acqf_discrete):
        tkwargs = {
            "dtype": self.acquisition.dtype,
            "device": self.acquisition.device,
        }
        ssd1 = SearchSpaceDigest(
            feature_names=["a"],
            bounds=[(0, 2)],
            categorical_features=[0],
            discrete_choices={0: [0, 1, 2]},
        )
        # check fixed_feature index validation
        with self.assertRaisesRegex(ValueError, "Invalid fixed_feature index"):
            self.acquisition.optimize(
                n=3,
                search_space_digest=ssd1,
                inequality_constraints=self.inequality_constraints,
                fixed_features=self.fixed_features,
                rounding_func=self.rounding_func,
                optimizer_options=self.optimizer_options,
            )
        # check this works without any fixed_feature specified
        self.acquisition.optimize(
            n=3,
            search_space_digest=ssd1,
            inequality_constraints=self.inequality_constraints,
            fixed_features=None,
            rounding_func=self.rounding_func,
            optimizer_options=self.optimizer_options,
        )
        mock_optimize_acqf_discrete.assert_called_with(
            acq_function=self.acquisition.acqf,
            q=3,
            choices=mock.ANY,
            **self.optimizer_options,
        )
        # can't use assert_called_with on choices due to ambiguous bool comparison
        expected_choices = torch.tensor([[0], [1], [2]], **tkwargs)
        self.assertTrue(
            torch.equal(
                mock_optimize_acqf_discrete.call_args[1]["choices"], expected_choices
            )
        )
        # check with fixed feature
        ssd2 = SearchSpaceDigest(
            feature_names=["a", "b"],
            bounds=[(0, 2), (0, 1)],
            categorical_features=[0],
            discrete_choices={0: [0, 1, 2]},
        )
        self.acquisition.optimize(
            n=3,
            search_space_digest=ssd2,
            inequality_constraints=self.inequality_constraints,
            fixed_features=self.fixed_features,
            rounding_func=self.rounding_func,
            optimizer_options=self.optimizer_options,
        )
        mock_optimize_acqf_discrete.assert_called_with(
            acq_function=self.acquisition.acqf,
            q=3,
            choices=mock.ANY,
            **self.optimizer_options,
        )
        # can't use assert_called_with on choices due to ambiguous bool comparison
        expected_choices = torch.tensor([[0, 2.0], [1, 2.0], [2, 2.0]], **tkwargs)
        self.assertTrue(
            torch.equal(
                mock_optimize_acqf_discrete.call_args[1]["choices"], expected_choices
            )
        )

    @mock.patch(f"{ACQUISITION_PATH}.optimize_acqf_mixed")
    def test_optimize_mixed(self, mock_optimize_acqf_mixed):
        tkwargs = {
            "dtype": self.acquisition.dtype,
            "device": self.acquisition.device,
        }
        ssd = SearchSpaceDigest(
            feature_names=["a", "b"],
            bounds=[(0, 1), (0, 2)],
            categorical_features=[1],
            discrete_choices={1: [0, 1, 2]},
        )
        self.acquisition.optimize(
            n=3,
            search_space_digest=ssd,
            inequality_constraints=self.inequality_constraints,
            fixed_features=None,
            rounding_func=self.rounding_func,
            optimizer_options=self.optimizer_options,
        )
        mock_optimize_acqf_mixed.assert_called_with(
            acq_function=self.acquisition.acqf,
            bounds=mock.ANY,
            q=3,
            fixed_features_list=[{1: 0}, {1: 1}, {1: 2}],
            inequality_constraints=self.inequality_constraints,
            post_processing_func=self.rounding_func,
            **self.optimizer_options,
        )
        # can't use assert_called_with on bounds due to ambiguous bool comparison
        expected_bounds = torch.tensor(ssd.bounds, **tkwargs).transpose(0, 1)
        self.assertTrue(
            torch.equal(
                mock_optimize_acqf_mixed.call_args[1]["bounds"], expected_bounds
            )
        )

    @mock.patch(f"{SURROGATE_PATH}.Surrogate.best_in_sample_point")
    def test_best_point(self, mock_best_point):
        self.acquisition.best_point(
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )
        mock_best_point.assert_called_with(
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )

    @mock.patch(
        f"{DummyACQFClass.__module__}.DummyACQFClass.__call__", return_value=None
    )
    def test_evaluate(self, mock_call):
        self.acquisition.evaluate(X=self.X)
        mock_call.assert_called_with(X=self.X)

    @mock.patch(f"{ACQUISITION_PATH}._get_X_pending_and_observed")
    def test_init_moo(
        self,
        mock_get_X,
    ):
        moo_training_data = TrainingData(
            Xs=[self.X] * 3,
            Ys=[self.Y] * 3,
            Yvars=[self.Yvar] * 3,
        )
        moo_objective_weights = torch.tensor(
            [-1.0, -1.0, 0.0],
        )
        moo_objective_thresholds = torch.tensor(
            [0.5, 1.5, float("nan")],
        )
        self.surrogate.construct(
            training_data=moo_training_data,
        )
        mock_get_X.return_value = (self.pending_observations[0], self.X[:1])
        outcome_constraints = (
            torch.tensor(
                [[1.0, 0.0, 0.0]],
            ),
            torch.tensor(
                [[10.0]],
            ),
        )

        acquisition = Acquisition(
            surrogate=self.surrogate,
            botorch_acqf_class=qNoisyExpectedHypervolumeImprovement,
            search_space_digest=self.search_space_digest,
            objective_weights=moo_objective_weights,
            pending_observations=self.pending_observations,
            outcome_constraints=outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
            objective_thresholds=moo_objective_thresholds,
        )
        self.assertTrue(
            torch.equal(
                moo_objective_thresholds[:2], acquisition.objective_thresholds[:2]
            )
        )
        self.assertTrue(np.isnan(acquisition.objective_thresholds[2].item()))
        # test inferred objective_thresholds
        with ExitStack() as es:
            preds = torch.tensor(
                [
                    [11.0, 2.0],
                    [9.0, 3.0],
                ],
            )
            es.enter_context(
                mock.patch.object(
                    self.surrogate.model,
                    "posterior",
                    return_value=MockPosterior(
                        mean=preds,
                        samples=preds,
                    ),
                )
            )
            acquisition = Acquisition(
                surrogate=self.surrogate,
                search_space_digest=self.search_space_digest,
                objective_weights=moo_objective_weights,
                botorch_acqf_class=self.botorch_acqf_class,
                pending_observations=self.pending_observations,
                outcome_constraints=outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                options=self.options,
            )
            self.assertTrue(
                torch.equal(
                    acquisition.objective_thresholds[:2], torch.tensor([9.9, 3.3])
                )
            )
            self.assertTrue(np.isnan(acquisition.objective_thresholds[2].item()))
Example 22
DEFAULT_ACQUISITION_OPTIONS = {
    "num_fantasies": 16,
    "num_mv_samples": 10,
    "num_y_samples": 128,
    "candidate_size": 1000,
    "best_f": 0.0,
}
DEFAULT_OPTIMIZER_OPTIONS = {"num_restarts": 40, "raw_samples": 1024}

# BoTorch `Model` and Ax `Acquisition` combinations to be benchmarked.

# All of the single-fidelity models:

# Single Task GP + NEI
single_task_NEI_kwargs = {
    "surrogate": Surrogate(SingleTaskGP),
    "botorch_acqf_class": qNoisyExpectedImprovement,
    "acquisition_options": DEFAULT_ACQUISITION_OPTIONS,
}
# Fixed Noise GP + EI
fixed_noise_EI_kwargs = {
    "surrogate": Surrogate(FixedNoiseGP),
    "botorch_acqf_class": qExpectedImprovement,
    "acquisition_options": DEFAULT_ACQUISITION_OPTIONS,
}
# Single Task GP + KG
single_task_KG_kwargs = {
    "surrogate": Surrogate(SingleTaskGP),
    "acquisition_class": KnowledgeGradient,
    "acquisition_options": DEFAULT_ACQUISITION_OPTIONS,
}
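
One plausible way these kwarg dicts are consumed, mirroring the `BoTorchModel` constructor usage in Example 15; the unpacking below is a sketch of that pattern rather than code taken from the benchmark itself:

single_task_NEI_model = BoTorchModel(**single_task_NEI_kwargs)  # SingleTaskGP + qNEI
single_task_KG_model = BoTorchModel(**single_task_KG_kwargs)    # SingleTaskGP + KnowledgeGradient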
Example 23
 def test_from_botorch(self):
     surrogate = Surrogate.from_botorch(
         self.botorch_model_class(**self.surrogate_kwargs))
     self.assertIsInstance(surrogate.model, self.botorch_model_class)
     self.assertTrue(surrogate._constructed_manually)
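
For context, a minimal sketch of the `from_botorch` path with a directly constructed BoTorch model; the tensors are made up, and `SingleTaskGP(train_X=..., train_Y=...)` is standard BoTorch usage:

X = torch.rand(5, 2)
Y = torch.rand(5, 1)
gp = SingleTaskGP(train_X=X, train_Y=Y)  # model built outside of Ax
surrogate = Surrogate.from_botorch(gp)
# Such a surrogate is flagged as manually constructed, as the test above asserts.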
Example 24
class MOOAcquisitionTest(TestCase):
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0, 4.0, 2.0], [4.0, 3.0, 1.0]])
        self.Yvar = torch.tensor([[0.0, 2.0, 1.0], [2.0, 0.0, 1.0]])
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.fidelity_features = [2]
        self.surrogate.construct(training_data=self.training_data)

        self.bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
        self.botorch_acqf_class = DummyACQFClass
        self.objective_weights = torch.tensor([1.0, -1.0, 0.0])
        self.objective_thresholds = torch.tensor([2.0, 1.0, float("nan")])
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[1.0, 3.0, 4.0]]),
        ]
        self.outcome_constraints = (
            torch.tensor([[1.0, 0.5, 0.5]]),
            torch.tensor([[0.5]]),
        )
        self.con_tfs = get_outcome_constraint_transforms(
            self.outcome_constraints)
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.target_fidelities = {2: 1.0}
        self.options = {}
        self.acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        self.inequality_constraints = [(torch.tensor([0, 1]),
                                        torch.tensor([-1.0, 1.0]), 1)]
        self.rounding_func = lambda x: x
        self.optimizer_options = {
            Keys.NUM_RESTARTS: 40,
            Keys.RAW_SAMPLES: 1024
        }

    @patch(f"{MOO_ACQUISITION_PATH}.get_outcome_constraint_transforms")
    @patch(f"{ACQUISITION_PATH}._get_X_pending_and_observed")
    @patch(
        f"{ACQUISITION_PATH}.subset_model",
        wraps=acquisition.subset_model,
    )
    @patch(f"{CURRENT_PATH}.MOOAcquisition._get_botorch_objective")
    @patch(f"{DummyACQFClass.__module__}.DummyACQFClass.__init__",
           return_value=None)
    def test_init(
        self,
        mock_botorch_acqf_class,
        mock_get_objective,
        mock_subset_model,
        mock_get_X,
        mock_get_constraints,
    ):
        botorch_objective = WeightedMCMultiOutputObjective(
            weights=self.objective_weights[:2], outcomes=[0, 1])
        mock_get_objective.return_value = botorch_objective
        mock_get_constraints.return_value = self.con_tfs
        mock_get_X.return_value = (self.pending_observations[0], self.X[:1])
        acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        # Check `_get_X_pending_and_observed` kwargs
        mock_get_X.assert_called_with(
            Xs=[
                self.training_data.X, self.training_data.X,
                self.training_data.X
            ],
            pending_observations=self.pending_observations,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            bounds=self.bounds,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
        )
        # Call `subset_model` only when needed
        mock_subset_model.assert_called_with(
            acquisition.surrogate.model,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            objective_thresholds=self.objective_thresholds,
        )
        mock_subset_model.reset_mock()
        mock_botorch_acqf_class.reset_mock()
        self.options[Keys.SUBSET_MODEL] = False
        acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        mock_subset_model.assert_not_called()
        # Check final `acqf` creation
        mock_botorch_acqf_class.assert_called_once()
        _, ckwargs = mock_botorch_acqf_class.call_args
        self.assertIs(ckwargs["model"], self.acquisition.surrogate.model)
        self.assertIs(ckwargs["objective"], botorch_objective)
        self.assertTrue(
            torch.equal(ckwargs["X_pending"], self.pending_observations[0]))
        self.assertEqual(
            ckwargs["ref_point"],
            (self.objective_thresholds[:2] *
             self.objective_weights[:2]).tolist(),
        )
        self.assertIsInstance(ckwargs["partitioning"], BoxDecomposition)
        self.assertIs(ckwargs["constraints"], self.con_tfs)
        self.assertIsInstance(ckwargs["sampler"], SobolQMCNormalSampler)

        # qNoisyExpectedImprovement not supported.
        with self.assertRaisesRegex(
                UnsupportedError,
                "Only qExpectedHypervolumeImprovement is currently supported",
        ):
            MOOAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                objective_thresholds=self.objective_thresholds,
                botorch_acqf_class=qNoisyExpectedImprovement,
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                target_fidelities=self.target_fidelities,
                options=self.options,
            )

        with self.assertRaisesRegex(ValueError,
                                    "Objective Thresholds required"):
            MOOAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                objective_thresholds=None,
                botorch_acqf_class=self.botorch_acqf_class,
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                target_fidelities=self.target_fidelities,
                options=self.options,
            )

    @patch(f"{DummyACQFClass.__module__}.DummyACQFClass.__call__",
           return_value=None)
    def test_evaluate(self, mock_call):
        self.acquisition.evaluate(X=self.X)
        mock_call.assert_called_with(X=self.X)

    def test_extract_training_data(self):
        self.assertEqual(  # Base `Surrogate` case.
            self.acquisition._extract_training_data(surrogate=self.surrogate),
            self.training_data,
        )
        # `ListSurrogate` case.
        list_surrogate = ListSurrogate(
            botorch_submodel_class=self.botorch_model_class)
        list_surrogate._training_data_per_outcome = {"a": self.training_data}
        self.assertEqual(
            self.acquisition._extract_training_data(surrogate=list_surrogate),
            list_surrogate._training_data_per_outcome,
        )
Example No. 25
class SurrogateTest(TestCase):
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.mll_class = ExactMarginalLogLikelihood
        self.device = torch.device("cpu")
        self.dtype = torch.float
        self.Xs, self.Ys, self.Yvars, self.bounds, _, _, _ = get_torch_test_data(
            dtype=self.dtype)
        self.training_data = TrainingData.from_block_design(X=self.Xs[0],
                                                            Y=self.Ys[0],
                                                            Yvar=self.Yvars[0])
        self.surrogate_kwargs = self.botorch_model_class.construct_inputs(
            self.training_data)
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class,
            mll_class=self.mll_class)
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2"],
            bounds=self.bounds,
            target_fidelities={1: 1.0},
        )
        self.metric_names = ["y"]
        self.fixed_features = {1: 2.0}
        self.refit = True
        self.objective_weights = torch.tensor([-1.0, 1.0],
                                              dtype=self.dtype,
                                              device=self.device)
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = (
            torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
            torch.tensor([[0.5], [1.0]]),
        )
        self.options = {}

    @patch(f"{CURRENT_PATH}.Kernel")
    @patch(f"{CURRENT_PATH}.Likelihood")
    def test_init(self, mock_Likelihood, mock_Kernel):
        self.assertEqual(self.surrogate.botorch_model_class,
                         self.botorch_model_class)
        self.assertEqual(self.surrogate.mll_class, self.mll_class)
        with self.assertRaisesRegex(NotImplementedError,
                                    "Customizing likelihood"):
            Surrogate(botorch_model_class=self.botorch_model_class,
                      likelihood=Likelihood())
        with self.assertRaisesRegex(NotImplementedError, "Customizing kernel"):
            Surrogate(botorch_model_class=self.botorch_model_class,
                      kernel_class=Kernel())

    @patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
    def test_mll_options(self, _):
        mock_mll = MagicMock(self.mll_class)
        surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class,
            mll_class=mock_mll,
            mll_options={"some_option": "some_value"},
        )
        surrogate.fit(
            training_data=self.training_data,
            search_space_digest=self.search_space_digest,
            metric_names=self.metric_names,
            refit=self.refit,
        )
        self.assertEqual(mock_mll.call_args[1]["some_option"], "some_value")

    def test_model_property(self):
        with self.assertRaisesRegex(
                ValueError, "BoTorch `Model` has not yet been constructed."):
            self.surrogate.model

    def test_training_data_property(self):
        with self.assertRaisesRegex(
                ValueError,
                "Underlying BoTorch `Model` has not yet received its training_data.",
        ):
            self.surrogate.training_data

    def test_dtype_property(self):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        self.assertEqual(self.dtype, self.surrogate.dtype)

    def test_device_property(self):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        self.assertEqual(self.device, self.surrogate.device)

    def test_from_botorch(self):
        surrogate = Surrogate.from_botorch(
            self.botorch_model_class(**self.surrogate_kwargs))
        self.assertIsInstance(surrogate.model, self.botorch_model_class)
        self.assertTrue(surrogate._constructed_manually)

    @patch(f"{CURRENT_PATH}.SingleTaskGP.__init__", return_value=None)
    def test_construct(self, mock_GP):
        with self.assertRaises(NotImplementedError):
            # Base `Model` does not implement `construct_inputs`.
            Surrogate(botorch_model_class=Model).construct(
                training_data=self.training_data,
                fidelity_features=self.search_space_digest.fidelity_features,
            )
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        mock_GP.assert_called_once()
        call_kwargs = mock_GP.call_args[1]
        self.assertTrue(torch.equal(call_kwargs["train_X"], self.Xs[0]))
        self.assertTrue(torch.equal(call_kwargs["train_Y"], self.Ys[0]))
        self.assertFalse(self.surrogate._constructed_manually)

        # Check that `model_options` passed to the `Surrogate` constructor are
        # properly propagated.
        with patch.object(
                SingleTaskGP,
                "construct_inputs",
                wraps=SingleTaskGP.construct_inputs) as mock_construct_inputs:
            surrogate = Surrogate(
                botorch_model_class=self.botorch_model_class,
                mll_class=self.mll_class,
                model_options={"some_option": "some_value"},
            )
            surrogate.construct(self.training_data)
            mock_construct_inputs.assert_called_with(
                training_data=self.training_data, some_option="some_value")

    @patch(f"{CURRENT_PATH}.SingleTaskGP.load_state_dict", return_value=None)
    @patch(f"{CURRENT_PATH}.ExactMarginalLogLikelihood")
    @patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
    def test_fit(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
        surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class,
            mll_class=ExactMarginalLogLikelihood,
        )
        # Check that the model is `None` before `fit` (and `construct`) are called.
        self.assertIsNone(surrogate._model)
        # Should instantiate the MLL and call `fit_gpytorch_model` when
        # `state_dict` is `None`.
        surrogate.fit(
            training_data=self.training_data,
            search_space_digest=self.search_space_digest,
            metric_names=self.metric_names,
            refit=self.refit,
        )
        # Check that training data is correctly passed through to the
        # BoTorch `Model`.
        self.assertTrue(
            torch.equal(
                surrogate.model.train_inputs[0],
                self.surrogate_kwargs.get("train_X"),
            ))
        self.assertTrue(
            torch.equal(
                surrogate.model.train_targets,
                self.surrogate_kwargs.get("train_Y").squeeze(1),
            ))
        mock_state_dict.assert_not_called()
        mock_MLL.assert_called_once()
        mock_fit_gpytorch.assert_called_once()
        mock_state_dict.reset_mock()
        mock_MLL.reset_mock()
        mock_fit_gpytorch.reset_mock()
        # Should `load_state_dict` when `state_dict` is not `None`
        # and `refit` is `False`.
        state_dict = {"state_attribute": "value"}
        surrogate.fit(
            training_data=self.training_data,
            search_space_digest=self.search_space_digest,
            metric_names=self.metric_names,
            refit=False,
            state_dict=state_dict,
        )
        mock_state_dict.assert_called_once()
        mock_MLL.assert_not_called()
        mock_fit_gpytorch.assert_not_called()

    @patch(f"{SURROGATE_PATH}.predict_from_model")
    def test_predict(self, mock_predict):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        self.surrogate.predict(X=self.Xs[0])
        mock_predict.assert_called_with(model=self.surrogate.model,
                                        X=self.Xs[0])

    def test_best_in_sample_point(self):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        # `best_in_sample_point` requires `objective_weights`
        with patch(f"{SURROGATE_PATH}.best_in_sample_point",
                   return_value=None) as mock_best_in_sample:
            with self.assertRaisesRegex(ValueError, "Could not obtain"):
                self.surrogate.best_in_sample_point(
                    search_space_digest=self.search_space_digest,
                    objective_weights=None)
        with patch(f"{SURROGATE_PATH}.best_in_sample_point",
                   return_value=(self.Xs[0], 0.0)) as mock_best_in_sample:
            best_point, observed_value = self.surrogate.best_in_sample_point(
                search_space_digest=self.search_space_digest,
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                options=self.options,
            )
            mock_best_in_sample.assert_called_with(
                Xs=[self.training_data.X],
                model=self.surrogate,
                bounds=self.search_space_digest.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                options=self.options,
            )

    @patch(f"{ACQUISITION_PATH}.Acquisition.__init__", return_value=None)
    @patch(
        f"{ACQUISITION_PATH}.Acquisition.optimize",
        return_value=([torch.tensor([0.0])], [torch.tensor([1.0])]),
    )
    @patch(
        f"{SURROGATE_PATH}.pick_best_out_of_sample_point_acqf_class",
        return_value=(qSimpleRegret, {
            Keys.SAMPLER: SobolQMCNormalSampler
        }),
    )
    def test_best_out_of_sample_point(self, mock_best_point_util,
                                      mock_acqf_optimize, mock_acqf_init):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        # Currently, this function cannot be used with fixed features.
        with self.assertRaisesRegex(NotImplementedError, "Fixed features"):
            self.surrogate.best_out_of_sample_point(
                search_space_digest=self.search_space_digest,
                objective_weights=self.objective_weights,
                fixed_features=self.fixed_features,
            )
        candidate, acqf_value = self.surrogate.best_out_of_sample_point(
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            options=self.options,
        )
        mock_acqf_init.assert_called_with(
            surrogate=self.surrogate,
            botorch_acqf_class=qSimpleRegret,
            search_space_digest=self.search_space_digest,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=None,
            options={Keys.SAMPLER: SobolQMCNormalSampler},
        )
        self.assertTrue(torch.equal(candidate, torch.tensor([0.0])))
        self.assertTrue(torch.equal(acqf_value, torch.tensor([1.0])))

    @patch(f"{CURRENT_PATH}.SingleTaskGP.load_state_dict", return_value=None)
    @patch(f"{CURRENT_PATH}.ExactMarginalLogLikelihood")
    @patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
    def test_update(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        # Check that correct arguments are passed to `fit`.
        with patch(f"{SURROGATE_PATH}.Surrogate.fit") as mock_fit:
            # Call `fit` by default
            self.surrogate.update(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                refit=self.refit,
                state_dict={"key": "val"},
            )
            mock_fit.assert_called_with(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                candidate_metadata=None,
                refit=self.refit,
                state_dict={"key": "val"},
            )

        # Check that the training data is correctly passed through to the
        # BoTorch `Model`.
        Xs, Ys, Yvars, bounds, _, _, _ = get_torch_test_data(dtype=self.dtype,
                                                             offset=1.0)
        training_data = TrainingData.from_block_design(X=Xs[0],
                                                       Y=Ys[0],
                                                       Yvar=Yvars[0])
        surrogate_kwargs = self.botorch_model_class.construct_inputs(
            training_data)
        self.surrogate.update(
            training_data=training_data,
            search_space_digest=self.search_space_digest,
            metric_names=self.metric_names,
            refit=self.refit,
            state_dict={"key": "val"},
        )
        self.assertTrue(
            torch.equal(
                self.surrogate.model.train_inputs[0],
                surrogate_kwargs.get("train_X"),
            ))
        self.assertTrue(
            torch.equal(
                self.surrogate.model.train_targets,
                surrogate_kwargs.get("train_Y").squeeze(1),
            ))

        # If the model should not be reconstructed, check that an error is raised.
        self.surrogate._constructed_manually = True
        with self.assertRaisesRegex(NotImplementedError,
                                    ".* constructed manually"):
            self.surrogate.update(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                refit=self.refit,
            )

    def test_serialize_attributes_as_kwargs(self):
        expected = self.surrogate.__dict__
        self.assertEqual(self.surrogate._serialize_attributes_as_kwargs(),
                         expected)
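
The tests above mock out most of the heavy lifting; for context only, here is a minimal, unmocked sketch (not taken from the source) of the Surrogate call sequence they verify, assuming the same imports and the `get_torch_test_data` helper used in `setUp`:

# Illustrative sketch only: the Surrogate lifecycle exercised piece by piece
# in the tests above. Assumes the imports and helpers used throughout these
# examples; it is not a definitive Ax usage pattern.
Xs, Ys, Yvars, bounds, _, _, _ = get_torch_test_data(dtype=torch.float)
training_data = TrainingData.from_block_design(X=Xs[0], Y=Ys[0], Yvar=Yvars[0])
surrogate = Surrogate(
    botorch_model_class=SingleTaskGP,
    mll_class=ExactMarginalLogLikelihood,
)
surrogate.fit(
    training_data=training_data,
    search_space_digest=SearchSpaceDigest(
        feature_names=["x1", "x2"],
        bounds=bounds,
    ),
    metric_names=["y"],
    refit=True,  # refitting runs `fit_gpytorch_model` on the constructed MLL
)
mean, covariance = surrogate.predict(X=Xs[0])  # delegates to `predict_from_model`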
Example No. 26
class SurrogateTest(TestCase):
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.mll_class = ExactMarginalLogLikelihood
        self.device = torch.device("cpu")
        self.dtype = torch.float
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]],
                              dtype=self.dtype,
                              device=self.device)

        self.Y = torch.tensor([[3.0], [4.0]],
                              dtype=self.dtype,
                              device=self.device)
        self.Yvar = torch.tensor([[0.0], [2.0]],
                                 dtype=self.dtype,
                                 device=self.device)

        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.surrogate_kwargs = self.botorch_model_class.construct_inputs(
            self.training_data)
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class,
            mll_class=self.mll_class)
        self.bounds = [(0.0, 1.0), (1.0, 4.0), (2.0, 5.0)]
        self.task_features = []
        self.feature_names = ["x1", "x2", "x3"]
        self.metric_names = ["y"]
        self.fidelity_features = []
        self.target_fidelities = {1: 1.0}
        self.fixed_features = {1: 2.0}
        self.refit = True
        self.objective_weights = torch.tensor([-1.0, 1.0],
                                              dtype=self.dtype,
                                              device=self.device)
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = (
            torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
            torch.tensor([[0.5], [1.0]]),
        )
        self.options = {}

    @patch(f"{CURRENT_PATH}.Kernel")
    @patch(f"{CURRENT_PATH}.Likelihood")
    def test_init(self, mock_Likelihood, mock_Kernel):
        self.assertEqual(self.surrogate.botorch_model_class,
                         self.botorch_model_class)
        self.assertEqual(self.surrogate.mll_class, self.mll_class)
        with self.assertRaisesRegex(NotImplementedError,
                                    "Customizing likelihood"):
            Surrogate(botorch_model_class=self.botorch_model_class,
                      likelihood=Likelihood())
        with self.assertRaisesRegex(NotImplementedError, "Customizing kernel"):
            Surrogate(botorch_model_class=self.botorch_model_class,
                      kernel_class=Kernel())

    def test_model_property(self):
        with self.assertRaisesRegex(
                ValueError, "BoTorch `Model` has not yet been constructed."):
            self.surrogate.model

    def test_training_data_property(self):
        with self.assertRaisesRegex(
                ValueError,
                "Underlying BoTorch `Model` has not yet received its training_data.",
        ):
            self.surrogate.training_data

    def test_dtype_property(self):
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        self.assertEqual(self.dtype, self.surrogate.dtype)

    def test_device_property(self):
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        self.assertEqual(self.device, self.surrogate.device)

    def test_from_BoTorch(self):
        surrogate = Surrogate.from_BoTorch(
            self.botorch_model_class(**self.surrogate_kwargs))
        self.assertIsInstance(surrogate.model, self.botorch_model_class)
        self.assertFalse(surrogate._should_reconstruct)

    @patch(f"{CURRENT_PATH}.SingleTaskGP.__init__", return_value=None)
    def test_construct(self, mock_GP):
        base_surrogate = Surrogate(botorch_model_class=Model)
        with self.assertRaisesRegex(TypeError,
                                    "Cannot construct an abstract model."):
            base_surrogate.construct(
                training_data=self.training_data,
                fidelity_features=self.fidelity_features,
            )
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        mock_GP.assert_called_with(train_X=self.X, train_Y=self.Y)

    @patch(f"{CURRENT_PATH}.SingleTaskGP.load_state_dict", return_value=None)
    @patch(f"{CURRENT_PATH}.ExactMarginalLogLikelihood")
    @patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
    def test_fit(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
        surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class,
            mll_class=ExactMarginalLogLikelihood,
        )
        # Check that the model is `None` before `fit` (and `construct`) are called.
        self.assertIsNone(surrogate._model)
        # Should instantiate the MLL and call `fit_gpytorch_model` when
        # `state_dict` is `None`.
        surrogate.fit(
            training_data=self.training_data,
            bounds=self.bounds,
            task_features=self.task_features,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            fidelity_features=self.fidelity_features,
            target_fidelities=self.target_fidelities,
            refit=self.refit,
        )
        mock_state_dict.assert_not_called()
        mock_MLL.assert_called_once()
        mock_fit_gpytorch.assert_called_once()
        mock_state_dict.reset_mock()
        mock_MLL.reset_mock()
        mock_fit_gpytorch.reset_mock()
        # Should `load_state_dict` when `state_dict` is not `None`
        # and `refit` is `False`.
        state_dict = {}
        surrogate.fit(
            training_data=self.training_data,
            bounds=self.bounds,
            task_features=self.task_features,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            fidelity_features=self.fidelity_features,
            target_fidelities=self.target_fidelities,
            refit=False,
            state_dict=state_dict,
        )
        mock_state_dict.assert_called_once()
        mock_MLL.assert_not_called()
        mock_fit_gpytorch.assert_not_called()

    @patch(f"{SURROGATE_PATH}.predict_from_model")
    def test_predict(self, mock_predict):
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        self.surrogate.predict(X=self.X)
        mock_predict.assert_called_with(model=self.surrogate.model, X=self.X)

    def test_best_in_sample_point(self):
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        # `best_in_sample_point` requires `objective_weights`
        with patch(f"{SURROGATE_PATH}.best_in_sample_point",
                   return_value=None) as mock_best_in_sample:
            with self.assertRaisesRegex(ValueError, "Could not obtain"):
                self.surrogate.best_in_sample_point(bounds=self.bounds,
                                                    objective_weights=None)
        with patch(f"{SURROGATE_PATH}.best_in_sample_point",
                   return_value=(self.X, 0.0)) as mock_best_in_sample:
            best_point, observed_value = self.surrogate.best_in_sample_point(
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                options=self.options,
            )
            mock_best_in_sample.assert_called_with(
                Xs=[self.training_data.X],
                model=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                options=self.options,
            )

    @patch(f"{ACQUISITION_PATH}.Acquisition.__init__", return_value=None)
    @patch(
        f"{ACQUISITION_PATH}.Acquisition.optimize",
        return_value=([torch.tensor([0.0])], [torch.tensor([1.0])]),
    )
    @patch(
        f"{SURROGATE_PATH}.pick_best_out_of_sample_point_acqf_class",
        return_value=(qSimpleRegret, {
            Keys.SAMPLER: SobolQMCNormalSampler
        }),
    )
    def test_best_out_of_sample_point(self, mock_best_point_util,
                                      mock_acqf_optimize, mock_acqf_init):
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        # Currently, this function cannot be used with fixed features.
        with self.assertRaisesRegex(NotImplementedError, "Fixed features"):
            self.surrogate.best_out_of_sample_point(
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                fixed_features=self.fixed_features,
            )
        candidate, acqf_value = self.surrogate.best_out_of_sample_point(
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fidelity_features=self.fidelity_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        mock_acqf_init.assert_called_with(
            surrogate=self.surrogate,
            botorch_acqf_class=qSimpleRegret,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=None,
            target_fidelities=self.target_fidelities,
            options={Keys.SAMPLER: SobolQMCNormalSampler},
        )
        self.assertTrue(torch.equal(candidate, torch.tensor([0.0])))
        self.assertTrue(torch.equal(acqf_value, torch.tensor([1.0])))

    @patch(f"{SURROGATE_PATH}.Surrogate.fit")
    def test_update(self, mock_fit):
        self.surrogate.construct(training_data=self.training_data,
                                 fidelity_features=self.fidelity_features)
        # Call `fit` by default
        self.surrogate.update(
            training_data=self.training_data,
            bounds=self.bounds,
            task_features=self.task_features,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            fidelity_features=self.fidelity_features,
            refit=self.refit,
        )
        mock_fit.assert_called_with(
            training_data=self.training_data,
            bounds=self.bounds,
            task_features=self.task_features,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            fidelity_features=self.fidelity_features,
            candidate_metadata=None,
            state_dict=self.surrogate.model.state_dict,
            refit=self.refit,
        )
        # If the model should not be reconstructed, an error should be raised.
        self.surrogate._should_reconstruct = False
        with self.assertRaisesRegex(
                NotImplementedError,
                ".* models that should not be re-constructed"):
            self.surrogate.update(
                training_data=self.training_data,
                bounds=self.bounds,
                task_features=self.task_features,
                feature_names=self.feature_names,
                metric_names=self.metric_names,
                fidelity_features=self.fidelity_features,
                refit=self.refit,
            )
Example No. 27
    def test_from_BoTorch(self):
        surrogate = Surrogate.from_BoTorch(
            self.botorch_model_class(**self.surrogate_kwargs))
        self.assertIsInstance(surrogate.model, self.botorch_model_class)
        self.assertFalse(surrogate._should_reconstruct)
Example No. 28
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.acquisition_class = Acquisition
        self.botorch_acqf_class = qExpectedImprovement
        self.acquisition_options = ACQ_OPTIONS
        self.model = BoTorchModel(
            surrogate=self.surrogate,
            acquisition_class=self.acquisition_class,
            botorch_acqf_class=self.botorch_acqf_class,
            acquisition_options=self.acquisition_options,
        )

        self.dtype = torch.float
        self.device = torch.device("cpu")
        tkwargs = {"dtype": self.dtype, "device": self.device}
        Xs1, Ys1, Yvars1, self.bounds, _, _, _ = get_torch_test_data(
            dtype=self.dtype)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=self.dtype,
                                                           offset=1.0)
        self.Xs = Xs1
        self.Ys = Ys1
        self.Yvars = Yvars1
        self.X_test = Xs2[0]
        self.block_design_training_data = TrainingData(Xs=self.Xs,
                                                       Ys=self.Ys,
                                                       Yvars=self.Yvars)
        self.non_block_design_training_data = TrainingData(
            Xs=Xs1 + Xs2,
            Ys=Ys1 + Ys2,
            Yvars=Yvars1 + Yvars2,
        )
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2", "x3"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
        )
        self.mf_search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2", "x3"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
            task_features=[],
            fidelity_features=[2],
            target_fidelities={1: 1.0},
        )
        self.metric_names = ["y"]
        self.metric_names_for_list_surrogate = ["y1", "y2"]
        self.candidate_metadata = []
        self.optimizer_options = {
            Keys.NUM_RESTARTS: 40,
            Keys.RAW_SAMPLES: 1024
        }
        self.model_gen_options = {
            Keys.OPTIMIZER_KWARGS: self.optimizer_options
        }
        self.objective_weights = torch.tensor([1.0], **tkwargs)
        self.moo_objective_weights = torch.tensor([1.0, 1.5, 0.0], **tkwargs)
        self.moo_objective_thresholds = torch.tensor(
            [0.5, 1.5, float("nan")], **tkwargs)
        self.outcome_constraints = None
        self.linear_constraints = None
        self.fixed_features = None
        self.pending_observations = None
        self.rounding_func = "func"
        self.moo_training_data = TrainingData(
            Xs=self.Xs * 3,
            Ys=self.non_block_design_training_data.Ys + self.Ys,
            Yvars=self.Yvars * 3,
        )
        self.moo_metric_names = ["y1", "y2", "y3"]
Example No. 29
def surrogate_to_dict(surrogate: Surrogate) -> Dict[str, Any]:
    """Convert Ax surrogate to a dictionary."""
    dict_representation = {"__type": surrogate.__class__.__name__}
    dict_representation.update(surrogate._serialize_attributes_as_kwargs())
    return dict_representation
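
A minimal usage sketch (illustrative only; the exact keys beyond "__type" depend on how the Surrogate was configured, since `_serialize_attributes_as_kwargs` returns the instance attributes, as the tests above show):

# Illustrative usage of surrogate_to_dict: "__type" records the class name and
# the remaining keys come from `_serialize_attributes_as_kwargs()`.
surrogate = Surrogate(
    botorch_model_class=SingleTaskGP,
    mll_class=ExactMarginalLogLikelihood,
)
serialized = surrogate_to_dict(surrogate)
assert serialized["__type"] == "Surrogate"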
Example No. 30
class AcquisitionTest(TestCase):
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0], [4.0]])
        self.Yvar = torch.tensor([[0.0], [2.0]])
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.fidelity_features = [2]
        self.surrogate.construct(
            training_data=self.training_data, fidelity_features=self.fidelity_features
        )

        self.bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
        self.botorch_acqf_class = DummyACQFClass
        self.objective_weights = torch.tensor([1.0])
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[2.0, 6.0, 8.0]]),
        ]
        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.target_fidelities = {2: 1.0}
        self.options = {"best_f": 0.0}
        self.acquisition = Acquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        self.inequality_constraints = [
            (torch.tensor([0, 1]), torch.tensor([-1.0, 1.0]), 1)
        ]
        self.rounding_func = lambda x: x
        self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}

    @patch(
        f"{ACQUISITION_PATH}._get_X_pending_and_observed",
        return_value=(torch.tensor([2.0]), torch.tensor([3.0])),
    )
    @patch(f"{ACQUISITION_PATH}.subset_model", return_value=(None, None, None, None))
    @patch(f"{ACQUISITION_PATH}.get_botorch_objective")
    @patch(
        f"{CURRENT_PATH}.Acquisition.compute_model_dependencies",
        return_value={"current_value": 1.2},
    )
    @patch(f"{CURRENT_PATH}.Acquisition.compute_data_dependencies", return_value={})
    @patch(f"{DummyACQFClass.__module__}.DummyACQFClass.__init__", return_value=None)
    def test_init(
        self,
        mock_botorch_acqf_class,
        mock_compute_data_deps,
        mock_compute_model_deps,
        mock_get_objective,
        mock_subset_model,
        mock_get_X,
    ):
        self.acquisition.default_botorch_acqf_class = None
        with self.assertRaisesRegex(
            ValueError, ".*`botorch_acqf_class` argument must be specified."
        ):
            Acquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
            )

        botorch_objective = LinearMCObjective(weights=torch.tensor([1.0]))
        mock_get_objective.return_value = botorch_objective
        acquisition = Acquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        # Check `_get_X_pending_and_observed` kwargs
        mock_get_X.assert_called_with(
            Xs=[self.training_data.X],
            pending_observations=self.pending_observations,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            bounds=self.bounds,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
        )
        # Call `subset_model` only when needed
        mock_subset_model.assert_called_with(
            acquisition.surrogate.model,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
        )
        mock_subset_model.reset_mock()
        self.options[Keys.SUBSET_MODEL] = False
        acquisition = Acquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        mock_subset_model.assert_not_called()
        # Check `get_botorch_objective` kwargs
        mock_get_objective.assert_called_with(
            model=self.acquisition.surrogate.model,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            X_observed=torch.tensor([3.0]),
            use_scalarized_objective=False,
        )
        # Check `compute_model_dependencies` kwargs
        mock_compute_model_deps.assert_called_with(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        # Check `compute_data_dependencies` kwargs
        mock_compute_data_deps.assert_called_with(training_data=self.training_data)
        # Check final `acqf` creation
        model_deps = {Keys.CURRENT_VALUE: 1.2}
        data_deps = {}
        mock_botorch_acqf_class.assert_called_with(
            model=self.acquisition.surrogate.model,
            objective=botorch_objective,
            X_pending=torch.tensor([2.0]),
            X_baseline=torch.tensor([3.0]),
            **self.options,
            **model_deps,
            **data_deps,
        )

    @patch(f"{ACQUISITION_PATH}.optimize_acqf")
    def test_optimize(self, mock_optimize_acqf):
        self.acquisition.optimize(
            bounds=self.bounds,
            n=3,
            optimizer_class=None,
            inequality_constraints=self.inequality_constraints,
            fixed_features=self.fixed_features,
            rounding_func=self.rounding_func,
            optimizer_options=self.optimizer_options,
        )
        mock_optimize_acqf.assert_called_with(
            self.acquisition.acqf,
            bounds=self.bounds,
            q=3,
            inequality_constraints=self.inequality_constraints,
            fixed_features=self.fixed_features,
            post_processing_func=self.rounding_func,
            **self.optimizer_options,
        )

    @patch(f"{SURROGATE_PATH}.Surrogate.best_in_sample_point")
    def test_best_point(self, mock_best_point):
        self.acquisition.best_point(
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        mock_best_point.assert_called_with(
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            options=self.options,
        )

    @patch(f"{DummyACQFClass.__module__}.DummyACQFClass.__call__", return_value=None)
    def test_evaluate(self, mock_call):
        self.acquisition.evaluate(X=self.X)
        mock_call.assert_called_with(X=self.X)

    def test_extract_training_data(self):
        self.assertEqual(  # Base `Surrogate` case.
            self.acquisition._extract_training_data(surrogate=self.surrogate),
            self.training_data,
        )
        # `ListSurrogate` case.
        list_surrogate = ListSurrogate(botorch_submodel_class=self.botorch_model_class)
        list_surrogate._training_data_per_outcome = {"a": self.training_data}
        self.assertEqual(
            self.acquisition._extract_training_data(surrogate=list_surrogate),
            list_surrogate._training_data_per_outcome,
        )
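
As a closing illustration only (assuming `acquisition`, `surrogate`, and `list_surrogate` objects like those constructed in `setUp` above), callers of `_extract_training_data` can branch on its return type, which the assertions above pin down:

# Illustrative only: a plain Surrogate yields a single TrainingData, while a
# ListSurrogate yields a dict mapping outcome names to TrainingData.
data = acquisition._extract_training_data(surrogate=list_surrogate)
if isinstance(data, dict):  # ListSurrogate case
    for outcome_name, td in data.items():
        print(outcome_name, td.X.shape)
else:  # plain Surrogate case
    print(data.X.shape)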