def test_KnowledgeGradient(self):
    """Single-fidelity KnowledgeGradient smoke test: candidate generation
    with a mocked optimizer, warm-start initialization, PosteriorMean
    usage, best-point bounds, the unsupported-constraint error path, and
    the input-warping / LOOCV configuration flags."""

    def fit_model(m):
        # Fit on the shared fixtures; fresh kwarg lists on every call.
        m.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            bounds=self.bounds,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            task_features=[],
            fidelity_features=[],
        )

    model = KnowledgeGradient()
    fit_model(model)

    n = 2
    candidate_stub = torch.rand(1, n, 4, dtype=self.dtype, device=self.device)
    acq_val_stub = torch.tensor(0.0, dtype=self.dtype, device=self.device)
    with mock.patch(self.optimize_acqf) as mock_optimize_acqf:
        mock_optimize_acqf.side_effect = [(candidate_stub, acq_val_stub)]
        x_out, w_out, *_ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": self.optimizer_options,
            },
        )
        self.assertTrue(torch.equal(x_out, candidate_stub.cpu()))
        self.assertTrue(torch.equal(w_out, torch.ones(n, dtype=self.dtype)))
        # Exactly one call expected here; the best-point call is not
        # routed through this mock.
        mock_optimize_acqf.assert_called_once()

    warmstart_stub = torch.rand(10, 32, 3, dtype=self.dtype, device=self.device)
    optimizer_options2 = {
        "num_restarts": 1,
        "raw_samples": 1,
        "maxiter": 5,
        "batch_limit": 1,
        "partial_restarts": 2,
    }
    with mock.patch(
        "ax.models.torch.botorch_kg.gen_one_shot_kg_initial_conditions",
        return_value=warmstart_stub,
    ) as mock_warmstart_initialization:
        x_out, w_out, *_ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": optimizer_options2,
            },
        )
        mock_warmstart_initialization.assert_called_once()

    obj = ScalarizedObjective(weights=self.objective_weights)
    dummy_acq = PosteriorMean(model=model.model, objective=obj)
    with mock.patch(
        "ax.models.torch.utils.PosteriorMean", return_value=dummy_acq
    ) as mock_posterior_mean:
        x_out, w_out, *_ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": optimizer_options2,
            },
        )
        self.assertEqual(mock_posterior_mean.call_count, 2)

    # The selected best point must lie inside the bounds, up to a small
    # numerical tolerance.
    xbest = model.best_point(
        bounds=self.bounds, objective_weights=self.objective_weights
    )
    lower = torch.tensor([b[0] for b in self.bounds]) - 1e-5
    upper = torch.tensor([b[1] for b in self.bounds]) + 1e-5
    self.assertTrue(torch.all(xbest <= upper))
    self.assertTrue(torch.all(xbest >= lower))

    # Linear constraints are unsupported and must raise.
    linear_constraints = (
        torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
        torch.tensor([[0.5], [1.0]]),
    )
    with self.assertRaises(UnsupportedError):
        model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=linear_constraints,
        )

    # Input warping: off by default; enabling it attaches a Warp transform.
    self.assertFalse(model.use_input_warping)
    model = KnowledgeGradient(use_input_warping=True)
    fit_model(model)
    self.assertTrue(model.use_input_warping)
    self.assertTrue(hasattr(model.model, "input_transform"))
    self.assertIsInstance(model.model.input_transform, Warp)

    # LOOCV pseudo-likelihood: off by default; flag round-trips.
    self.assertFalse(model.use_loocv_pseudo_likelihood)
    model = KnowledgeGradient(use_loocv_pseudo_likelihood=True)
    fit_model(model)
    self.assertTrue(model.use_loocv_pseudo_likelihood)
def test_KnowledgeGradient(self):
    """KnowledgeGradient checks: generation with a mocked optimizer (which
    also serves the internal best-point lookup), warm-start initialization,
    PosteriorMean usage, mocked best-point selection, and the
    unsupported-constraint error path."""
    model = KnowledgeGradient()
    model.fit(
        Xs=self.Xs,
        Ys=self.Ys,
        Yvars=self.Yvars,
        bounds=self.bounds,
        feature_names=self.feature_names,
        metric_names=self.metric_names,
        task_features=[],
        fidelity_features=[],
    )
    n = 2
    best_point_dummy = torch.rand(1, 3, dtype=self.dtype, device=self.device)
    X_dummy = torch.rand(1, n, 4, dtype=self.dtype, device=self.device)
    acq_dummy = torch.tensor(0.0, dtype=self.dtype, device=self.device)
    with mock.patch(self.optimize_acqf) as mock_optimize_acqf:
        # First call resolves the current best point; second call produces
        # the actual candidates.
        mock_optimize_acqf.side_effect = [
            (best_point_dummy, None),
            (X_dummy, acq_dummy),
        ]
        Xgen, wgen, _ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": self.optimizer_options,
            },
        )
        self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
        self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=self.dtype)))
        # Fix: `assert_called()` only checked "at least once" although the
        # accompanying comment promised two calls. Since `Xgen` equals the
        # SECOND side-effect value, both entries must have been consumed —
        # pin the count exactly: once for best_point, once for generation.
        self.assertEqual(mock_optimize_acqf.call_count, 2)
    ini_dummy = torch.rand(10, 32, 3, dtype=self.dtype, device=self.device)
    optimizer_options2 = {
        "num_restarts": 1,
        "raw_samples": 1,
        "maxiter": 5,
        "batch_limit": 1,
        "partial_restarts": 2,
    }
    with mock.patch(
        "ax.models.torch.botorch_kg.gen_one_shot_kg_initial_conditions",
        return_value=ini_dummy,
    ) as mock_warmstart_initialization:
        Xgen, wgen, _ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": optimizer_options2,
            },
        )
        mock_warmstart_initialization.assert_called_once()
    obj = ScalarizedObjective(weights=self.objective_weights)
    dummy_acq = PosteriorMean(model=model.model, objective=obj)
    with mock.patch(
        "ax.models.torch.botorch_kg.PosteriorMean", return_value=dummy_acq
    ) as mock_posterior_mean:
        Xgen, wgen, _ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": optimizer_options2,
            },
        )
        self.assertEqual(mock_posterior_mean.call_count, 2)
    # Best-point selection goes through optimize_acqf and returns its
    # candidate unchanged.
    X_dummy = torch.rand(3)
    acq_dummy = torch.tensor(0.0)
    with mock.patch(
        self.optimize_acqf, return_value=(X_dummy, acq_dummy)
    ) as mock_optimize_acqf:
        xbest = model.best_point(
            bounds=self.bounds, objective_weights=self.objective_weights
        )
        self.assertTrue(torch.equal(xbest, X_dummy))
        mock_optimize_acqf.assert_called_once()
    # Linear constraints are unsupported and must raise.
    linear_constraints = (
        torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
        torch.tensor([[0.5], [1.0]]),
    )
    with self.assertRaises(UnsupportedError):
        Xgen, wgen = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=linear_constraints,
        )
def test_KnowledgeGradient_multifidelity(self):
    """Multi-fidelity KnowledgeGradient: best-point selection stays within
    bounds and requires target fidelities; generation works with a mocked
    optimizer; linear constraints raise; input-warping and LOOCV flags."""
    model = KnowledgeGradient()
    model.fit(
        Xs=self.Xs,
        Ys=self.Ys,
        Yvars=self.Yvars,
        bounds=self.bounds,
        task_features=[],
        feature_names=self.feature_names,
        metric_names=["L2NormMetric"],
        fidelity_features=[2],
    )
    # Check best point selection within bounds (some numerical tolerance).
    xbest = model.best_point(
        bounds=self.bounds,
        objective_weights=self.objective_weights,
        target_fidelities={2: 5.0},
    )
    lb = torch.tensor([b[0] for b in self.bounds]) - 1e-5
    ub = torch.tensor([b[1] for b in self.bounds]) + 1e-5
    self.assertTrue(torch.all(xbest <= ub))
    self.assertTrue(torch.all(xbest >= lb))
    # Check error when no target fidelities are specified.
    with self.assertRaises(RuntimeError):
        model.best_point(
            bounds=self.bounds, objective_weights=self.objective_weights
        )
    # Check generation.
    n = 2
    X_dummy = torch.zeros(1, n, 3, dtype=self.dtype, device=self.device)
    acq_dummy = torch.tensor(0.0, dtype=self.dtype, device=self.device)
    dummy = (X_dummy, acq_dummy)
    with mock.patch(
        self.optimize_acqf, side_effect=[dummy]
    ) as mock_optimize_acqf:
        Xgen, wgen, _, __ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": self.optimizer_options,
            },
            target_fidelities={2: 5.0},
        )
        self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
        self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=self.dtype)))
        # Fix: the old `assert_called()` carried a stale comment claiming
        # two calls, but the single-entry side_effect would have raised
        # StopIteration on a second call — so exactly one call happened.
        # Assert that precisely.
        mock_optimize_acqf.assert_called_once()
    # Test error message for unsupported linear constraints.
    linear_constraints = (
        torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
        torch.tensor([[0.5], [1.0]]),
    )
    with self.assertRaises(UnsupportedError):
        xbest = model.best_point(
            bounds=self.bounds,
            linear_constraints=linear_constraints,
            objective_weights=self.objective_weights,
            target_fidelities={2: 1.0},
        )
    # Test input warping: off by default; enabling attaches a Warp transform.
    self.assertFalse(model.use_input_warping)
    model = KnowledgeGradient(use_input_warping=True)
    model.fit(
        Xs=self.Xs,
        Ys=self.Ys,
        Yvars=self.Yvars,
        bounds=self.bounds,
        task_features=[],
        feature_names=self.feature_names,
        metric_names=["L2NormMetric"],
        fidelity_features=[2],
    )
    self.assertTrue(model.use_input_warping)
    self.assertTrue(hasattr(model.model, "input_transform"))
    self.assertIsInstance(model.model.input_transform, Warp)
    # Test loocv pseudo likelihood: off by default; flag round-trips.
    self.assertFalse(model.use_loocv_pseudo_likelihood)
    model = KnowledgeGradient(use_loocv_pseudo_likelihood=True)
    model.fit(
        Xs=self.Xs,
        Ys=self.Ys,
        Yvars=self.Yvars,
        bounds=self.bounds,
        task_features=[],
        feature_names=self.feature_names,
        metric_names=["L2NormMetric"],
        fidelity_features=[2],
    )
    self.assertTrue(model.use_loocv_pseudo_likelihood)
def test_KnowledgeGradient_multifidelity(self):
    """Multi-fidelity KG with the fidelity feature in the last column:
    best_point appends the target fidelity to the optimizer's candidate,
    gen routes through optimize_acqf, and linear constraints raise."""
    model = KnowledgeGradient()
    model.fit(
        Xs=self.Xs,
        Ys=self.Ys,
        Yvars=self.Yvars,
        bounds=self.bounds,
        task_features=[],
        feature_names=self.feature_names,
        metric_names=[],
        fidelity_features=[-1],
    )

    # Best-point selection: the mocked optimizer's candidate comes back
    # with the target fidelity value appended as the final coordinate.
    candidate_stub = torch.tensor([1.0, 2.0])
    acq_val_stub = torch.tensor(0.0)
    with mock.patch(
        self.optimize_acqf, return_value=(candidate_stub, acq_val_stub)
    ) as mock_optimize_acqf:
        xbest = model.best_point(
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            target_fidelities={2: 1.0},
        )
        self.assertTrue(torch.equal(xbest, torch.tensor([1.0, 2.0, 1.0])))
        mock_optimize_acqf.assert_called_once()

    # Omitting target fidelities is an error.
    with self.assertRaises(RuntimeError):
        model.best_point(
            bounds=self.bounds, objective_weights=self.objective_weights
        )

    # Candidate generation with stubbed optimizer results.
    n = 2
    partial_stub = torch.zeros(12, 1, 2, dtype=self.dtype, device=self.device)
    full_stub = torch.zeros(1, n, 3, dtype=self.dtype, device=self.device)
    acq_val_stub = torch.tensor(0.0, dtype=self.dtype, device=self.device)
    stub_pair = (partial_stub, acq_val_stub)
    full_pair = (full_stub, acq_val_stub)
    with mock.patch(
        self.optimize_acqf,
        side_effect=[stub_pair, full_pair, stub_pair, full_pair],
    ) as mock_optimize_acqf:
        x_out, w_out, _ = model.gen(
            n=n,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=None,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": self.acq_options,
                "optimizer_kwargs": self.optimizer_options,
            },
            target_fidelities={2: 1.0},
        )
        self.assertTrue(torch.equal(x_out, full_stub.cpu()))
        self.assertTrue(torch.equal(w_out, torch.ones(n, dtype=self.dtype)))
        # Multiple optimize_acqf calls occur (best-point lookup plus
        # candidate generation); verify the mock was exercised.
        mock_optimize_acqf.assert_called()

    # Linear constraints are unsupported and must raise.
    linear_constraints = (
        torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
        torch.tensor([[0.5], [1.0]]),
    )
    with self.assertRaises(UnsupportedError):
        model.best_point(
            bounds=self.bounds,
            linear_constraints=linear_constraints,
            objective_weights=self.objective_weights,
            target_fidelities={2: 1.0},
        )