def test_initialize_q_multi_fidelity_knowledge_gradient(self):
    for dtype in (torch.float, torch.double):
        mm = MockModel(MockPosterior())
        # test error when not specifying current_value
        with self.assertRaises(UnsupportedError):
            qMultiFidelityKnowledgeGradient(
                model=mm, num_fantasies=None, cost_aware_utility=mock.Mock()
            )
        # test default construction
        mock_cau = mock.Mock()
        current_value = torch.zeros(1, device=self.device, dtype=dtype)
        qMFKG = qMultiFidelityKnowledgeGradient(
            model=mm,
            num_fantasies=32,
            current_value=current_value,
            cost_aware_utility=mock_cau,
        )
        self.assertEqual(qMFKG.num_fantasies, 32)
        self.assertIsInstance(qMFKG.sampler, SobolQMCNormalSampler)
        self.assertEqual(qMFKG.sampler.sample_shape, torch.Size([32]))
        self.assertIsNone(qMFKG.objective)
        self.assertIsNone(qMFKG.inner_sampler)
        self.assertIsNone(qMFKG.X_pending)
        self.assertEqual(qMFKG.get_augmented_q_batch_size(q=3), 32 + 3)
        self.assertEqual(qMFKG.cost_aware_utility, mock_cau)
        self.assertTrue(torch.equal(qMFKG.current_value, current_value))
        self.assertIsNone(qMFKG._cost_sampler)
        X = torch.rand(2, 3, device=self.device, dtype=dtype)
        self.assertTrue(torch.equal(qMFKG.project(X), X))
        self.assertTrue(torch.equal(qMFKG.expand(X), X))
        # make sure cost sampling logic works
        self.assertIsInstance(qMFKG.cost_sampler, SobolQMCNormalSampler)
        self.assertEqual(qMFKG.cost_sampler.sample_shape, torch.Size([32]))
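
# Illustrative sketch (not part of the original tests): the defaults verified
# above correspond to the following typical construction against a real model.
# `SingleTaskGP` and the training data are assumptions for illustration. With
# no explicit sampler, qMFKG builds a SobolQMCNormalSampler over
# `num_fantasies` fantasy samples, as asserted in the test.
def _example_default_qmfkg_construction():
    from botorch.models import SingleTaskGP

    train_X = torch.rand(8, 2)  # hypothetical training inputs
    train_Y = torch.rand(8, 1)  # hypothetical training targets
    model = SingleTaskGP(train_X, train_Y)
    return qMultiFidelityKnowledgeGradient(
        model=model,
        num_fantasies=32,
        current_value=torch.zeros(1),
    )
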
def _instantiate_KG(
    model: Model,
    objective: AcquisitionObjective,
    qmc: bool = True,
    n_fantasies: int = 64,
    mc_samples: int = 256,
    num_trace_observations: int = 0,
    seed_inner: Optional[int] = None,
    seed_outer: Optional[int] = None,
    X_pending: Optional[Tensor] = None,
    current_value: Optional[Tensor] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
    fidelity_weights: Optional[Dict[int, float]] = None,
    cost_intercept: float = 1.0,
) -> qKnowledgeGradient:
    r"""Instantiate either a `qKnowledgeGradient` or a
    `qMultiFidelityKnowledgeGradient` acquisition function, depending on
    whether `target_fidelities` is defined.
    """
    sampler_cls = SobolQMCNormalSampler if qmc else IIDNormalSampler
    fantasy_sampler = sampler_cls(num_samples=n_fantasies, seed=seed_outer)
    if isinstance(objective, MCAcquisitionObjective):
        inner_sampler = sampler_cls(num_samples=mc_samples, seed=seed_inner)
    else:
        inner_sampler = None
    if target_fidelities:
        if fidelity_weights is None:
            fidelity_weights = {f: 1.0 for f in target_fidelities}
        if set(target_fidelities) != set(fidelity_weights):
            raise RuntimeError(
                "Must provide the same indices for target_fidelities "
                f"({set(target_fidelities)}) and fidelity_weights "
                f"({set(fidelity_weights)})."
            )
        cost_model = AffineFidelityCostModel(
            fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
        )
        cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)

        def project(X: Tensor) -> Tensor:
            return project_to_target_fidelity(
                X=X, target_fidelities=target_fidelities
            )

        def expand(X: Tensor) -> Tensor:
            return expand_trace_observations(
                X=X,
                fidelity_dims=sorted(target_fidelities),  # pyre-ignore: [6]
                num_trace_obs=num_trace_observations,
            )

        return qMultiFidelityKnowledgeGradient(
            model=model,
            num_fantasies=n_fantasies,
            sampler=fantasy_sampler,
            objective=objective,
            inner_sampler=inner_sampler,
            X_pending=X_pending,
            current_value=current_value,
            cost_aware_utility=cost_aware_utility,
            project=project,
            expand=expand,
        )
    return qKnowledgeGradient(
        model=model,
        num_fantasies=n_fantasies,
        sampler=fantasy_sampler,
        objective=objective,
        inner_sampler=inner_sampler,
        X_pending=X_pending,
        current_value=current_value,
    )
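
# Illustrative sketch (an assumption, not from the original source): how the
# `_instantiate_KG` dispatch above plays out. With `target_fidelities` set it
# returns a `qMultiFidelityKnowledgeGradient` wired to an
# `InverseCostWeightedUtility`; without it, a plain `qKnowledgeGradient`. The
# model, data, and fidelity column index (2) below are made up. Note that
# `current_value` must be supplied on the multi-fidelity branch, since qMFKG
# rejects a cost-aware utility without it (see the first test above).
def _example_instantiate_kg_dispatch():
    from botorch.models import SingleTaskGP

    train_X = torch.rand(8, 3)  # last column plays the role of fidelity
    train_Y = torch.rand(8, 1)
    model = SingleTaskGP(train_X, train_Y)
    # multi-fidelity branch: cost model and project/expand callables are built
    mf_acqf = _instantiate_KG(
        model=model,
        objective=None,  # falls back to posterior-mean valuation in qKG
        n_fantasies=16,
        current_value=torch.zeros(1),
        target_fidelities={2: 1.0},
    )
    assert isinstance(mf_acqf, qMultiFidelityKnowledgeGradient)
    # single-fidelity branch
    acqf = _instantiate_KG(model=model, objective=None, n_fantasies=16)
    assert not isinstance(acqf, qMultiFidelityKnowledgeGradient)
    assert isinstance(acqf, qKnowledgeGradient)
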
def test_fixed_evaluation_qMFKG(self):
    # mock test of qMFKG.evaluate() with expand, project & cost-aware utility
    for dtype in (torch.float, torch.double):
        mean = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
        mm = MockModel(MockPosterior(mean=mean))
        cau = GenericCostAwareUtility(mock_util)
        n_f = 4
        mean = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        with ExitStack() as es:
            patch_f = es.enter_context(
                mock.patch.object(MockModel, "fantasize", return_value=mfm)
            )
            mock_num_outputs = es.enter_context(
                mock.patch(NO, new_callable=mock.PropertyMock)
            )
            es.enter_context(
                mock.patch(
                    "botorch.optim.optimize.optimize_acqf",
                    return_value=(
                        torch.ones(1, 1, 1, device=self.device, dtype=dtype),
                        torch.ones(1, device=self.device, dtype=dtype),
                    ),
                )
            )
            es.enter_context(
                mock.patch(
                    "botorch.generation.gen.gen_candidates_scipy",
                    return_value=(
                        torch.ones(1, 1, 1, device=self.device, dtype=dtype),
                        torch.ones(1, device=self.device, dtype=dtype),
                    ),
                )
            )
            mock_num_outputs.return_value = 1
            qMFKG = qMultiFidelityKnowledgeGradient(
                model=mm,
                num_fantasies=n_f,
                X_pending=torch.rand(1, 1, 1, device=self.device, dtype=dtype),
                current_value=torch.zeros(1, device=self.device, dtype=dtype),
                cost_aware_utility=cau,
                project=lambda X: torch.zeros_like(X),
                expand=lambda X: torch.ones_like(X),
            )
            val = qMFKG.evaluate(
                X=torch.zeros(1, 1, 1, device=self.device, dtype=dtype),
                bounds=torch.tensor([[0.0], [1.0]]),
                num_restarts=1,
                raw_samples=1,
            )
            # fixed typo: was `asset_called_once`, which is a no-op on a Mock
            patch_f.assert_called_once()
            cargs, ckwargs = patch_f.call_args
            self.assertTrue(
                torch.equal(
                    ckwargs["X"],
                    torch.ones(1, 2, 1, device=self.device, dtype=dtype),
                )
            )
            self.assertEqual(
                val, cau(None, torch.ones(1, device=self.device, dtype=dtype))
            )
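
# Illustrative sketch (assumption, not from the original source): unlike
# `forward`, `qMFKG.evaluate` solves the inner optimization over the fantasy
# models numerically via `optimize_acqf` / `gen_candidates_scipy`, which the
# test above mocks out to return all-ones candidates and values. A
# hypothetical un-mocked call on a fitted acquisition function would be:
def _example_qmfkg_evaluate(qmfkg, d=2):
    # `qmfkg` is assumed to be a qMultiFidelityKnowledgeGradient built on a
    # fitted model over a d-dimensional unit-cube design space.
    bounds = torch.stack([torch.zeros(d), torch.ones(d)])
    X = torch.rand(1, 1, d)  # a single q=1 candidate to evaluate
    return qmfkg.evaluate(X=X, bounds=bounds, num_restarts=2, raw_samples=16)
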
def test_evaluate_qMFKG(self):
    for dtype in (torch.float, torch.double):
        # basic test
        n_f = 4
        current_value = torch.rand(1, device=self.device, dtype=dtype)
        cau = GenericCostAwareUtility(mock_util)
        mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # batched evaluation
        b = 2
        current_value = torch.rand(b, device=self.device, dtype=dtype)
        cau = GenericCostAwareUtility(mock_util)
        mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # pending points and current value
        mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        current_value = torch.rand(1, device=self.device, dtype=dtype)
        X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    X_pending=X_pending,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # test objective (inner MC sampling)
        objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1))
        samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(samples=samples))
        X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    objective=objective,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
        val_exp = mock_util(X, objective(samples) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # test valfunc_cls and valfunc_argfac
        d, p, d_prime = 4, 3, 2
        samples = torch.ones(3, 1, 1, device=self.device, dtype=dtype)
        mean = torch.tensor(
            [[0.25], [0.5], [0.75]], device=self.device, dtype=dtype
        )
        weights = torch.tensor([0.5, 1.0, 1.0], device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, samples=samples))
        X = torch.rand(n_f * d + d, d, device=self.device, dtype=dtype)
        sample_points = torch.rand(p, d_prime, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    project=lambda X: project_to_sample_points(X, sample_points),
                    valfunc_cls=ScalarizedPosteriorMean,
                    valfunc_argfac=lambda model: {"weights": weights},
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
                val_exp = torch.tensor([1.375], dtype=dtype)
                self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
                patch_f.reset_mock()
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    project=lambda X: project_to_sample_points(X, sample_points),
                    valfunc_cls=qExpectedImprovement,
                    valfunc_argfac=lambda model: {"best_f": 0.0},
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
                val_exp = torch.tensor([1.0], dtype=dtype)
                self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
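
# Illustrative sketch (assumption, not from the original source): the
# `GenericCostAwareUtility` used throughout these tests wraps any callable
# mapping `(X, deltas) -> Tensor`, where `deltas` are the fantasized value
# improvements; `mock_util` is assumed to have that shape. A minimal stand-in:
def _example_cost_aware_utility():
    def _util(X, deltas):
        # discount the improvements by a constant factor standing in for cost
        return 0.5 * deltas

    return GenericCostAwareUtility(_util)
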
def test_evaluate_q_multi_fidelity_knowledge_gradient(self):
    for dtype in (torch.float, torch.double):
        # basic test
        n_f = 4
        current_value = torch.rand(1, device=self.device, dtype=dtype)
        cau = GenericCostAwareUtility(mock_util)
        mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            mm = MockModel(None)
            qMFKG = qMultiFidelityKnowledgeGradient(
                model=mm,
                num_fantasies=n_f,
                current_value=current_value,
                cost_aware_utility=cau,
            )
            X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
            val = qMFKG(X)
            patch_f.assert_called_once()
            cargs, ckwargs = patch_f.call_args
            self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # batched evaluation
        b = 2
        current_value = torch.rand(b, device=self.device, dtype=dtype)
        cau = GenericCostAwareUtility(mock_util)
        mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            mm = MockModel(None)
            qMFKG = qMultiFidelityKnowledgeGradient(
                model=mm,
                num_fantasies=n_f,
                current_value=current_value,
                cost_aware_utility=cau,
            )
            val = qMFKG(X)
            patch_f.assert_called_once()
            cargs, ckwargs = patch_f.call_args
            self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # pending points and current value
        mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        current_value = torch.rand(1, device=self.device, dtype=dtype)
        X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            mm = MockModel(None)
            qMFKG = qMultiFidelityKnowledgeGradient(
                model=mm,
                num_fantasies=n_f,
                X_pending=X_pending,
                current_value=current_value,
                cost_aware_utility=cau,
            )
            val = qMFKG(X)
            patch_f.assert_called_once()
            cargs, ckwargs = patch_f.call_args
            self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
        # test objective (inner MC sampling)
        objective = GenericMCObjective(objective=lambda Y: Y.norm(dim=-1))
        samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(samples=samples))
        X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            mm = MockModel(None)
            qMFKG = qMultiFidelityKnowledgeGradient(
                model=mm,
                num_fantasies=n_f,
                objective=objective,
                current_value=current_value,
                cost_aware_utility=cau,
            )
            val = qMFKG(X)
            patch_f.assert_called_once()
            cargs, ckwargs = patch_f.call_args
            self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
        val_exp = mock_util(X, objective(samples) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
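
# Illustrative sketch (assumption, not from the original source): the
# `extract_candidates` checks above rely on qKG's one-shot q-batch layout,
# in which the trailing `num_fantasies` rows of `X` are the fantasy solutions
# and the leading rows are the actual candidates.
def _example_candidate_extraction(qmfkg, n_f=4, d=1):
    X = torch.rand(n_f + 1, d)  # one candidate followed by n_f fantasy points
    X_actual = qmfkg.extract_candidates(X)  # equals X[..., :-n_f, :]
    assert X_actual.shape == torch.Size([1, d])
    return X_actual
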