def test_project_to_sample_points(self):
    """Check shape and content of `project_to_sample_points` output.

    For each batch shape / dtype combination, the projected tensor must
    have shape `batch_shape x p x d`, and its trailing `d_prime` columns
    must equal the provided sample points.
    """
    q, d, p, d_prime = 1, 12, 7, 4
    for batch_shape in ([], [2]):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            inputs = torch.rand(*batch_shape, q, d, **tkwargs)
            pts = torch.rand(p, d_prime, **tkwargs)
            augmented = project_to_sample_points(X=inputs, sample_points=pts)
            # projection replaces the q-dimension with the p sample points
            self.assertEqual(augmented.shape, torch.Size(batch_shape + [p, d]))
            # trailing d_prime columns should carry the sample points;
            # for the batched case, inspect the first batch element only
            trailing = (
                augmented[0, :, -d_prime:]
                if batch_shape == [2]
                else augmented[:, -d_prime:]
            )
            self.assertTrue(torch.allclose(trailing, pts))
def test_evaluate_qMFKG(self):
    """Evaluate qMultiFidelityKnowledgeGradient against mocked fantasy models.

    Patches `MockModel.fantasize` (and the model's num_outputs property via
    the dotted path `NO` — presumably pointing at `num_outputs`; verify
    against the module's constants) so that the acquisition value can be
    compared to a hand-computed expectation from `mock_util`.
    """
    for dtype in (torch.float, torch.double):
        # basic test
        n_f = 4
        current_value = torch.rand(1, device=self.device, dtype=dtype)
        cau = GenericCostAwareUtility(mock_util)
        mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                # X holds 1 candidate point plus n_f fantasy points
                X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
                val = qMFKG(X)
                # fantasize must be called exactly once, on the single
                # candidate point (shape 1 x 1 x 1)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
        # expected value: cost-aware utility of (posterior mean - current
        # value), averaged over the fantasy dimension
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        # extract_candidates strips the n_f trailing fantasy points
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))

        # batched evaluation
        b = 2
        current_value = torch.rand(b, device=self.device, dtype=dtype)
        cau = GenericCostAwareUtility(mock_util)
        mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                # candidate set is batched: b x 1 x 1
                self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))

        # pending points and current value
        mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
        X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, variance=variance))
        current_value = torch.rand(1, device=self.device, dtype=dtype)
        X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    X_pending=X_pending,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                # the 2 pending points are appended to the 1 candidate,
                # so fantasize sees 1 x 3 x 1
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
        val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))

        # test objective (inner MC sampling)
        objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1))
        samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(samples=samples))
        X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    objective=objective,
                    current_value=current_value,
                    cost_aware_utility=cau,
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
        # with an MC objective the expectation is computed from posterior
        # samples rather than the posterior mean
        val_exp = mock_util(X, objective(samples) - current_value).mean(dim=0)
        self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
        self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))

        # test valfunc_cls and valfunc_argfac
        d, p, d_prime = 4, 3, 2
        samples = torch.ones(3, 1, 1, device=self.device, dtype=dtype)
        mean = torch.tensor(
            [[0.25], [0.5], [0.75]], device=self.device, dtype=dtype
        )
        weights = torch.tensor([0.5, 1.0, 1.0], device=self.device, dtype=dtype)
        mfm = MockModel(MockPosterior(mean=mean, samples=samples))
        X = torch.rand(n_f * d + d, d, device=self.device, dtype=dtype)
        sample_points = torch.rand(p, d_prime, device=self.device, dtype=dtype)
        with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
            with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(None)
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    project=lambda X: project_to_sample_points(X, sample_points),
                    valfunc_cls=ScalarizedPosteriorMean,
                    valfunc_argfac=lambda model: {"weights": weights},
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
                # scalarized posterior mean with the weights above:
                # 0.5*0.25 + 1.0*0.5 + 1.0*0.75 = 1.375
                val_exp = torch.tensor([1.375], dtype=dtype)
                self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
                # reset so the second construction/call below can again
                # assert a single fantasize invocation
                patch_f.reset_mock()
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    project=lambda X: project_to_sample_points(X, sample_points),
                    valfunc_cls=qExpectedImprovement,
                    valfunc_argfac=lambda model: {"best_f": 0.0},
                )
                val = qMFKG(X)
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
                # qEI with best_f=0 on all-ones samples yields 1.0
                val_exp = torch.tensor([1.0], dtype=dtype)
                self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))