Example #1
    def test_GenericCostAwareUtility(self):
        def cost(X, deltas, **kwargs):
            return deltas.mean(dim=-1) / X[..., 1].sum(dim=-1)

        for dtype in (torch.float, torch.double):
            u = GenericCostAwareUtility(cost)
            X = torch.rand(3, 2, device=self.device, dtype=dtype)
            deltas = torch.rand(5, 3, device=self.device, dtype=dtype)
            self.assertIsInstance(u, GenericCostAwareUtility)
            self.assertTrue(torch.equal(u(X, deltas), cost(X, deltas)))
            X = torch.rand(4, 3, 2, device=self.device, dtype=dtype)
            deltas = torch.rand(5, 4, 3, device=self.device, dtype=dtype)
            self.assertIsInstance(u, GenericCostAwareUtility)
            self.assertTrue(torch.equal(u(X, deltas), cost(X, deltas)))
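
For orientation, the sketch below shows how a GenericCostAwareUtility like the one exercised above is typically handed to qMultiFidelityKnowledgeGradient outside of a test harness. The SingleTaskGP model, the random training data, and the current_value choice are placeholder assumptions, not part of these tests.

import torch

from botorch.acquisition.cost_aware import GenericCostAwareUtility
from botorch.acquisition.knowledge_gradient import qMultiFidelityKnowledgeGradient
from botorch.models import SingleTaskGP


def cost(X, deltas, **kwargs):
    # same cost callable as in the test above: scale the fantasized
    # improvements by the inverse of the summed second input column
    return deltas.mean(dim=-1) / X[..., 1].sum(dim=-1)


# placeholder model and data (assumed, for illustration only)
train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

qMFKG = qMultiFidelityKnowledgeGradient(
    model=model,
    num_fantasies=4,
    current_value=train_Y.max(),
    cost_aware_utility=GenericCostAwareUtility(cost),
)
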
Example #2
    def test_fixed_evaluation_qMFKG(self):
        # mock test qMFKG.evaluate() with expand, project & cost aware utility
        for dtype in (torch.float, torch.double):
            mean = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean))
            cau = GenericCostAwareUtility(mock_util)
            n_f = 4
            mean = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, 2, 1, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            with ExitStack() as es:
                patch_f = es.enter_context(
                    mock.patch.object(MockModel, "fantasize", return_value=mfm)
                )
                mock_num_outputs = es.enter_context(
                    mock.patch(NO, new_callable=mock.PropertyMock)
                )
                es.enter_context(
                    mock.patch(
                        "botorch.optim.optimize.optimize_acqf",
                        return_value=(
                            torch.ones(1, 1, 1, device=self.device, dtype=dtype),
                            torch.ones(1, device=self.device, dtype=dtype),
                        ),
                    ),
                )
                es.enter_context(
                    mock.patch(
                        "botorch.generation.gen.gen_candidates_scipy",
                        return_value=(
                            torch.ones(1, 1, 1, device=self.device, dtype=dtype),
                            torch.ones(1, device=self.device, dtype=dtype),
                        ),
                    ),
                )

                mock_num_outputs.return_value = 1
                qMFKG = qMultiFidelityKnowledgeGradient(
                    model=mm,
                    num_fantasies=n_f,
                    X_pending=torch.rand(1, 1, 1, device=self.device, dtype=dtype),
                    current_value=torch.zeros(1, device=self.device, dtype=dtype),
                    cost_aware_utility=cau,
                    project=lambda X: torch.zeros_like(X),
                    expand=lambda X: torch.ones_like(X),
                )
                val = qMFKG.evaluate(
                    X=torch.zeros(1, 1, 1, device=self.device, dtype=dtype),
                    bounds=torch.tensor([[0.0], [1.0]]),
                    num_restarts=1,
                    raw_samples=1,
                )
                patch_f.assert_called_once()
                cargs, ckwargs = patch_f.call_args
                self.assertTrue(
                    torch.equal(
                        ckwargs["X"],
                        torch.ones(1, 2, 1, device=self.device, dtype=dtype),
                    )
                )
            self.assertEqual(
                val, cau(None, torch.ones(1, device=self.device, dtype=dtype))
            )
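
Examples #2 to #4 lean on module-level helpers that the snippets do not show: MockModel and MockPosterior from botorch.utils.testing, the patch target NO, and the utility callable mock_util. The preamble below is a rough sketch of what they assume; the body of mock_util is a hypothetical stand-in rather than the original helper, and NO is presumed to point at MockModel.num_outputs.

from contextlib import ExitStack
from unittest import mock

import torch

from botorch.acquisition.analytic import ScalarizedPosteriorMean
from botorch.acquisition.cost_aware import GenericCostAwareUtility
from botorch.acquisition.knowledge_gradient import qMultiFidelityKnowledgeGradient
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.objective import GenericMCObjective
from botorch.acquisition.utils import project_to_sample_points
from botorch.utils.testing import MockModel, MockPosterior

# patch target for MockModel.num_outputs (assumed dotted path)
NO = "botorch.utils.testing.MockModel.num_outputs"


def mock_util(X, deltas):
    # hypothetical stand-in: any elementwise transform of `deltas` keeps the
    # val/val_exp assertions in the examples self-consistent
    return 0.5 * deltas
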
Example #3
    def test_evaluate_qMFKG(self):
        for dtype in (torch.float, torch.double):
            # basic test
            n_f = 4
            current_value = torch.rand(1, device=self.device, dtype=dtype)
            cau = GenericCostAwareUtility(mock_util)
            mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
                with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qMFKG = qMultiFidelityKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        current_value=current_value,
                        cost_aware_utility=cau,
                    )
                    X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
                    val = qMFKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
            val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
            self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
            self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
            # batched evaluation
            b = 2
            current_value = torch.rand(b, device=self.device, dtype=dtype)
            cau = GenericCostAwareUtility(mock_util)
            mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
                with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qMFKG = qMultiFidelityKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        current_value=current_value,
                        cost_aware_utility=cau,
                    )
                    val = qMFKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
            val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
            self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
            self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
            # pending points and current value
            mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            current_value = torch.rand(1, device=self.device, dtype=dtype)
            X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
                with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qMFKG = qMultiFidelityKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        X_pending=X_pending,
                        current_value=current_value,
                        cost_aware_utility=cau,
                    )
                    val = qMFKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
            val_exp = mock_util(X, mean.squeeze(-1) - current_value).mean(dim=0)
            self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
            self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
            # test objective (inner MC sampling)
            objective = GenericMCObjective(objective=lambda Y, X: Y.norm(dim=-1))
            samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(samples=samples))
            X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
                with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qMFKG = qMultiFidelityKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        objective=objective,
                        current_value=current_value,
                        cost_aware_utility=cau,
                    )
                    val = qMFKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
            val_exp = mock_util(X, objective(samples) - current_value).mean(dim=0)
            self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
            self.assertTrue(torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
            # test valfunc_cls and valfunc_argfac
            d, p, d_prime = 4, 3, 2
            samples = torch.ones(3, 1, 1, device=self.device, dtype=dtype)
            mean = torch.tensor(
                [[0.25], [0.5], [0.75]], device=self.device, dtype=dtype
            )
            weights = torch.tensor([0.5, 1.0, 1.0], device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, samples=samples))
            X = torch.rand(n_f * d + d, d, device=self.device, dtype=dtype)
            sample_points = torch.rand(p, d_prime, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize", return_value=mfm) as patch_f:
                with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qMFKG = qMultiFidelityKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        project=lambda X: project_to_sample_points(X, sample_points),
                        valfunc_cls=ScalarizedPosteriorMean,
                        valfunc_argfac=lambda model: {"weights": weights},
                    )
                    val = qMFKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
                    val_exp = torch.tensor([1.375], device=self.device, dtype=dtype)
                    self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))

                    patch_f.reset_mock()
                    qMFKG = qMultiFidelityKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        project=lambda X: project_to_sample_points(X, sample_points),
                        valfunc_cls=qExpectedImprovement,
                        valfunc_argfac=lambda model: {"best_f": 0.0},
                    )
                    val = qMFKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 16, 4]))
                    val_exp = torch.tensor([1.0], device=self.device, dtype=dtype)
                    self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
Example #4
 def test_evaluate_q_multi_fidelity_knowledge_gradient(self):
     for dtype in (torch.float, torch.double):
         # basic test
         n_f = 4
         current_value = torch.rand(1, device=self.device, dtype=dtype)
         cau = GenericCostAwareUtility(mock_util)
         mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
         variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
         mfm = MockModel(MockPosterior(mean=mean, variance=variance))
         with mock.patch.object(MockModel, "fantasize",
                                return_value=mfm) as patch_f:
             mm = MockModel(None)
             qMFKG = qMultiFidelityKnowledgeGradient(
                 model=mm,
                 num_fantasies=n_f,
                 current_value=current_value,
                 cost_aware_utility=cau,
             )
             X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
             val = qMFKG(X)
             patch_f.assert_called_once()
             cargs, ckwargs = patch_f.call_args
             self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
         val_exp = mock_util(X,
                             mean.squeeze(-1) - current_value).mean(dim=0)
         self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
         self.assertTrue(
             torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
         # batched evaluation
         b = 2
         current_value = torch.rand(b, device=self.device, dtype=dtype)
         cau = GenericCostAwareUtility(mock_util)
         mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
         variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
         mfm = MockModel(MockPosterior(mean=mean, variance=variance))
         X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
         with mock.patch.object(MockModel, "fantasize",
                                return_value=mfm) as patch_f:
             mm = MockModel(None)
             qMFKG = qMultiFidelityKnowledgeGradient(
                 model=mm,
                 num_fantasies=n_f,
                 current_value=current_value,
                 cost_aware_utility=cau,
             )
             val = qMFKG(X)
             patch_f.assert_called_once()
             cargs, ckwargs = patch_f.call_args
             self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
         val_exp = mock_util(X,
                             mean.squeeze(-1) - current_value).mean(dim=0)
         self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
         self.assertTrue(
             torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
         # pending points and current value
         mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
         variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
         X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
         mfm = MockModel(MockPosterior(mean=mean, variance=variance))
         current_value = torch.rand(1, device=self.device, dtype=dtype)
         X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
         with mock.patch.object(MockModel, "fantasize",
                                return_value=mfm) as patch_f:
             mm = MockModel(None)
             qMFKG = qMultiFidelityKnowledgeGradient(
                 model=mm,
                 num_fantasies=n_f,
                 X_pending=X_pending,
                 current_value=current_value,
                 cost_aware_utility=cau,
             )
             val = qMFKG(X)
             patch_f.assert_called_once()
             cargs, ckwargs = patch_f.call_args
             self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
         val_exp = mock_util(X,
                             mean.squeeze(-1) - current_value).mean(dim=0)
         self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
         self.assertTrue(
             torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
         # test objective (inner MC sampling)
         objective = GenericMCObjective(objective=lambda Y: Y.norm(dim=-1))
         samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
         mfm = MockModel(MockPosterior(samples=samples))
         X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
         with mock.patch.object(MockModel, "fantasize",
                                return_value=mfm) as patch_f:
             mm = MockModel(None)
             qMFKG = qMultiFidelityKnowledgeGradient(
                 model=mm,
                 num_fantasies=n_f,
                 objective=objective,
                 current_value=current_value,
                 cost_aware_utility=cau,
             )
             val = qMFKG(X)
             patch_f.assert_called_once()
             cargs, ckwargs = patch_f.call_args
             self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
         val_exp = mock_util(X,
                             objective(samples) - current_value).mean(dim=0)
         self.assertTrue(torch.allclose(val, val_exp, atol=1e-4))
         self.assertTrue(
             torch.equal(qMFKG.extract_candidates(X), X[..., :-n_f, :]))
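
As a closing note on the extract_candidates assertions repeated above: in the one-shot KG formulation, qMFKG is evaluated on q + num_fantasies points per batch, and extract_candidates simply drops the trailing num_fantasies fantasy rows. A minimal sketch, reusing the qMFKG instance from the sketch after Example #1 and assuming placeholder sizes:

q, n_f, d = 2, 4, 2
full_X = torch.rand(q + n_f, d, dtype=torch.double)  # actual + fantasy points
candidates = qMFKG.extract_candidates(full_X)        # keeps the first q rows
assert candidates.shape == (q, d)
assert torch.equal(candidates, full_X[..., :-n_f, :])
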