Code example #1
File: test_botorch_model.py Project: proteanblank/Ax
    def test_BotorchModelConstraints(self):
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
            dtype=torch.float, cuda=False, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=torch.float,
                                                           cuda=False,
                                                           constant_noise=True)
        # make infeasible
        Xs2[0] = -1 * Xs2[0]
        objective_weights = torch.tensor([-1.0, 1.0],
                                         dtype=torch.float,
                                         device=torch.device("cpu"))
        n = 3
        model = BotorchModel()
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()

        # because there are no feasible points:
        with self.assertRaises(ValueError):
            model.gen(n, bounds, objective_weights)
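
Note: every example on this page unpacks the same seven-value tuple from get_torch_test_data. The helper lives in Ax's test utilities; the following is a hypothetical stand-in inferred from the call sites above, with illustrative values rather than the real implementation:

import torch

def get_torch_test_data(dtype=torch.float, cuda=False, constant_noise=False,
                        offset=0.0, task_features=None):
    # Returns (Xs, Ys, Yvars, bounds, task_features, feature_names, metric_names).
    device = torch.device("cuda" if cuda else "cpu")
    Xs = [torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]],
                       dtype=dtype, device=device) + offset]
    Ys = [torch.tensor([[3.0], [4.0]], dtype=dtype, device=device) + offset]
    # Constant observation noise vs. per-point noise variances.
    Yvars = ([torch.full_like(Ys[0], 1.0)] if constant_noise
             else [torch.tensor([[0.0], [2.0]], dtype=dtype, device=device)])
    bounds = [(0.0, 10.0)] * 3
    tfs = task_features or []   # indices of task features
    fns = ["x1", "x2", "x3"]    # feature names
    mns = ["y", "w"]            # metric names
    return Xs, Ys, Yvars, bounds, tfs, fns, mns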
Code example #2
File: test_botorch_model.py Project: proteanblank/Ax
    def test_fixed_rank_BotorchModel(self, dtype=torch.float, cuda=False):
        Xs1, Ys1, Yvars1, bounds, _, fns, _ = get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=dtype,
                                                           cuda=cuda,
                                                           constant_noise=True)
        model = BotorchModel(multitask_gp_ranks={"y": 2, "w": 1})

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=[0],
                ),
                metric_names=["y", "w"],
            )
            _mock_fit_model.assert_called_once()

        # Check ranks
        model_list = model.model.models
        self.assertEqual(model_list[0]._rank, 2)
        self.assertEqual(model_list[1]._rank, 1)
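
Note: the _rank assertions refer to the rank of the low-rank task covariance inside each multi-task submodel. A minimal GPyTorch sketch of what that rank controls, assuming the standard IndexKernel parameterization B @ B.T + diag(v):

from gpytorch.kernels import IndexKernel

task_kernel = IndexKernel(num_tasks=2, rank=1)  # low-rank factor B is 2 x 1
print(task_kernel.covar_factor.shape)           # torch.Size([2, 1])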
Code example #3
 def setUp(self):
     self.dtype = torch.float
     self.Xs, self.Ys, self.Yvars, _, _, _, _ = get_torch_test_data(
         dtype=self.dtype)
     self.Xs2, self.Ys2, self.Yvars2, _, _, _, _ = get_torch_test_data(
         dtype=self.dtype,
         offset=1.0  # Making this data different.
     )
     self.none_Yvars = [torch.tensor([[np.nan], [np.nan]])]
     self.task_features = []
     self.objective_thresholds = torch.tensor([0.5, 1.5])
Code example #4
 def setUp(self):
     self.dtype = torch.float
     self.Xs, self.Ys, self.Yvars, _, _, _, _ = get_torch_test_data(
         dtype=self.dtype)
     self.Xs2, self.Ys2, self.Yvars2, _, _, _, _ = get_torch_test_data(
         dtype=self.dtype,
         offset=1.0  # Making this data different.
     )
     self.none_Yvars = [torch.tensor([[np.nan], [np.nan]])]
     self.task_features = []
Code example #5
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.acquisition_class = KnowledgeGradient
        self.botorch_acqf_class = qKnowledgeGradient
        self.acquisition_options = {Keys.NUM_FANTASIES: 64}
        self.model = BoTorchModel(
            surrogate=self.surrogate,
            acquisition_class=self.acquisition_class,
            acquisition_options=self.acquisition_options,
        )

        self.dtype = torch.float
        Xs1, Ys1, Yvars1, self.bounds, _, _, _ = get_torch_test_data(
            dtype=self.dtype)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=self.dtype,
                                                           offset=1.0)
        self.Xs = Xs1 + Xs2
        self.Ys = Ys1 + Ys2
        self.Yvars = Yvars1 + Yvars2
        self.X = Xs1[0]
        self.Y = Ys1[0]
        self.Yvar = Yvars1[0]
        self.X2 = Xs2[0]
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2", "x3"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
            task_features=[],
            fidelity_features=[2],
            target_fidelities={1: 1.0},
        )
        self.metric_names = ["y"]
        self.metric_names_for_list_surrogate = ["y1", "y2"]
        self.candidate_metadata = []
        self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}
        self.model_gen_options = {Keys.OPTIMIZER_KWARGS: self.optimizer_options}
        self.objective_weights = torch.tensor([1.0])
        self.objective_thresholds = None
        self.outcome_constraints = None
        self.linear_constraints = None
        self.fixed_features = None
        self.pending_observations = None
        self.rounding_func = "func"
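
Note: Keys.NUM_FANTASIES above feeds BoTorch's qKnowledgeGradient, whose num_fantasies argument sets how many fantasy models the acquisition function builds. A minimal standalone sketch with toy data (illustrative only):

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import qKnowledgeGradient

train_X = torch.rand(8, 3, dtype=torch.double)
train_Y = torch.rand(8, 1, dtype=torch.double)
acqf = qKnowledgeGradient(SingleTaskGP(train_X, train_Y), num_fantasies=64)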
Code example #6
 def setUp(self):
     self.outcomes = ["outcome_1", "outcome_2"]
     self.mll_class = SumMarginalLogLikelihood
     self.dtype = torch.float
     self.search_space_digest = SearchSpaceDigest(feature_names=[],
                                                  bounds=[],
                                                  task_features=[0])
     self.task_features = [0]
     Xs1, Ys1, Yvars1, bounds, _, _, _ = get_torch_test_data(
         dtype=self.dtype,
         task_features=self.search_space_digest.task_features)
     Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(
         dtype=self.dtype,
         task_features=self.search_space_digest.task_features)
     self.botorch_submodel_class_per_outcome = {
         self.outcomes[0]: choose_model_class(
             Yvars=Yvars1, search_space_digest=self.search_space_digest),
         self.outcomes[1]: choose_model_class(
             Yvars=Yvars2, search_space_digest=self.search_space_digest),
     }
     self.expected_submodel_type = FixedNoiseMultiTaskGP
     for submodel_cls in self.botorch_submodel_class_per_outcome.values():
         self.assertEqual(submodel_cls, FixedNoiseMultiTaskGP)
     self.Xs = Xs1 + Xs2
     self.Ys = Ys1 + Ys2
     self.Yvars = Yvars1 + Yvars2
     self.training_data = [
         TrainingData(X=X, Y=Y, Yvar=Yvar)
         for X, Y, Yvar in zip(self.Xs, self.Ys, self.Yvars)
     ]
     self.submodel_options_per_outcome = {
         self.outcomes[0]: {RANK: 1},
         self.outcomes[1]: {RANK: 2},
     }
     self.surrogate = ListSurrogate(
         botorch_submodel_class_per_outcome=self.botorch_submodel_class_per_outcome,
         mll_class=self.mll_class,
         submodel_options_per_outcome=self.submodel_options_per_outcome,
     )
     self.bounds = [(0.0, 1.0), (1.0, 4.0)]
     self.feature_names = ["x1", "x2"]
Code example #7
File: test_botorch_model.py Project: proteanblank/Ax
    def test_fixed_prior_BotorchModel(self, dtype=torch.float, cuda=False):
        Xs1, Ys1, Yvars1, bounds, _, fns, _ = get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=dtype,
                                                           cuda=cuda,
                                                           constant_noise=True)
        kwargs = {
            "prior": {
                "type": LKJCovariancePrior,
                "sd_prior": GammaPrior(2.0, 0.44),
                "eta": 0.6,
            }
        }
        model = BotorchModel(**kwargs)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=[0],
                ),
                metric_names=["y", "w"],
            )
            _mock_fit_model.assert_called_once()

        # Check priors
        model_list = model.model.models
        for i in range(1):
            self.assertIsInstance(
                model_list[i].task_covar_module.IndexKernelPrior,
                LKJCovariancePrior)
            self.assertEqual(
                model_list[i].task_covar_module.IndexKernelPrior.sd_prior.concentration,
                2.0,
            )
            self.assertEqual(
                model_list[i].task_covar_module.IndexKernelPrior.sd_prior.rate,
                0.44,
            )
            self.assertEqual(
                model_list[i].task_covar_module.IndexKernelPrior.correlation_prior.eta,
                0.6,
            )
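
Note: the assertions above check that Ax forwarded the "prior" kwargs into a GPyTorch LKJCovariancePrior on the task covariance: eta shapes the LKJ prior over task correlations and sd_prior is a Gamma prior on per-task standard deviations. A standalone sketch of the same configuration (two tasks assumed):

from gpytorch.priors import GammaPrior, LKJCovariancePrior

sd_prior = GammaPrior(2.0, 0.44)  # concentration=2.0, rate=0.44
prior = LKJCovariancePrior(n=2, eta=0.6, sd_prior=sd_prior)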
Code example #8
File: test_botorch_model.py Project: tangzhenyu/ax
 def test_BotorchModelOneOutcome(self):
     Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
         dtype=torch.float, cuda=False, constant_noise=True)
     for use_input_warping in (True, False):
         model = BotorchModel(use_input_warping=use_input_warping)
         with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
             model.fit(
                 Xs=Xs1,
                 Ys=Ys1,
                 Yvars=Yvars1,
                 bounds=bounds,
                 task_features=tfs,
                 feature_names=fns,
                 metric_names=mns[0],
                 fidelity_features=[],
             )
             _mock_fit_model.assert_called_once()
         X = torch.rand(2, 3, dtype=torch.float)
         f_mean, f_cov = model.predict(X)
         self.assertTrue(f_mean.shape == torch.Size([2, 1]))
         self.assertTrue(f_cov.shape == torch.Size([2, 1, 1]))
         if use_input_warping:
             self.assertTrue(hasattr(model.model, "input_transform"))
             self.assertIsInstance(model.model.input_transform, Warp)
         else:
             self.assertFalse(hasattr(model.model, "input_transform"))
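
Note: the input_transform asserted on here is BoTorch's Warp, a learnable Kumaraswamy-CDF warping of the inputs. A minimal sketch, assuming a BoTorch version in which Warp is constructed from the indices to warp:

import torch
from botorch.models.transforms.input import Warp

warp = Warp(indices=[0, 1, 2])  # warp all three input dimensions
X = torch.rand(5, 3)
X_warped = warp(X)              # same shape; inputs warped within (0, 1)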
Code example #9
 def setUp(self):
     self.botorch_model_class = SingleTaskGP
     self.mll_class = ExactMarginalLogLikelihood
     self.device = torch.device("cpu")
     self.dtype = torch.float
     self.Xs, self.Ys, self.Yvars, self.bounds, _, _, _ = get_torch_test_data(
         dtype=self.dtype)
     self.training_data = TrainingData(X=self.Xs[0],
                                       Y=self.Ys[0],
                                       Yvar=self.Yvars[0])
     self.surrogate_kwargs = self.botorch_model_class.construct_inputs(
         self.training_data)
     self.surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class,
         mll_class=self.mll_class)
     self.task_features = []
     self.feature_names = ["x1", "x2"]
     self.metric_names = ["y"]
     self.fidelity_features = []
     self.target_fidelities = {1: 1.0}
     self.fixed_features = {1: 2.0}
     self.refit = True
     self.objective_weights = torch.tensor([-1.0, 1.0],
                                           dtype=self.dtype,
                                           device=self.device)
     self.outcome_constraints = (
         torch.tensor([[1.0]]),
         torch.tensor([[0.5]]),
     )
     self.linear_constraints = (
         torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
         torch.tensor([[0.5], [1.0]]),
     )
     self.options = {}
Code example #10
File: test_surrogate.py Project: jeffersonp317/Ax
    def test_update(self, mock_fit_gpytorch, mock_MLL, mock_state_dict):
        self.surrogate.construct(
            training_data=self.training_data,
            fidelity_features=self.search_space_digest.fidelity_features,
        )
        # Check that correct arguments are passed to `fit`.
        with patch(f"{SURROGATE_PATH}.Surrogate.fit") as mock_fit:
            # Call `fit` by default
            self.surrogate.update(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                refit=self.refit,
                state_dict={"key": "val"},
            )
            mock_fit.assert_called_with(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                candidate_metadata=None,
                refit=self.refit,
                state_dict={"key": "val"},
            )

        # Check that the training data is correctly passed through to the
        # BoTorch `Model`.
        Xs, Ys, Yvars, bounds, _, _, _ = get_torch_test_data(
            dtype=self.dtype, offset=1.0
        )
        training_data = TrainingData(X=Xs[0], Y=Ys[0], Yvar=Yvars[0])
        surrogate_kwargs = self.botorch_model_class.construct_inputs(training_data)
        self.surrogate.update(
            training_data=training_data,
            search_space_digest=self.search_space_digest,
            metric_names=self.metric_names,
            refit=self.refit,
            state_dict={"key": "val"},
        )
        self.assertTrue(
            torch.equal(
                self.surrogate.model.train_inputs[0],
                surrogate_kwargs.get("train_X"),
            )
        )
        self.assertTrue(
            torch.equal(
                self.surrogate.model.train_targets,
                surrogate_kwargs.get("train_Y").squeeze(1),
            )
        )

        # If should not be reconstructed, check that error is raised.
        self.surrogate._constructed_manually = True
        with self.assertRaisesRegex(NotImplementedError, ".* constructed manually"):
            self.surrogate.update(
                training_data=self.training_data,
                search_space_digest=self.search_space_digest,
                metric_names=self.metric_names,
                refit=self.refit,
            )
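
Note: the update test ultimately checks that new training data lands in the BoTorch model's train_inputs/train_targets (with the output dimension squeezed away). A minimal, self-contained sketch of that relationship on a plain SingleTaskGP; construct_inputs is assumed to return the same kwargs ("train_X"/"train_Y") built by hand here:

import torch
from botorch.models import SingleTaskGP

train_X, train_Y = torch.rand(4, 2), torch.rand(4, 1)
model = SingleTaskGP(train_X, train_Y)
print(torch.equal(model.train_inputs[0], train_X))            # True
print(torch.equal(model.train_targets, train_Y.squeeze(-1)))  # True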
Code example #11
        def test_FullyBayesianBotorchModelConstraints(self):
            Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
                dtype=torch.float, cuda=False, constant_noise=True)
            Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(
                dtype=torch.float, cuda=False, constant_noise=True)
            # make infeasible
            Xs2[0] = -1 * Xs2[0]
            objective_weights = torch.tensor([-1.0, 1.0],
                                             dtype=torch.float,
                                             device=torch.device("cpu"))
            n = 3
            model = self.model_cls(
                num_samples=4,
                thinning=1,
                disable_progbar=True,
                max_tree_depth=1,
            )
            dummy_samples = _get_dummy_mcmc_samples(num_samples=4,
                                                    num_outputs=2,
                                                    dtype=torch.float,
                                                    device=Xs1[0].device)
            with mock.patch(RUN_INFERENCE_PATH,
                            side_effect=dummy_samples) as _mock_fit_model:
                model.fit(
                    Xs=Xs1 + Xs2,
                    Ys=Ys1 + Ys2,
                    Yvars=Yvars1 + Yvars2,
                    search_space_digest=SearchSpaceDigest(
                        feature_names=fns,
                        bounds=bounds,
                        task_features=tfs,
                    ),
                    metric_names=mns,
                )
                self.assertEqual(_mock_fit_model.call_count, 2)

            # because there are no feasible points:
            with self.assertRaises(ValueError):
                model.gen(n, bounds, objective_weights)
Code example #12
File: test_fully_bayesian.py Project: kjanoudi/Ax
 def test_FullyBayesianBotorchModelOneOutcome(self):
     Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
         dtype=torch.float, cuda=False, constant_noise=True)
     for use_input_warping, gp_kernel in product([True, False],
                                                 ["rbf", "matern"]):
         model = self.model_cls(
             use_input_warping=use_input_warping,
             num_samples=4,
             thinning=1,
             disable_progbar=True,
             max_tree_depth=1,
             gp_kernel=gp_kernel,
         )
         dummy_samples = _get_dummy_mcmc_samples(
             num_samples=4,
             num_outputs=1,
             dtype=torch.float,
             device=Xs1[0].device,
         )
         with mock.patch(RUN_INFERENCE_PATH,
                         side_effect=dummy_samples) as _mock_fit_model:
             model.fit(
                 Xs=Xs1,
                 Ys=Ys1,
                 Yvars=Yvars1,
                 search_space_digest=SearchSpaceDigest(
                     feature_names=fns,
                     bounds=bounds,
                     task_features=tfs,
                 ),
                 metric_names=mns[0],
             )
             _mock_fit_model.assert_called_once()
         X = torch.rand(2, 3, dtype=torch.float)
         f_mean, f_cov = model.predict(X)
         self.assertTrue(f_mean.shape == torch.Size([2, 1]))
         self.assertTrue(f_cov.shape == torch.Size([2, 1, 1]))
         model_list = model.model.models
         self.assertTrue(len(model_list) == 1)
         if use_input_warping:
             self.assertTrue(hasattr(model_list[0], "input_transform"))
             self.assertIsInstance(model_list[0].input_transform, Warp)
         else:
             self.assertFalse(hasattr(model_list[0], "input_transform"))
Code example #13
 def test_BotorchModelOneOutcome(self):
     Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
         dtype=torch.float, cuda=False, constant_noise=True)
     model = BotorchModel()
     with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
         model.fit(
             Xs=Xs1,
             Ys=Ys1,
             Yvars=Yvars1,
             bounds=bounds,
             task_features=tfs,
             feature_names=fns,
             metric_names=mns[0],
             fidelity_features=[],
         )
         _mock_fit_model.assert_called_once()
     X = torch.rand(2, 3, dtype=torch.float)
     f_mean, f_cov = model.predict(X)
     self.assertTrue(f_mean.shape == torch.Size([2, 1]))
     self.assertTrue(f_cov.shape == torch.Size([2, 1, 1]))
Code example #14
 def test_BotorchModelOneOutcome(self):
     Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
         dtype=torch.float, cuda=False, constant_noise=True
     )
     for use_input_warping, use_loocv_pseudo_likelihood in product(
         (True, False), (True, False)
     ):
         model = BotorchModel(
             use_input_warping=use_input_warping,
             use_loocv_pseudo_likelihood=use_loocv_pseudo_likelihood,
         )
         with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
             model.fit(
                 Xs=Xs1,
                 Ys=Ys1,
                 Yvars=Yvars1,
                 bounds=bounds,
                 task_features=tfs,
                 feature_names=fns,
                 metric_names=mns[0],
                 fidelity_features=[],
             )
             _mock_fit_model.assert_called_once()
             if use_loocv_pseudo_likelihood:
                 mll_cls = LeaveOneOutPseudoLikelihood
             else:
                 mll_cls = ExactMarginalLogLikelihood
             self.assertIsInstance(
                 _mock_fit_model.mock_calls[0][1][0],
                 mll_cls,
             )
         X = torch.rand(2, 3, dtype=torch.float)
         f_mean, f_cov = model.predict(X)
         self.assertTrue(f_mean.shape == torch.Size([2, 1]))
         self.assertTrue(f_cov.shape == torch.Size([2, 1, 1]))
         if use_input_warping:
             self.assertTrue(hasattr(model.model, "input_transform"))
             self.assertIsInstance(model.model.input_transform, Warp)
         else:
             self.assertFalse(hasattr(model.model, "input_transform"))
Code example #15
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.acquisition_class = Acquisition
        self.botorch_acqf_class = qExpectedImprovement
        self.acquisition_options = ACQ_OPTIONS
        self.model = BoTorchModel(
            surrogate=self.surrogate,
            acquisition_class=self.acquisition_class,
            botorch_acqf_class=self.botorch_acqf_class,
            acquisition_options=self.acquisition_options,
        )

        self.dtype = torch.float
        self.device = torch.device("cpu")
        tkwargs = {"dtype": self.dtype, "device": self.device}
        Xs1, Ys1, Yvars1, self.bounds, _, _, _ = get_torch_test_data(
            dtype=self.dtype)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=self.dtype,
                                                           offset=1.0)
        self.Xs = Xs1
        self.Ys = Ys1
        self.Yvars = Yvars1
        self.X_test = Xs2[0]
        self.block_design_training_data = TrainingData(Xs=self.Xs,
                                                       Ys=self.Ys,
                                                       Yvars=self.Yvars)
        self.non_block_design_training_data = TrainingData(
            Xs=Xs1 + Xs2,
            Ys=Ys1 + Ys2,
            Yvars=Yvars1 + Yvars2,
        )
        self.search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2", "x3"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
        )
        self.mf_search_space_digest = SearchSpaceDigest(
            feature_names=["x1", "x2", "x3"],
            bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
            task_features=[],
            fidelity_features=[2],
            target_fidelities={1: 1.0},
        )
        self.metric_names = ["y"]
        self.metric_names_for_list_surrogate = ["y1", "y2"]
        self.candidate_metadata = []
        self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}
        self.model_gen_options = {Keys.OPTIMIZER_KWARGS: self.optimizer_options}
        self.objective_weights = torch.tensor([1.0], **tkwargs)
        self.moo_objective_weights = torch.tensor([1.0, 1.5, 0.0], **tkwargs)
        self.moo_objective_thresholds = torch.tensor(
            [0.5, 1.5, float("nan")], **tkwargs)
        self.outcome_constraints = None
        self.linear_constraints = None
        self.fixed_features = None
        self.pending_observations = None
        self.rounding_func = "func"
        self.moo_training_data = TrainingData(
            Xs=self.Xs * 3,
            Ys=self.non_block_design_training_data.Ys + self.Ys,
            Yvars=self.Yvars * 3,
        )
        self.moo_metric_names = ["y1", "y2", "y3"]
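
Note: in the MOO fixture above, a 0.0 entry in moo_objective_weights conventionally marks an outcome that is modeled but not optimized, and the float("nan") objective threshold marks a threshold left for Ax to infer; both readings are assumptions about Ax's multi-objective torch API, sketched here:

import torch

weights = torch.tensor([1.0, 1.5, 0.0])
thresholds = torch.tensor([0.5, 1.5, float("nan")])
is_objective = weights != 0               # tensor([True, True, False])
to_be_inferred = torch.isnan(thresholds)  # tensor([False, False, True])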
Code example #16
File: test_botorch_model.py Project: proteanblank/Ax
    def test_BotorchModel(self, dtype=torch.float, cuda=False):
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(dtype=dtype,
                                                           cuda=cuda,
                                                           constant_noise=True)
        for use_input_warping in (True, False):
            for use_loocv_pseudo_likelihood in (True, False):
                model = BotorchModel(
                    use_input_warping=use_input_warping,
                    use_loocv_pseudo_likelihood=use_loocv_pseudo_likelihood,
                )
                # Test ModelListGP
                # make training data different for each output
                Xs2_diff = [Xs2[0] + 0.1]
                with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
                    model.fit(
                        Xs=Xs1 + Xs2_diff,
                        Ys=Ys1 + Ys2,
                        Yvars=Yvars1 + Yvars2,
                        search_space_digest=SearchSpaceDigest(
                            feature_names=fns,
                            bounds=bounds,
                            task_features=tfs,
                        ),
                        metric_names=mns,
                    )
                    _mock_fit_model.assert_called_once()
                    if use_loocv_pseudo_likelihood:
                        mll_cls = LeaveOneOutPseudoLikelihood
                    else:
                        mll_cls = ExactMarginalLogLikelihood
                    mlls = _mock_fit_model.mock_calls[0][1][0].mlls
                    self.assertTrue(len(mlls) == 2)
                    for mll in mlls:
                        self.assertIsInstance(mll, mll_cls)
                # Check attributes
                self.assertTrue(torch.equal(model.Xs[0], Xs1[0]))
                self.assertTrue(torch.equal(model.Xs[1], Xs2_diff[0]))
                self.assertEqual(model.dtype, Xs1[0].dtype)
                self.assertEqual(model.device, Xs1[0].device)
                self.assertIsInstance(model.model, ModelListGP)

                # Check fitting
                model_list = model.model.models
                self.assertTrue(
                    torch.equal(model_list[0].train_inputs[0], Xs1[0]))
                self.assertTrue(
                    torch.equal(model_list[1].train_inputs[0], Xs2_diff[0]))
                self.assertTrue(
                    torch.equal(model_list[0].train_targets, Ys1[0].view(-1)))
                self.assertTrue(
                    torch.equal(model_list[1].train_targets, Ys2[0].view(-1)))
                self.assertIsInstance(model_list[0].likelihood,
                                      _GaussianLikelihoodBase)
                self.assertIsInstance(model_list[1].likelihood,
                                      _GaussianLikelihoodBase)
                if use_input_warping:
                    self.assertTrue(model.use_input_warping)
                for m in model_list:
                    if use_input_warping:
                        self.assertTrue(hasattr(m, "input_transform"))
                        self.assertIsInstance(m.input_transform, Warp)
                    else:
                        self.assertFalse(hasattr(m, "input_transform"))

            # Test batched multi-output FixedNoiseGP
            with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
                model.fit(
                    Xs=Xs1 + Xs2,
                    Ys=Ys1 + Ys2,
                    Yvars=Yvars1 + Yvars2,
                    search_space_digest=SearchSpaceDigest(
                        feature_names=fns,
                        bounds=bounds,
                        task_features=tfs,
                    ),
                    metric_names=mns,
                )
                _mock_fit_model.assert_called_once()

            # Check attributes
            self.assertTrue(torch.equal(model.Xs[0], Xs1[0]))
            self.assertTrue(torch.equal(model.Xs[1], Xs2[0]))
            self.assertEqual(model.dtype, Xs1[0].dtype)
            self.assertEqual(model.device, Xs1[0].device)
            if use_input_warping:
                self.assertIsInstance(model.model, ModelListGP)
                models = model.model.models
            else:
                models = [model.model]
            Ys = [Ys1[0], Ys2[0]]
            for i, m in enumerate(models):
                self.assertIsInstance(m, FixedNoiseGP)
                expected_train_inputs = Xs1[0]

                if not use_input_warping:
                    expected_train_inputs = expected_train_inputs.unsqueeze(0).expand(
                        torch.Size([2]) + Xs1[0].shape)
                    expected_train_targets = torch.cat(Ys1 + Ys2, dim=-1).permute(1, 0)
                else:
                    expected_train_targets = Ys[i].squeeze(-1)
                # Check fitting
                # train inputs should be `o x n x 1`
                self.assertTrue(
                    torch.equal(
                        m.train_inputs[0],
                        expected_train_inputs,
                    ))
                # train targets should be `o x n`
                self.assertTrue(
                    torch.equal(
                        m.train_targets,
                        expected_train_targets,
                    ))
                self.assertIsInstance(m.likelihood, _GaussianLikelihoodBase)

            # Check infeasible cost can be computed on the model
            device = torch.device("cuda") if cuda else torch.device("cpu")
            objective_weights = torch.tensor([1.0, 0.0],
                                             dtype=dtype,
                                             device=device)
            objective_transform = get_objective_weights_transform(
                objective_weights)
            infeasible_cost = torch.tensor(
                get_infeasible_cost(X=Xs1[0],
                                    model=model.model,
                                    objective=objective_transform))
            expected_infeasible_cost = -1 * torch.min(
                objective_transform(
                    model.model.posterior(Xs1[0]).mean -
                    6 * model.model.posterior(Xs1[0]).variance.sqrt()).min(),
                torch.tensor(0.0, dtype=dtype, device=device),
            )
            self.assertTrue(
                torch.abs(infeasible_cost - expected_infeasible_cost) < 1e-5)

            # Check prediction
            X = torch.tensor([[6.0, 7.0, 8.0]], dtype=dtype, device=device)
            f_mean, f_cov = model.predict(X)
            self.assertTrue(f_mean.shape == torch.Size([1, 2]))
            self.assertTrue(f_cov.shape == torch.Size([1, 2, 2]))

            # Check generation
            objective_weights = torch.tensor([1.0, 0.0],
                                             dtype=dtype,
                                             device=device)
            outcome_constraints = (
                torch.tensor([[0.0, 1.0]], dtype=dtype, device=device),
                torch.tensor([[5.0]], dtype=dtype, device=device),
            )
            linear_constraints = (
                torch.tensor([[0.0, 1.0, 1.0]]),
                torch.tensor([[100.0]]),
            )
            fixed_features = None
            pending_observations = [
                torch.tensor([[1.0, 3.0, 4.0]], dtype=dtype, device=device),
                torch.tensor([[2.0, 6.0, 8.0]], dtype=dtype, device=device),
            ]
            n = 3

            X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]],
                                   dtype=dtype,
                                   device=device)
            acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]],
                                       dtype=dtype,
                                       device=device)
            model_gen_options = {}
            # test sequential optimize
            with mock.patch(
                    "ax.models.torch.botorch_defaults.optimize_acqf",
                    return_value=(X_dummy, acqfv_dummy),
            ) as mock_optimize_acqf:
                Xgen, wgen, gen_metadata, cand_metadata = model.gen(
                    n=n,
                    bounds=bounds,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    linear_constraints=linear_constraints,
                    fixed_features=fixed_features,
                    pending_observations=pending_observations,
                    model_gen_options=model_gen_options,
                    rounding_func=dummy_func,
                )
                # note: gen() always returns CPU tensors
                self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
                self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=dtype)))

            # test joint optimize
            with mock.patch(
                    "ax.models.torch.botorch_defaults.optimize_acqf",
                    return_value=(X_dummy, acqfv_dummy),
            ) as mock_optimize_acqf:
                Xgen, wgen, gen_metadata, cand_metadata = model.gen(
                    n=n,
                    bounds=bounds,
                    objective_weights=objective_weights,
                    outcome_constraints=None,
                    linear_constraints=None,
                    fixed_features=fixed_features,
                    pending_observations=pending_observations,
                    model_gen_options={
                        "optimizer_kwargs": {
                            "joint_optimization": True
                        }
                    },
                )
                # note: gen() always returns CPU tensors
                self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
                self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=dtype)))
                mock_optimize_acqf.assert_called_once()

            # test that fidelity features are unsupported
            with self.assertRaises(NotImplementedError):
                Xgen, wgen = model.gen(
                    n=n,
                    bounds=bounds,
                    objective_weights=objective_weights,
                    outcome_constraints=None,
                    linear_constraints=None,
                    fixed_features=fixed_features,
                    pending_observations=pending_observations,
                    model_gen_options={
                        "optimizer_kwargs": {
                            "joint_optimization": True
                        }
                    },
                    target_fidelities={0: 3.0},
                )

            # test get_rounding_func
            dummy_rounding = get_rounding_func(rounding_func=dummy_func)
            X_temp = torch.rand(1, 2, 3, 4)
            self.assertTrue(torch.equal(X_temp, dummy_rounding(X_temp)))

            # Check best point selection
            xbest = model.best_point(bounds=bounds,
                                     objective_weights=objective_weights)
            xbest = model.best_point(
                bounds=bounds,
                objective_weights=objective_weights,
                fixed_features={0: 100.0},
            )
            self.assertIsNone(xbest)

            # test that fidelity features are unsupported
            with self.assertRaises(NotImplementedError):
                xbest = model.best_point(
                    bounds=bounds,
                    objective_weights=objective_weights,
                    fixed_features={0: 100.0},
                    target_fidelities={0: 3.0},
                )

            # Test cross-validation
            mean, variance = model.cross_validate(
                Xs_train=Xs1 + Xs2,
                Ys_train=Ys1 + Ys2,
                Yvars_train=Yvars1 + Yvars2,
                X_test=torch.tensor([[1.2, 3.2, 4.2], [2.4, 5.2, 3.2]],
                                    dtype=dtype,
                                    device=device),
            )
            self.assertTrue(mean.shape == torch.Size([2, 2]))
            self.assertTrue(variance.shape == torch.Size([2, 2, 2]))

            # Test cross-validation with refit_on_cv
            model.refit_on_cv = True
            mean, variance = model.cross_validate(
                Xs_train=Xs1 + Xs2,
                Ys_train=Ys1 + Ys2,
                Yvars_train=Yvars1 + Yvars2,
                X_test=torch.tensor([[1.2, 3.2, 4.2], [2.4, 5.2, 3.2]],
                                    dtype=dtype,
                                    device=device),
            )
            self.assertTrue(mean.shape == torch.Size([2, 2]))
            self.assertTrue(variance.shape == torch.Size([2, 2, 2]))

            # Test update
            model.refit_on_update = False
            model.update(Xs=Xs2 + Xs2, Ys=Ys2 + Ys2, Yvars=Yvars2 + Yvars2)

            # Test feature_importances
            importances = model.feature_importances()
            self.assertEqual(importances.shape, torch.Size([2, 1, 3]))

            # When calling update directly, the data is completely overwritten.
            self.assertTrue(torch.equal(model.Xs[0], Xs2[0]))
            self.assertTrue(torch.equal(model.Xs[1], Xs2[0]))
            self.assertTrue(torch.equal(model.Ys[0], Ys2[0]))
            self.assertTrue(torch.equal(model.Yvars[0], Yvars2[0]))

            model.refit_on_update = True
            with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
                model.update(Xs=Xs2 + Xs2, Ys=Ys2 + Ys2, Yvars=Yvars2 + Yvars2)

            # test unfit model CV, update, and feature_importances
            unfit_model = BotorchModel()
            with self.assertRaises(RuntimeError):
                unfit_model.cross_validate(
                    Xs_train=Xs1 + Xs2,
                    Ys_train=Ys1 + Ys2,
                    Yvars_train=Yvars1 + Yvars2,
                    X_test=Xs1[0],
                )
            with self.assertRaises(RuntimeError):
                unfit_model.update(Xs=Xs1 + Xs2,
                                   Ys=Ys1 + Ys2,
                                   Yvars=Yvars1 + Yvars2)
            with self.assertRaises(RuntimeError):
                unfit_model.feature_importances()

            # Test loading state dict
            tkwargs = {"device": device, "dtype": dtype}
            true_state_dict = {
                "mean_module.constant": [3.5004],
                "covar_module.raw_outputscale": 2.2438,
                "covar_module.base_kernel.raw_lengthscale": [[-0.9274, -0.9274, -0.9274]],
                "covar_module.base_kernel.raw_lengthscale_constraint.lower_bound": 0.1,
                "covar_module.base_kernel.raw_lengthscale_constraint.upper_bound": 2.5,
                "covar_module.base_kernel.lengthscale_prior.concentration": 3.0,
                "covar_module.base_kernel.lengthscale_prior.rate": 6.0,
                "covar_module.raw_outputscale_constraint.lower_bound": 0.2,
                "covar_module.raw_outputscale_constraint.upper_bound": 2.6,
                "covar_module.outputscale_prior.concentration": 2.0,
                "covar_module.outputscale_prior.rate": 0.15,
            }
            true_state_dict = {
                key: torch.tensor(val, **tkwargs)
                for key, val in true_state_dict.items()
            }
            model = get_and_fit_model(
                Xs=Xs1,
                Ys=Ys1,
                Yvars=Yvars1,
                task_features=[],
                fidelity_features=[],
                metric_names=mns[0],
                state_dict=true_state_dict,
                refit_model=False,
            )
            for k, v in chain(model.named_parameters(), model.named_buffers()):
                self.assertTrue(torch.equal(true_state_dict[k], v))

            # Test for some change in model parameters & buffer for refit_model=True
            true_state_dict["mean_module.constant"] += 0.1
            true_state_dict["covar_module.raw_outputscale"] += 0.1
            true_state_dict["covar_module.base_kernel.raw_lengthscale"] += 0.1
            model = get_and_fit_model(
                Xs=Xs1,
                Ys=Ys1,
                Yvars=Yvars1,
                task_features=[],
                fidelity_features=[],
                metric_names=mns[0],
                state_dict=true_state_dict,
                refit_model=True,
            )
            self.assertTrue(
                any(not torch.equal(true_state_dict[k], v) for k, v in chain(
                    model.named_parameters(), model.named_buffers())))

        # Test that recommend_best_out_of_sample_point errors w/o _get_best_point_acqf
        model = BotorchModel(
            best_point_recommender=recommend_best_out_of_sample_point)
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2_diff,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
        with self.assertRaises(RuntimeError):
            xbest = model.best_point(bounds=bounds,
                                     objective_weights=objective_weights)
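
Note: the infeasible-cost check above mirrors BoTorch's get_infeasible_cost convention for that version, roughly M = -min(min_x obj(mu(x) - 6 * sigma(x)), 0), i.e. a pessimistic lower bound on the objective. A self-contained numeric sketch with made-up posterior values:

import torch

mu = torch.tensor([0.3, -0.2, 1.1])    # hypothetical posterior means
sigma = torch.tensor([0.1, 0.4, 0.2])  # hypothetical posterior stddevs
pessimistic = mu - 6 * sigma           # [-0.3, -2.6, -0.1]
M = -torch.min(pessimistic.min(), torch.tensor(0.0))
print(M)  # tensor(2.6000)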
Code example #17
        def test_FullyBayesianBotorchModel(self,
                                           dtype=torch.float,
                                           cuda=False):
            Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
                dtype=dtype, cuda=cuda, constant_noise=True)
            Xs2, Ys2, Yvars2, _, _, _, _ = get_torch_test_data(
                dtype=dtype, cuda=cuda, constant_noise=True)
            Yvars_inferred_noise = [
                torch.full_like(Yvars1[0], float("nan")),
                torch.full_like(Yvars2[0], float("nan")),
            ]
            # make input different for each output
            Xs2_diff = [Xs2[0] + 0.1]
            Xs = Xs1 + Xs2_diff
            Ys = Ys1 + Ys2

            for inferred_noise, use_input_warping, use_saas in product(
                (True, False), repeat=3):
                Yvars = Yvars_inferred_noise if inferred_noise else Yvars1 + Yvars2
                model = self.model_cls(
                    use_input_warping=use_input_warping,
                    thinning=1,
                    num_samples=4,
                    use_saas=use_saas,
                    disable_progbar=True,
                    max_tree_depth=1,
                )
                if use_input_warping:
                    self.assertTrue(model.use_input_warping)
                # Test ModelListGP
                # make training data different for each output
                tkwargs = {"dtype": dtype, "device": Xs1[0].device}
                dummy_samples_list = _get_dummy_mcmc_samples(num_samples=4,
                                                             num_outputs=2,
                                                             **tkwargs)
                for dummy_samples in dummy_samples_list:
                    if use_input_warping:
                        dummy_samples["c0"] = (
                            torch.rand(4, 1, Xs1[0].shape[-1], **tkwargs) * 0.5
                            + 0.1)
                        dummy_samples["c1"] = (
                            torch.rand(4, 1, Xs1[0].shape[-1], **tkwargs) * 0.5
                            + 0.1)
                    if inferred_noise:
                        dummy_samples["noise"] = torch.rand(
                            4, 1,
                            **tkwargs).clamp_min(MIN_INFERRED_NOISE_LEVEL)

                with mock.patch(
                        RUN_INFERENCE_PATH,
                        side_effect=dummy_samples_list,
                ) as _mock_fit_model:
                    model.fit(
                        Xs=Xs,
                        Ys=Ys,
                        Yvars=Yvars,
                        search_space_digest=SearchSpaceDigest(
                            feature_names=fns,
                            bounds=bounds,
                            task_features=tfs,
                        ),
                        metric_names=mns,
                    )
                    self.assertEqual(_mock_fit_model.call_count, 2)
                    for i, call in enumerate(_mock_fit_model.call_args_list):
                        _, ckwargs = call
                        X = Xs[i]
                        Y = Ys[i]
                        Yvar = Yvars[i]
                        self.assertIs(ckwargs["pyro_model"], pyro_model)

                        self.assertTrue(torch.equal(ckwargs["X"], X))
                        self.assertTrue(torch.equal(ckwargs["Y"], Y))
                        if inferred_noise:
                            self.assertTrue(torch.isnan(ckwargs["Yvar"]).all())
                        else:
                            self.assertTrue(torch.equal(ckwargs["Yvar"], Yvar))
                        self.assertEqual(ckwargs["num_samples"], 4)
                        self.assertEqual(ckwargs["warmup_steps"], 1024)
                        self.assertEqual(ckwargs["max_tree_depth"], 1)
                        self.assertTrue(ckwargs["disable_progbar"])
                        self.assertEqual(ckwargs["use_input_warping"],
                                         use_input_warping)
                        self.assertEqual(ckwargs["use_saas"], use_saas)

                        # Check attributes
                        self.assertTrue(torch.equal(model.Xs[i], Xs[i]))
                        self.assertEqual(model.dtype, Xs[i].dtype)
                        self.assertEqual(model.device, Xs[i].device)
                        self.assertIsInstance(model.model, ModelListGP)

                        # Check fitting
                        # Note each model in the model list is a batched model, where
                        # the batch dim corresponds to the MCMC samples
                        model_list = model.model.models
                        # Put model in `eval` mode to transform the train inputs.
                        m = model_list[i].eval()
                        # check mcmc samples
                        dummy_samples = dummy_samples_list[i]
                        expected_train_inputs = Xs[i].expand(4, *Xs[i].shape)
                        if use_input_warping:
                            # train inputs should be warped inputs
                            expected_train_inputs = m.input_transform(
                                expected_train_inputs)
                        self.assertTrue(
                            torch.equal(
                                m.train_inputs[0],
                                expected_train_inputs,
                            ))
                        self.assertTrue(
                            torch.equal(
                                m.train_targets,
                                Ys[i].view(1, -1).expand(4, Ys[i].numel()),
                            ))
                        expected_noise = (
                            dummy_samples["noise"].view(m.likelihood.noise.shape)
                            if inferred_noise
                            else Yvars[i].view(1, -1).expand(4, Yvars[i].numel()))
                        self.assertTrue(
                            torch.allclose(
                                m.likelihood.noise.detach(),
                                expected_noise,
                            ))
                        self.assertIsInstance(m.likelihood,
                                              _GaussianLikelihoodBase)

                        self.assertTrue(
                            torch.allclose(
                                m.covar_module.base_kernel.lengthscale.detach(),
                                dummy_samples["lengthscale"].view(
                                    m.covar_module.base_kernel.lengthscale.shape),
                            ))
                        self.assertTrue(
                            torch.allclose(
                                m.covar_module.outputscale.detach(),
                                dummy_samples["outputscale"].view(
                                    m.covar_module.outputscale.shape),
                            ))
                        self.assertTrue(
                            torch.allclose(
                                m.mean_module.constant.detach(),
                                dummy_samples["mean"].view(
                                    m.mean_module.constant.shape),
                            ))
                        if use_input_warping:
                            self.assertTrue(hasattr(m, "input_transform"))
                            self.assertIsInstance(m.input_transform, Warp)
                            self.assertTrue(
                                torch.equal(
                                    m.input_transform.concentration0,
                                    dummy_samples_list[i]["c0"],
                                ))
                            self.assertTrue(
                                torch.equal(
                                    m.input_transform.concentration1,
                                    dummy_samples_list[i]["c1"],
                                ))
                        else:
                            self.assertFalse(hasattr(m, "input_transform"))
                # test that multi-task is not implemented
                (
                    Xs_mt,
                    Ys_mt,
                    Yvars_mt,
                    bounds_mt,
                    tfs_mt,
                    fns_mt,
                    mns_mt,
                ) = get_torch_test_data(dtype=dtype,
                                        cuda=cuda,
                                        constant_noise=True,
                                        task_features=[2])
                with mock.patch(
                        RUN_INFERENCE_PATH,
                        side_effect=dummy_samples_list,
                ) as _mock_fit_model, self.assertRaises(NotImplementedError):
                    model.fit(
                        Xs=Xs_mt,
                        Ys=Ys_mt,
                        Yvars=Yvars_mt,
                        search_space_digest=SearchSpaceDigest(
                            feature_names=fns_mt,
                            bounds=bounds_mt,
                            task_features=tfs_mt,
                        ),
                        metric_names=mns_mt,
                    )
                with mock.patch(
                        RUN_INFERENCE_PATH,
                        side_effect=dummy_samples_list,
                ) as _mock_fit_model, self.assertRaises(NotImplementedError):
                    model.fit(
                        Xs=Xs1 + Xs2,
                        Ys=Ys1 + Ys2,
                        Yvars=Yvars1 + Yvars2,
                        search_space_digest=SearchSpaceDigest(
                            feature_names=fns,
                            bounds=bounds,
                            fidelity_features=[0],
                        ),
                        metric_names=mns,
                    )
                # fit model with same inputs (otherwise X_observed will be None)
                model = self.model_cls(
                    use_input_warping=use_input_warping,
                    thinning=1,
                    num_samples=4,
                    use_saas=use_saas,
                    disable_progbar=True,
                    max_tree_depth=1,
                )
                Yvars = Yvars1 + Yvars2
                dummy_samples_list = _get_dummy_mcmc_samples(num_samples=4,
                                                             num_outputs=2,
                                                             **tkwargs)
                with mock.patch(
                        RUN_INFERENCE_PATH,
                        side_effect=dummy_samples_list,
                ) as _mock_fit_model:
                    model.fit(
                        Xs=Xs1 + Xs2,
                        Ys=Ys1 + Ys2,
                        Yvars=Yvars,
                        search_space_digest=SearchSpaceDigest(
                            feature_names=fns,
                            bounds=bounds,
                            task_features=tfs,
                        ),
                        metric_names=mns,
                    )
                # Check infeasible cost can be computed on the model
                device = torch.device("cuda") if cuda else torch.device("cpu")
                objective_weights = torch.tensor([1.0, 0.0],
                                                 dtype=dtype,
                                                 device=device)
                objective_transform = get_objective_weights_transform(
                    objective_weights)
                infeasible_cost = torch.tensor(
                    get_infeasible_cost(X=Xs1[0],
                                        model=model.model,
                                        objective=objective_transform))
                expected_infeasible_cost = -1 * torch.min(
                    objective_transform(
                        model.model.posterior(Xs1[0]).mean - 6 *
                        model.model.posterior(Xs1[0]).variance.sqrt()).min(),
                    torch.tensor(0.0, dtype=dtype, device=device),
                )
                self.assertTrue(
                    torch.abs(infeasible_cost -
                              expected_infeasible_cost) < 1e-5)

                # Check prediction
                X = torch.tensor([[6.0, 7.0, 8.0]], **tkwargs)
                f_mean, f_cov = model.predict(X)
                self.assertTrue(f_mean.shape == torch.Size([1, 2]))
                self.assertTrue(f_cov.shape == torch.Size([1, 2, 2]))

                # Check generation
                objective_weights = torch.tensor(
                    [1.0, 0.0] if self.model_cls is FullyBayesianBotorchModel
                    else [1.0, 1.0],
                    **tkwargs,
                )
                outcome_constraints = (
                    torch.tensor([[0.0, 1.0]], **tkwargs),
                    torch.tensor([[5.0]], **tkwargs),
                )
                gen_kwargs = ({
                    "objective_thresholds": torch.zeros(2, **tkwargs)
                } if self.model_cls is FullyBayesianMOOBotorchModel else {})
                linear_constraints = (
                    torch.tensor([[0.0, 1.0, 1.0]]),
                    torch.tensor([[100.0]]),
                )
                fixed_features = None
                pending_observations = [
                    torch.tensor([[1.0, 3.0, 4.0]], **tkwargs),
                    torch.tensor([[2.0, 6.0, 8.0]], **tkwargs),
                ]
                n = 3

                X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
                acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
                model_gen_options = {
                    Keys.OPTIMIZER_KWARGS: {
                        "maxiter": 1
                    },
                    Keys.ACQF_KWARGS: {
                        "mc_samples": 3
                    },
                }
                # test sequential optimize with constraints
                with mock.patch(
                        "ax.models.torch.botorch_defaults.optimize_acqf",
                        return_value=(X_dummy, acqfv_dummy),
                ) as _:
                    Xgen, wgen, gen_metadata, cand_metadata = model.gen(
                        n=n,
                        bounds=bounds,
                        objective_weights=objective_weights,
                        outcome_constraints=outcome_constraints,
                        linear_constraints=linear_constraints,
                        fixed_features=fixed_features,
                        pending_observations=pending_observations,
                        model_gen_options=model_gen_options,
                        rounding_func=dummy_func,
                        **gen_kwargs,
                    )
                    # note: gen() always returns CPU tensors
                    self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
                    self.assertTrue(
                        torch.equal(wgen, torch.ones(n, dtype=dtype)))

                # actually test optimization for 1 step without constraints
                with mock.patch(
                        "ax.models.torch.botorch_defaults.optimize_acqf",
                        wraps=optimize_acqf,
                        return_value=(X_dummy, acqfv_dummy),
                ) as _:
                    Xgen, wgen, gen_metadata, cand_metadata = model.gen(
                        n=n,
                        bounds=bounds,
                        objective_weights=objective_weights,
                        outcome_constraints=outcome_constraints,
                        fixed_features=fixed_features,
                        pending_observations=pending_observations,
                        model_gen_options=model_gen_options,
                        **gen_kwargs,
                    )
                    # note: gen() always returns CPU tensors
                    self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
                    self.assertTrue(
                        torch.equal(wgen, torch.ones(n, dtype=dtype)))

                # Check best point selection
                xbest = model.best_point(bounds=bounds,
                                         objective_weights=objective_weights)
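                # The call above exercises the default path; below, fixing
                # feature 0 to 100.0 rules out every candidate, hence None.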
                xbest = model.best_point(
                    bounds=bounds,
                    objective_weights=objective_weights,
                    fixed_features={0: 100.0},
                )
                self.assertIsNone(xbest)

                # Test cross-validation
                mean, variance = model.cross_validate(
                    Xs_train=Xs1 + Xs2,
                    Ys_train=Ys,
                    Yvars_train=Yvars,
                    X_test=torch.tensor([[1.2, 3.2, 4.2], [2.4, 5.2, 3.2]],
                                        dtype=dtype,
                                        device=device),
                )
                self.assertTrue(mean.shape == torch.Size([2, 2]))
                self.assertTrue(variance.shape == torch.Size([2, 2, 2]))
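                # cross_validate reuses the fitted hyperparameters by default
                # (refit_on_cv=False) and returns a (2 x 2) mean and
                # (2 x 2 x 2) covariance for the two test points.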

                # Test cross-validation with refit_on_cv
                model.refit_on_cv = True
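                # With refit_on_cv=True the surrogate is re-fit on the training
                # data first, so run_inference is mocked here for speed.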
                with mock.patch(
                        RUN_INFERENCE_PATH,
                        side_effect=dummy_samples_list,
                ) as _mock_fit_model:
                    mean, variance = model.cross_validate(
                        Xs_train=Xs1 + Xs2,
                        Ys_train=Ys,
                        Yvars_train=Yvars,
                        X_test=torch.tensor(
                            [[1.2, 3.2, 4.2], [2.4, 5.2, 3.2]],
                            dtype=dtype,
                            device=device,
                        ),
                    )
                    self.assertTrue(mean.shape == torch.Size([2, 2]))
                    self.assertTrue(variance.shape == torch.Size([2, 2, 2]))

                # Test update
                model.refit_on_update = False
                model.update(Xs=Xs2 + Xs2, Ys=Ys2 + Ys2, Yvars=Yvars2 + Yvars2)

                # Test feature_importances
                importances = model.feature_importances()
                self.assertEqual(importances.shape, torch.Size([2, 1, 3]))
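                # Shape (num_outputs, 1, num_features): importances are
                # presumably derived from the fitted kernel lengthscales
                # (shorter lengthscale means more important), reduced over
                # the posterior samples.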

                # When calling update directly, the data is completely overwritten.
                self.assertTrue(torch.equal(model.Xs[0], Xs2[0]))
                self.assertTrue(torch.equal(model.Xs[1], Xs2[0]))
                self.assertTrue(torch.equal(model.Ys[0], Ys2[0]))
                self.assertTrue(torch.equal(model.Yvars[0], Yvars2[0]))

                model.refit_on_update = True
                with mock.patch(
                        RUN_INFERENCE_PATH,
                        side_effect=dummy_samples_list) as _mock_fit_model:
                    model.update(Xs=Xs2 + Xs2,
                                 Ys=Ys2 + Ys2,
                                 Yvars=Yvars2 + Yvars2)

                # test unfit model CV, update, and feature_importances
                unfit_model = self.model_cls()
                with self.assertRaises(RuntimeError):
                    unfit_model.cross_validate(
                        Xs_train=Xs1 + Xs2,
                        Ys_train=Ys1 + Ys2,
                        Yvars_train=Yvars1 + Yvars2,
                        X_test=Xs1[0],
                    )
                with self.assertRaises(RuntimeError):
                    unfit_model.update(Xs=Xs1 + Xs2,
                                       Ys=Ys1 + Ys2,
                                       Yvars=Yvars1 + Yvars2)
                with self.assertRaises(RuntimeError):
                    unfit_model.feature_importances()
Code example #18
        def test_FullyBayesianBotorchModelPyro(self, cuda=False):
            for dtype in (torch.float, torch.double):
                Xs1, Ys1, raw_Yvars1, bounds, tfs, fns, mns = get_torch_test_data(
                    dtype=dtype, cuda=cuda, constant_noise=True)
                Xs2, Ys2, raw_Yvars2, _, _, _, _ = get_torch_test_data(
                    dtype=dtype, cuda=cuda, constant_noise=True)
                for inferred_noise, use_input_warping, use_saas in product(
                    (False, True), repeat=3):
                    model = self.model_cls(
                        num_samples=4,
                        warmup_steps=0,
                        thinning=1,
                        use_input_warping=use_input_warping,
                        use_saas=use_saas,
                        disable_progbar=True,
                        max_tree_depth=1,
                    )
                    if inferred_noise:
                        Yvars1 = [torch.full_like(raw_Yvars1[0], float("nan"))]
                        Yvars2 = [torch.full_like(raw_Yvars2[0], float("nan"))]
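                        # All-NaN Yvars signal that observation noise should be
                        # inferred by the model instead of treated as known.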
                    else:
                        Yvars1 = raw_Yvars1
                        Yvars2 = raw_Yvars2

                    dummy_samples = _get_dummy_mcmc_samples(
                        num_samples=4,
                        num_outputs=2,
                        dtype=dtype,
                        device=Xs1[0].device,
                        use_saas=use_saas,
                    )
                    with ExitStack() as es:
                        _mock_fit_model = es.enter_context(
                            mock.patch(RUN_INFERENCE_PATH,
                                       side_effect=dummy_samples))
                        model.fit(
                            Xs=Xs1 + Xs2,
                            Ys=Ys1 + Ys2,
                            Yvars=Yvars1 + Yvars2,
                            search_space_digest=SearchSpaceDigest(
                                feature_names=fns,
                                bounds=bounds,
                                task_features=tfs,
                            ),
                            metric_names=mns,
                        )
                        # check run_inference arguments
                        self.assertEqual(_mock_fit_model.call_count, 2)
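                        # run_inference is called once per outcome (two metrics);
                        # call_args holds the last call, but both outcomes use
                        # identical test data, so the checks below pass either way.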
                        _, ckwargs = _mock_fit_model.call_args
                        self.assertIs(ckwargs["pyro_model"], pyro_model)

                        self.assertTrue(torch.equal(ckwargs["X"], Xs1[0]))
                        self.assertTrue(torch.equal(ckwargs["Y"], Ys1[0]))
                        if inferred_noise:
                            self.assertTrue(torch.isnan(ckwargs["Yvar"]).all())
                        else:
                            self.assertTrue(
                                torch.equal(ckwargs["Yvar"], Yvars1[0]))
                        self.assertEqual(ckwargs["num_samples"], 4)
                        self.assertEqual(ckwargs["warmup_steps"], 0)
                        self.assertEqual(ckwargs["max_tree_depth"], 1)
                        self.assertTrue(ckwargs["disable_progbar"])
                        self.assertEqual(ckwargs["use_input_warping"],
                                         use_input_warping)
                        self.assertEqual(ckwargs["use_saas"], use_saas)
                    with ExitStack() as es:
                        _mock_mcmc = es.enter_context(mock.patch(MCMC_PATH))
                        _mock_mcmc.return_value.get_samples.side_effect = dummy_samples
                        _mock_nuts = es.enter_context(mock.patch(NUTS_PATH))
                        model.fit(
                            Xs=Xs1 + Xs2,
                            Ys=Ys1 + Ys2,
                            Yvars=Yvars1 + Yvars2,
                            search_space_digest=SearchSpaceDigest(
                                feature_names=fns,
                                bounds=bounds,
                                task_features=tfs,
                            ),
                            metric_names=mns,
                        )
                        # check MCMC.__init__ arguments
                        self.assertEqual(_mock_mcmc.call_count, 2)
                        _, ckwargs = _mock_mcmc.call_args
                        self.assertEqual(ckwargs["num_samples"], 4)
                        self.assertEqual(ckwargs["warmup_steps"], 0)
                        self.assertTrue(ckwargs["disable_progbar"])
                        # check NUTS.__init__ arguments
                        _mock_nuts.assert_called_with(
                            pyro_model,
                            jit_compile=True,
                            full_mass=True,
                            ignore_jit_warnings=True,
                            max_tree_depth=1,
                        )
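                        # Together these pin down the real (unmocked) path:
                        # run_inference builds a NUTS kernel over pyro_model and
                        # hands it to MCMC with the num_samples/warmup_steps above.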
                    # now actually run pyro
                    if not use_input_warping:
                        # input warping is quite slow, so we omit it for
                        # testing purposes
                        model.fit(
                            Xs=Xs1 + Xs2,
                            Ys=Ys1 + Ys2,
                            Yvars=Yvars1 + Yvars2,
                            search_space_digest=SearchSpaceDigest(
                                feature_names=fns,
                                bounds=bounds,
                                task_features=tfs,
                            ),
                            metric_names=mns,
                        )

                        for m, X, Y, Yvar in zip(model.model.models, Xs1 + Xs2,
                                                 Ys1 + Ys2, Yvars1 + Yvars2):
                            self.assertTrue(
                                torch.equal(
                                    m.train_inputs[0],
                                    X.expand(4, *X.shape),
                                ))
                            self.assertTrue(
                                torch.equal(
                                    m.train_targets,
                                    Y.view(1, -1).expand(4, Y.numel()),
                                ))
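                            # The leading dimension of 4 is the MCMC batch: each
                            # posterior sample parameterizes one GP in the batch,
                            # so the train data are expanded across that dimension.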
                            # check shapes of sampled parameters
                            if not inferred_noise:
                                self.assertTrue(
                                    torch.allclose(
                                        m.likelihood.noise.detach(),
                                        Yvar.view(1, -1).expand(4, Yvar.numel()),
                                    ))
                            else:
                                self.assertEqual(m.likelihood.noise.shape,
                                                 torch.Size([4, 1]))

                            self.assertEqual(
                                m.covar_module.base_kernel.lengthscale.shape,
                                torch.Size([4, 1, X.shape[-1]]),
                            )
                            self.assertEqual(m.covar_module.outputscale.shape,
                                             torch.Size([4]))
                            self.assertEqual(
                                m.mean_module.constant.shape,
                                torch.Size([4, 1]),
                            )
                            if use_input_warping:
                                self.assertTrue(hasattr(m, "input_transform"))
                                self.assertIsInstance(m.input_transform, Warp)
                                self.assertEqual(
                                    m.input_transform.concentration0.shape,
                                    torch.Size([4, 1, 3]),
                                )
                                self.assertEqual(
                                    m.input_transform.concentration1.shape,
                                    torch.Size([4, 1, 3]),
                                )
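                                # Warp applies a per-dimension Kumaraswamy CDF;
                                # concentration0/concentration1 are its two shape
                                # parameters, batched over 4 samples x 3 features.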
                            else:
                                self.assertFalse(hasattr(m, "input_transform"))