Example 1
    def test_BotorchModel_with_random_scalarization(self,
                                                    dtype=torch.float,
                                                    cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0],
                                         dtype=dtype,
                                         device=device)
        model = BotorchModel()
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(
                SAMPLE_SIMPLEX_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.7, 0.3]),
        ) as _mock_sample_simplex:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True
                    }
                },
            )
            _mock_sample_simplex.assert_called_once()

        with mock.patch(
                SAMPLE_HYPERSPHERE_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.6, 0.8]),
        ) as _mock_sample_hypersphere:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True,
                        "random_scalarization_distribution": HYPERSPHERE,
                    }
                },
            )
            _mock_sample_hypersphere.assert_called_once()

        with mock.patch(
                SAMPLE_HYPERSPHERE_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.6, 0.8]),
        ) as _mock_sample_hypersphere:
            expected = torch.tensor([0.6, -0.8])
            actual = _extract_random_scalarization_settings(
                objective_weights=torch.tensor([1.0, -1.0]),
                **{
                    "random_scalarization": True,
                    "random_scalarization_distribution": HYPERSPHERE,
                })
            self.assertTrue(torch.allclose(expected, actual))

        with mock.patch(
                SAMPLE_SIMPLEX_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.7, 0.3],
                                          dtype=dtype,
                                          device=device),
        ) as _mock_sample_simplex:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=(
                    torch.tensor([[1.0, 1.0]], dtype=dtype, device=device),
                    torch.tensor([[1.0]], dtype=dtype, device=device),
                ),
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True
                    }
                },
            )
            _mock_sample_simplex.assert_called_once()
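
Note: the hypersphere mock blocks above check that _extract_random_scalarization_settings sign-aligns the sampled weights with the objective directions, so a minimized objective (negative weight) gets a negative scalarization weight. A minimal sketch of that behavior, where align_with_objective_signs is a hypothetical stand-in for the Ax-internal logic:

import torch

def align_with_objective_signs(sampled, objective_weights):
    # Flip each sampled weight so its sign matches the corresponding
    # objective weight (a negative weight marks a minimized objective).
    return sampled * objective_weights.sign()

sampled = torch.tensor([0.6, 0.8])             # mocked hypersphere draw
objective_weights = torch.tensor([1.0, -1.0])  # maximize first, minimize second
assert torch.allclose(align_with_objective_signs(sampled, objective_weights),
                      torch.tensor([0.6, -0.8]))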
Example 2
    def test_BotorchModel(self, dtype=torch.float, cuda=False):
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        model = BotorchModel()
        # Test ModelListGP
        # make training data different for each output
        Xs2_diff = [Xs2[0] + 0.1]
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2_diff,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()
        # Check attributes
        self.assertTrue(torch.equal(model.Xs[0], Xs1[0]))
        self.assertTrue(torch.equal(model.Xs[1], Xs2_diff[0]))
        self.assertEqual(model.dtype, Xs1[0].dtype)
        self.assertEqual(model.device, Xs1[0].device)
        self.assertIsInstance(model.model, ModelListGP)

        # Check fitting
        model_list = model.model.models
        self.assertTrue(torch.equal(model_list[0].train_inputs[0], Xs1[0]))
        self.assertTrue(torch.equal(model_list[1].train_inputs[0],
                                    Xs2_diff[0]))
        self.assertTrue(
            torch.equal(model_list[0].train_targets, Ys1[0].view(-1)))
        self.assertTrue(
            torch.equal(model_list[1].train_targets, Ys2[0].view(-1)))
        self.assertIsInstance(model_list[0].likelihood,
                              _GaussianLikelihoodBase)
        self.assertIsInstance(model_list[1].likelihood,
                              _GaussianLikelihoodBase)

        # Test batched multi-output FixedNoiseGP
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

        # Check attributes
        self.assertTrue(torch.equal(model.Xs[0], Xs1[0]))
        self.assertTrue(torch.equal(model.Xs[1], Xs2[0]))
        self.assertEqual(model.dtype, Xs1[0].dtype)
        self.assertEqual(model.device, Xs1[0].device)
        self.assertIsInstance(model.model, FixedNoiseGP)

        # Check fitting
        # train inputs should be `o x n x d`
        self.assertTrue(
            torch.equal(
                model.model.train_inputs[0],
                Xs1[0].unsqueeze(0).expand(torch.Size([2]) + Xs1[0].shape),
            ))
        # train targets should be `o x n`
        self.assertTrue(
            torch.equal(model.model.train_targets,
                        torch.cat(Ys1 + Ys2, dim=-1).permute(1, 0)))
        self.assertIsInstance(model.model.likelihood, _GaussianLikelihoodBase)

        # Check infeasible cost can be computed on the model
        device = torch.device("cuda") if cuda else torch.device("cpu")
        objective_weights = torch.tensor([1.0, 0.0],
                                         dtype=dtype,
                                         device=device)
        objective_transform = get_objective_weights_transform(
            objective_weights)
        infeasible_cost = torch.tensor(
            get_infeasible_cost(X=Xs1[0],
                                model=model.model,
                                objective=objective_transform))
        expected_infeasible_cost = -1 * torch.min(
            objective_transform(
                model.model.posterior(Xs1[0]).mean -
                6 * model.model.posterior(Xs1[0]).variance.sqrt()).min(),
            torch.tensor(0.0, dtype=dtype, device=device),
        )
        self.assertTrue(
            torch.abs(infeasible_cost - expected_infeasible_cost) < 1e-5)

        # Check prediction
        X = torch.tensor([[6.0, 7.0, 8.0]], dtype=dtype, device=device)
        f_mean, f_cov = model.predict(X)
        self.assertTrue(f_mean.shape == torch.Size([1, 2]))
        self.assertTrue(f_cov.shape == torch.Size([1, 2, 2]))

        # Check generation
        objective_weights = torch.tensor([1.0, 0.0],
                                         dtype=dtype,
                                         device=device)
        outcome_constraints = (
            torch.tensor([[0.0, 1.0]], dtype=dtype, device=device),
            torch.tensor([[5.0]], dtype=dtype, device=device),
        )
        linear_constraints = (
            torch.tensor([[0.0, 1.0, 1.0]], dtype=dtype, device=device),
            torch.tensor([[100.0]], dtype=dtype, device=device),
        )
        fixed_features = None
        pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]], dtype=dtype, device=device),
            torch.tensor([[2.0, 6.0, 8.0]], dtype=dtype, device=device),
        ]
        n = 3

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], dtype=dtype, device=device)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]],
                                   dtype=dtype,
                                   device=device)
        model_gen_options = {}
        # test sequential optimize
        with mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as mock_optimize_acqf:
            Xgen, wgen, gen_metadata, cand_metadata = model.gen(
                n=n,
                bounds=bounds,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                linear_constraints=linear_constraints,
                fixed_features=fixed_features,
                pending_observations=pending_observations,
                model_gen_options=model_gen_options,
                rounding_func=dummy_func,
            )
            # note: gen() always returns CPU tensors
            self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
            self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=dtype)))

        # test joint optimize
        with mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as mock_optimize_acqf:
            Xgen, wgen, gen_metadata, cand_metadata = model.gen(
                n=n,
                bounds=bounds,
                objective_weights=objective_weights,
                outcome_constraints=None,
                linear_constraints=None,
                fixed_features=fixed_features,
                pending_observations=pending_observations,
                model_gen_options={
                    "optimizer_kwargs": {
                        "joint_optimization": True
                    }
                },
            )
            # note: gen() always returns CPU tensors
            self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
            self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=dtype)))
            mock_optimize_acqf.assert_called_once()

        # test that fidelity features are unsupported
        with self.assertRaises(NotImplementedError):
            Xgen, wgen = model.gen(
                n=n,
                bounds=bounds,
                objective_weights=objective_weights,
                outcome_constraints=None,
                linear_constraints=None,
                fixed_features=fixed_features,
                pending_observations=pending_observations,
                model_gen_options={
                    "optimizer_kwargs": {
                        "joint_optimization": True
                    }
                },
                target_fidelities={0: 3.0},
            )

        # test get_rounding_func
        dummy_rounding = get_rounding_func(rounding_func=dummy_func)
        X_temp = torch.rand(1, 2, 3, 4)
        self.assertTrue(torch.equal(X_temp, dummy_rounding(X_temp)))

        # Check best point selection
        xbest = model.best_point(bounds=bounds,
                                 objective_weights=objective_weights)
        xbest = model.best_point(
            bounds=bounds,
            objective_weights=objective_weights,
            fixed_features={0: 100.0},
        )
        self.assertIsNone(xbest)

        # test that fidelity features are unsupported
        with self.assertRaises(NotImplementedError):
            xbest = model.best_point(
                bounds=bounds,
                objective_weights=objective_weights,
                fixed_features={0: 100.0},
                target_fidelities={0: 3.0},
            )

        # Test cross-validation
        mean, variance = model.cross_validate(
            Xs_train=Xs1 + Xs2,
            Ys_train=Ys1 + Ys2,
            Yvars_train=Yvars1 + Yvars2,
            X_test=torch.tensor([[1.2, 3.2, 4.2], [2.4, 5.2, 3.2]],
                                dtype=dtype,
                                device=device),
        )
        self.assertTrue(mean.shape == torch.Size([2, 2]))
        self.assertTrue(variance.shape == torch.Size([2, 2, 2]))

        # Test cross-validation with refit_on_cv
        model.refit_on_cv = True
        mean, variance = model.cross_validate(
            Xs_train=Xs1 + Xs2,
            Ys_train=Ys1 + Ys2,
            Yvars_train=Yvars1 + Yvars2,
            X_test=torch.tensor([[1.2, 3.2, 4.2], [2.4, 5.2, 3.2]],
                                dtype=dtype,
                                device=device),
        )
        self.assertTrue(mean.shape == torch.Size([2, 2]))
        self.assertTrue(variance.shape == torch.Size([2, 2, 2]))

        # Test update
        model.refit_on_update = False
        model.update(Xs=Xs2 + Xs2, Ys=Ys2 + Ys2, Yvars=Yvars2 + Yvars2)

        # Test feature_importances
        importances = model.feature_importances()
        self.assertEqual(importances.shape, torch.Size([2, 1, 3]))

        # When calling update directly, the data is completely overwritten.
        self.assertTrue(torch.equal(model.Xs[0], Xs2[0]))
        self.assertTrue(torch.equal(model.Xs[1], Xs2[0]))
        self.assertTrue(torch.equal(model.Ys[0], Ys2[0]))
        self.assertTrue(torch.equal(model.Yvars[0], Yvars2[0]))

        model.refit_on_update = True
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.update(Xs=Xs2 + Xs2, Ys=Ys2 + Ys2, Yvars=Yvars2 + Yvars2)

        # test unfit model CV, update, and feature_importances
        unfit_model = BotorchModel()
        with self.assertRaises(RuntimeError):
            unfit_model.cross_validate(
                Xs_train=Xs1 + Xs2,
                Ys_train=Ys1 + Ys2,
                Yvars_train=Yvars1 + Yvars2,
                X_test=Xs1[0],
            )
        with self.assertRaises(RuntimeError):
            unfit_model.update(Xs=Xs1 + Xs2,
                               Ys=Ys1 + Ys2,
                               Yvars=Yvars1 + Yvars2)
        with self.assertRaises(RuntimeError):
            unfit_model.feature_importances()

        # Test loading state dict
        tkwargs = {"device": device, "dtype": dtype}
        true_state_dict = {
            "mean_module.constant": [3.5004],
            "covar_module.raw_outputscale": 2.2438,
            "covar_module.base_kernel.raw_lengthscale": [[-0.9274, -0.9274, -0.9274]],
            "covar_module.base_kernel.lengthscale_prior.concentration": 3.0,
            "covar_module.base_kernel.lengthscale_prior.rate": 6.0,
            "covar_module.outputscale_prior.concentration": 2.0,
            "covar_module.outputscale_prior.rate": 0.15,
        }
        true_state_dict = {
            key: torch.tensor(val, **tkwargs)
            for key, val in true_state_dict.items()
        }
        model = get_and_fit_model(
            Xs=Xs1,
            Ys=Ys1,
            Yvars=Yvars1,
            task_features=[],
            fidelity_features=[],
            metric_names=[],
            state_dict=true_state_dict,
            refit_model=False,
        )
        for k, v in chain(model.named_parameters(), model.named_buffers()):
            self.assertTrue(torch.equal(true_state_dict[k], v))

        # Test that some model parameters and buffers change when refit_model=True
        true_state_dict["mean_module.constant"] += 0.1
        true_state_dict["covar_module.raw_outputscale"] += 0.1
        true_state_dict["covar_module.base_kernel.raw_lengthscale"] += 0.1
        model = get_and_fit_model(
            Xs=Xs1,
            Ys=Ys1,
            Yvars=Yvars1,
            task_features=[],
            fidelity_features=[],
            metric_names=[],
            state_dict=true_state_dict,
            refit_model=True,
        )
        self.assertTrue(
            any(not torch.equal(true_state_dict[k], v) for k, v in chain(
                model.named_parameters(), model.named_buffers())))

        # Test that recommend_best_out_of_sample_point errors w/o _get_best_point_acqf
        model = BotorchModel(
            best_point_recommender=recommend_best_out_of_sample_point)
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2_diff,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
        with self.assertRaises(RuntimeError):
            xbest = model.best_point(bounds=bounds,
                                     objective_weights=objective_weights)
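
Note: the expected_infeasible_cost computation in the example above encodes the convention M = -min(min(objective(mean - 6 * sd)), 0). A minimal sketch with an identity objective and hand-picked posterior values (not Ax's implementation):

import torch

def expected_infeasible_cost(mean, variance):
    # Lower confidence bound at ~6 standard deviations; the cost
    # M = -min(lcb.min(), 0) is non-negative and zero whenever the
    # objective is safely positive everywhere.
    lcb = mean - 6 * variance.sqrt()
    return -torch.min(lcb.min(), torch.tensor(0.0))

# lcb = [2.0 - 3.0, 0.5 - 1.2] = [-1.0, -0.7], so M = 1.0
assert expected_infeasible_cost(torch.tensor([2.0, 0.5]),
                                torch.tensor([0.25, 0.04])) == 1.0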
Example 3
def get_MTGP(
    experiment: Experiment,
    data: Data,
    search_space: Optional[SearchSpace] = None,
    trial_index: Optional[int] = None,
) -> TorchModelBridge:
    """Instantiates a Multi-task Gaussian Process (MTGP) model that generates
    points with EI.

    If the input experiment is a MultiTypeExperiment then a
    Multi-type Multi-task GP model will be instantiated.
    Otherwise, the model will be a Single-type Multi-task GP.
    """

    if isinstance(experiment, MultiTypeExperiment):
        trial_index_to_type = {
            t.index: t.trial_type for t in experiment.trials.values()
        }
        transforms = MT_MTGP_trans
        transform_configs = {
            "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
            "ConvertMetricNames": tconfig_from_mt_experiment(experiment),
        }
    else:
        # Set transforms for a Single-type MTGP model.
        transforms = ST_MTGP_trans
        transform_configs = None

    # Choose the status quo features from the selected trial. If trial_index
    # is None, fall back to the status quo of the last experiment trial.
    if trial_index is None:
        trial_index = len(experiment.trials) - 1
    elif trial_index >= len(experiment.trials):
        raise ValueError("trial_index is out of range for the experiment trials")

    # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
    status_quo = experiment.trials[trial_index].status_quo
    if status_quo is None:
        status_quo_features = None
    else:
        status_quo_features = ObservationFeatures(
            parameters=status_quo.parameters,
            # pyre-fixme[6]: Expected `Optional[numpy.int64]` for 2nd param but got
            #  `int`.
            trial_index=trial_index,
        )

    return TorchModelBridge(
        experiment=experiment,
        search_space=search_space or experiment.search_space,
        data=data,
        model=BotorchModel(),
        transforms=transforms,
        # pyre-fixme[6]: Expected `Optional[Dict[str, Dict[str,
        #  typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,
        #  int, str]]]]` for 6th param but got `Optional[Dict[str,
        #  typing.Union[Dict[str, Dict[str, Dict[int, Optional[str]]]], Dict[str,
        #  typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,
        #  int, str]]]]]`.
        transform_configs=transform_configs,
        torch_dtype=torch.double,
        torch_device=DEFAULT_TORCH_DEVICE,
        status_quo_features=status_quo_features,
    )
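
A hypothetical usage sketch for get_MTGP; experiment and data are placeholder names for an existing Ax Experiment (with at least one completed trial) and its fetched Data:

mtgp = get_MTGP(experiment=experiment, data=data)
generator_run = mtgp.gen(n=5)  # EI-driven candidates from the multi-task GP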
Example 4
    def test_BotorchModel_with_scalarization_and_outcome_constraints(
            self, dtype=torch.float, cuda=False):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 2
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        model = BotorchModel()
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        with mock.patch(
                SAMPLE_SIMPLEX_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.7, 0.3], **tkwargs),
        ) as _mock_sample_simplex, mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=(
                    torch.tensor([[1.0, 1.0]], **tkwargs),
                    torch.tensor([[10.0]], **tkwargs),
                ),
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            self.assertEqual(n, _mock_sample_simplex.call_count)

        with mock.patch(
                CHEBYSHEV_SCALARIZATION_PATH,
                wraps=get_chebyshev_scalarization,
        ) as _mock_chebyshev_scalarization, mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=(
                    torch.tensor([[1.0, 1.0]], **tkwargs),
                    torch.tensor([[10.0]], **tkwargs),
                ),
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "chebyshev_scalarization": True
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # get_chebyshev_scalarization should be called once per generated candidate.
            self.assertEqual(n, _mock_chebyshev_scalarization.call_count)
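
Note: Ax wraps BoTorch's get_chebyshev_scalarization here. A minimal standalone sketch of the wrapped utility, assuming random observed outcomes purely for illustration:

import torch
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization

Y = torch.rand(10, 2)               # observed outcomes for 2 objectives
weights = torch.tensor([0.7, 0.3])  # e.g. a simplex draw, as mocked above
scalarize = get_chebyshev_scalarization(weights=weights, Y=Y)
scalar_objective = scalarize(Y)     # shape (10,): one scalarized value per point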
Example 5
    def test_GetPosteriorMean(self):
        model = BotorchModel(acqf_constructor=get_PosteriorMean)
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            bounds=self.bounds,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            task_features=[],
            fidelity_features=[],
        )

        # test model.gen() with no outcome_constraints. Analytic.
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        Xgen, wgen, _, __ = model.gen(
            n=1,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            linear_constraints=None,
        )
        self.assertTrue(torch.equal(wgen, torch.ones(1, dtype=self.dtype)))

        # test model.gen() works with outcome_constraints. qSimpleRegret.
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        Xgen, w, _, __ = model.gen(
            n=1,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=None,
        )

        # test model.gen() works with chebyshev scalarization.
        model = MultiObjectiveBotorchModel(acqf_constructor=get_PosteriorMean)
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            bounds=self.bounds,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            task_features=[],
            fidelity_features=[],
        )
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        Xgen, w, _, __ = model.gen(
            n=1,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": {"chebyshev_scalarization": True}
            },
        )

        # get_PosteriorMean raises a ValueError when X_observed is None
        with self.assertRaises(ValueError):
            get_PosteriorMean(
                model=model, objective_weights=self.objective_weights, X_observed=None
            )

        # test model.predict()
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        model.predict(new_X_dummy)
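
Note: "Analytic" above refers to the closed-form PosteriorMean acquisition that applies when no outcome constraints are present. A minimal self-contained sketch using BoTorch directly, with toy training data:

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition.analytic import PosteriorMean

train_X = torch.rand(8, 3)
train_Y = torch.rand(8, 1)
gp = SingleTaskGP(train_X, train_Y)
acqf = PosteriorMean(model=gp)     # pure exploitation: no exploration term
value = acqf(torch.rand(1, 1, 3))  # posterior mean at a single candidate point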
Example 6
    def __init__(
        self,
        model_constructor: TModelConstructor = get_and_fit_model_mcmc,
        model_predictor: TModelPredictor = predict_from_model_mcmc,
        acqf_constructor: TAcqfConstructor = get_fully_bayesian_acqf,
        # pyre-fixme[9]: acqf_optimizer declared/used type mismatch
        acqf_optimizer: TOptimizer = scipy_optimizer,
        best_point_recommender: TBestPointRecommender = recommend_best_observed_point,
        refit_on_cv: bool = False,
        refit_on_update: bool = True,
        warm_start_refitting: bool = True,
        use_input_warping: bool = False,
        # use_saas is deprecated. TODO: remove
        use_saas: Optional[bool] = None,
        num_samples: int = 512,
        warmup_steps: int = 1024,
        thinning: int = 16,
        max_tree_depth: int = 6,
        disable_progbar: bool = False,
        gp_kernel: str = "matern",
        verbose: bool = False,
        **kwargs: Any,
    ) -> None:
        """Initialize Fully Bayesian Botorch Model.

        Args:
            model_constructor: A callable that instantiates and fits a model on data,
                with signature as described below.
            model_predictor: A callable that predicts using the fitted model, with
                signature as described below.
            acqf_constructor: A callable that creates an acquisition function from a
                fitted model, with signature as described below.
            acqf_optimizer: A callable that optimizes the acquisition function, with
                signature as described below.
            best_point_recommender: A callable that recommends the best point, with
                signature as described below.
            refit_on_cv: If True, refit the model for each fold when performing
                cross-validation.
            refit_on_update: If True, refit the model after updating the training
                data using the `update` method.
            warm_start_refitting: If True, start model refitting from previous
                model parameters in order to speed up the fitting process.
            use_input_warping: A boolean indicating whether to use input warping.
            use_saas: [deprecated] A boolean indicating whether to use the SAAS model.
            num_samples: The number of MCMC samples. Note that with thinning,
                num_samples/thinning samples are retained.
            warmup_steps: The number of burn-in steps for NUTS.
            thinning: The amount of thinning. Every nth sample is retained.
            max_tree_depth: The max_tree_depth for NUTS.
            disable_progbar: A boolean indicating whether to print the progress
                bar and diagnostics during MCMC.
            gp_kernel: The type of ARD base kernel. "matern" corresponds to a Matern-5/2
                kernel and "rbf" corresponds to an RBF kernel.
            verbose: A boolean indicating whether to print summary stats from MCMC.
        """
        # use_saas is deprecated. TODO: remove
        if use_saas is not None:
            warnings.warn(SAAS_DEPRECATION_MSG, DeprecationWarning)
        BotorchModel.__init__(
            self,
            model_constructor=model_constructor,
            model_predictor=model_predictor,
            acqf_constructor=acqf_constructor,
            acqf_optimizer=acqf_optimizer,
            best_point_recommender=best_point_recommender,
            refit_on_cv=refit_on_cv,
            refit_on_update=refit_on_update,
            warm_start_refitting=warm_start_refitting,
            use_input_warping=use_input_warping,
            num_samples=num_samples,
            warmup_steps=warmup_steps,
            thinning=thinning,
            max_tree_depth=max_tree_depth,
            disable_progbar=disable_progbar,
            gp_kernel=gp_kernel,
            verbose=verbose,
        )
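
Worked example of the sample-retention arithmetic from the docstring: with the defaults above, NUTS retains num_samples / thinning posterior draws.

num_samples, thinning = 512, 16
retained = num_samples // thinning
assert retained == 32  # 32 GP hyperparameter samples survive thinning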
Example 7
    def test_BotorchModel_with_random_scalarization(self,
                                                    dtype=torch.float,
                                                    cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0],
                                         dtype=dtype,
                                         device=device)
        model = BotorchModel()
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], dtype=dtype, device=device)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]],
                                   dtype=dtype,
                                   device=device)

        with mock.patch(
                SAMPLE_SIMPLEX_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.7, 0.3]),
        ) as _mock_sample_simplex, mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # sample_simplex should be called once per generated candidate.
            self.assertEqual(n, _mock_sample_simplex.call_count)

        with mock.patch(
                SAMPLE_HYPERSPHERE_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.6, 0.8]),
        ) as _mock_sample_hypersphere, mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True,
                        "random_scalarization_distribution": HYPERSPHERE,
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # sample_hypersphere should be called once per generated candidate.
            self.assertEqual(n, _mock_sample_hypersphere.call_count)
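
Note: the two utilities mocked above are BoTorch's weight samplers. A minimal sketch of their contracts, assuming 2 objectives and a single draw each:

import torch
from botorch.utils.sampling import sample_hypersphere, sample_simplex

w_simplex = sample_simplex(d=2, n=1)     # rows are non-negative and sum to 1
w_sphere = sample_hypersphere(d=2, n=1)  # rows have unit L2 norm
assert torch.allclose(w_simplex.sum(dim=-1), torch.tensor([1.0]))
assert torch.allclose(w_sphere.norm(dim=-1), torch.tensor([1.0]))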