Example #1
 def setUp(self):
     self.X = torch.tensor([[1.0, 0.0], [1.0, 1.0], [1.0, 3.0], [2.0, 2.0],
                            [3.0, 1.0]])
     self.Y = torch.tensor([
         [1.0, 0.0, 0.0],
         [1.0, 1.0, 1.0],
         [1.0, 3.0, 3.0],
         [2.0, 2.0, 4.0],
         [3.0, 1.0, 3.0],
     ])
     self.Yvar = torch.zeros(5, 3)
     self.outcome_constraints = (
         torch.tensor([[0.0, 0.0, 1.0]]),
         torch.tensor([[3.5]]),
     )
     self.objective_thresholds = torch.tensor([0.5, 1.5])
     self.objective_weights = torch.tensor([1.0, 1.0])
     bounds = [(0.0, 4.0), (0.0, 4.0)]
     self.model = MultiObjectiveBotorchModel(model_predictor=dummy_predict)
     with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
         self.model.fit(
             Xs=[self.X],
             Ys=[self.Y],
             Yvars=[self.Yvar],
             bounds=bounds,
             task_features=[],
             feature_names=["x1", "x2"],
             metric_names=["a", "b", "c"],
             fidelity_features=[],
         )
         _mock_fit_model.assert_called_once()
Example #2
 def __init__(
     self,
     model_constructor: TModelConstructor = get_and_fit_model_mcmc,
     model_predictor: TModelPredictor = predict_from_model_mcmc,
     # pyre-fixme[9]: acqf_constructor has type `Callable[[Model, Tensor,
     #  Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor], Any],
     #  AcquisitionFunction]`; used as `Callable[[Model, Tensor,
     #  Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor],
     #  **(Any)], AcquisitionFunction]`.
     acqf_constructor: TAcqfConstructor = get_fully_bayesian_acqf_nehvi,
     # pyre-fixme[9]: acqf_optimizer has type `Callable[[AcquisitionFunction,
     #  Tensor, int, Optional[Dict[int, float]], Optional[Callable[[Tensor],
     #  Tensor]], Any], Tensor]`; used as `Callable[[AcquisitionFunction, Tensor,
     #  int, Optional[Dict[int, float]], Optional[Callable[[Tensor], Tensor]],
     #  **(Any)], Tensor]`.
     acqf_optimizer: TOptimizer = scipy_optimizer,
     # TODO: Remove best_point_recommender for botorch_moo. Used in modelbridge._gen.
     best_point_recommender: TBestPointRecommender = recommend_best_observed_point,
     frontier_evaluator: TFrontierEvaluator = pareto_frontier_evaluator,
     refit_on_cv: bool = False,
     refit_on_update: bool = True,
     warm_start_refitting: bool = False,
     use_input_warping: bool = False,
     num_samples: int = 512,
     warmup_steps: int = 1024,
     thinning: int = 16,
     max_tree_depth: int = 6,
     # use_saas is deprecated. TODO: remove
     use_saas: Optional[bool] = None,
     disable_progbar: bool = False,
     gp_kernel: str = "matern",
     verbose: bool = False,
     **kwargs: Any,
 ) -> None:
     # use_saas is deprecated. TODO: remove
     if use_saas is not None:
         warnings.warn(SAAS_DEPRECATION_MSG, DeprecationWarning)
     MultiObjectiveBotorchModel.__init__(
         self,
         model_constructor=model_constructor,
         model_predictor=model_predictor,
         acqf_constructor=acqf_constructor,
         acqf_optimizer=acqf_optimizer,
         best_point_recommender=best_point_recommender,
         frontier_evaluator=frontier_evaluator,
         refit_on_cv=refit_on_cv,
         refit_on_update=refit_on_update,
         warm_start_refitting=warm_start_refitting,
         use_input_warping=use_input_warping,
         num_samples=num_samples,
         warmup_steps=warmup_steps,
         thinning=thinning,
         max_tree_depth=max_tree_depth,
         disable_progbar=disable_progbar,
         gp_kernel=gp_kernel,
         verbose=verbose,
     )
Example #3
    def test_transform_ref_point(self, _mock_fit, _mock_predict, _mock_unwrap):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics = exp.optimization_config.objective.metrics
        ref_point = {metrics[0].name: 0.0, metrics[1].name: 0.0}
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNone(modelbridge._transformed_ref_point)
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(2, len(modelbridge._transformed_ref_point))

        mixed_objective_constraints_optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[get_branin_metric(name="branin_b")], minimize=False),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="branin_a"),
                                  op=ComparisonOp.LEQ,
                                  bound=1)
            ],
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=mixed_objective_constraints_optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point={"branin_b": 0.0},
        )
        self.assertEqual({"branin_a", "branin_b"}, modelbridge._metric_names)
        self.assertEqual(["branin_b"], modelbridge._objective_metric_names)
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(1, len(modelbridge._transformed_ref_point))
Example #4
    def test_pareto_frontier(self, _):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp.optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds
        )
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
            PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
        ) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            observed_frontier_data = modelbridge.observed_pareto_frontier(
                objective_thresholds=objective_thresholds
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(1, len(observed_frontier_data))

        with self.assertRaises(ValueError):
            modelbridge.predicted_pareto_frontier(
                objective_thresholds=objective_thresholds, observation_features=[]
            )

        observation_features = [
            ObservationFeatures(parameters={"x1": 0.0, "x2": 1.0}),
            ObservationFeatures(parameters={"x1": 1.0, "x2": 0.0}),
        ]
        predicted_frontier_data = modelbridge.predicted_pareto_frontier(
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(len(predicted_frontier_data) <= 2)
Example #5
 def test_get_NEHVI_input_validation_errors(self):
     model = MultiObjectiveBotorchModel()
     weights = torch.ones(2)
     objective_thresholds = torch.zeros(2)
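     # No data has been fitted, so there are no observed points and get_NEHVI should raise.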
     with self.assertRaisesRegex(ValueError,
                                 "There are no feasible observed points."):
         get_NEHVI(
             model=model.model,
             objective_weights=weights,
             objective_thresholds=objective_thresholds,
         )
Example #6
 def test_multi_type_experiment(self):
     exp = get_multi_type_experiment()
     with self.assertRaises(NotImplementedError):
         MultiObjectiveTorchModelBridge(
             experiment=exp,
             search_space=exp.search_space,
             model=MultiObjectiveBotorchModel(),
             transforms=[],
             data=exp.fetch_data(),
             objective_thresholds={"branin_b": 0.0},
         )
Example #7
 def test_get_NEI_with_chebyshev_and_missing_Ys_error(self):
     model = MultiObjectiveBotorchModel()
     x = torch.zeros(2, 2)
     weights = torch.ones(2)
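     # Ys is omitted, so constructing the Chebyshev scalarization should fail.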
     with self.assertRaisesRegex(
             ValueError, "Chebyshev Scalarization requires Ys argument"):
         get_NEI(
             model=model,
             X_observed=x,
             objective_weights=weights,
             chebyshev_scalarization=True,
         )
Example #8
    def test_BotorchMOOModel_with_random_scalarization_and_outcome_constraints(
        self, dtype=torch.float, cuda=False
    ):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True
        )
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True
        )
        n = 2
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        obj_t = torch.tensor([1.0, 1.0], **tkwargs)
        model = MultiObjectiveBotorchModel(acqf_constructor=get_NEI)

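        # Dummy candidates and acquisition values for the patched optimizer to return.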
        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(
            SAMPLE_SIMPLEX_UTIL_PATH,
            autospec=True,
            return_value=torch.tensor([0.7, 0.3], **tkwargs),
        ) as _mock_sample_simplex, mock.patch(
            "ax.models.torch.botorch_moo_defaults.optimize_acqf_list",
            return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=(
                    torch.tensor([[1.0, 1.0]], **tkwargs),
                    torch.tensor([[10.0]], **tkwargs),
                ),
                model_gen_options={
                    "acquisition_function_kwargs": {"random_scalarization": True},
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
                objective_thresholds=obj_t,
            )
            self.assertEqual(n, _mock_sample_simplex.call_count)
Example #9
    def test_BotorchMOOModel_with_chebyshev_scalarization_and_outcome_constraints(
        self, dtype=torch.float, cuda=False
    ):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": torch.float,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True
        )
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True
        )
        n = 2
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        model = MultiObjectiveBotorchModel(acqf_constructor=get_NEI)

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(
            CHEBYSHEV_SCALARIZATION_PATH, wraps=get_chebyshev_scalarization
        ) as _mock_chebyshev_scalarization, mock.patch(
            "ax.models.torch.botorch_defaults.optimize_acqf",
            return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=(
                    torch.tensor([[1.0, 1.0]], **tkwargs),
                    torch.tensor([[10.0]], **tkwargs),
                ),
                model_gen_options={
                    "acquisition_function_kwargs": {"chebyshev_scalarization": True},
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # get_chebyshev_scalarization should be called once per generated candidate.
            self.assertEqual(n, _mock_chebyshev_scalarization.call_count)
Example #10
    def test_BotorchMOOModel_with_ehvi_and_outcome_constraints(
        self, dtype=torch.float, cuda=False
    ):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True
        )
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True
        )
        n = 3
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        model = MultiObjectiveBotorchModel(acqf_constructor=get_EHVI)

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(
            EHVI_ACQF_PATH, wraps=moo_monte_carlo.qExpectedHypervolumeImprovement
        ) as _mock_ehvi_acqf, mock.patch(
            "ax.models.torch.botorch_defaults.optimize_acqf",
            return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=(
                    torch.tensor([[1.0, 1.0]], **tkwargs),
                    torch.tensor([[10.0]], **tkwargs),
                ),
                model_gen_options={"optimizer_kwargs": _get_optimizer_kwargs()},
                objective_thresholds=torch.tensor([1.0, 1.0]),
            )
            # the EHVI acquisition function should be created only once.
            self.assertEqual(1, _mock_ehvi_acqf.call_count)
Example #11
 def test_get_EHVI_input_validation_errors(self):
     model = MultiObjectiveBotorchModel()
     x = torch.zeros(2, 2)
     weights = torch.ones(2)
     ref_point = torch.zeros(2)
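     # get_EHVI should raise both without X_observed (no feasible points) and without Ys.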
     with self.assertRaisesRegex(ValueError,
                                 "There are no feasible observed points."):
         get_EHVI(model=model,
                  objective_weights=weights,
                  ref_point=ref_point)
     with self.assertRaisesRegex(
             ValueError,
             "Expected Hypervolume Improvement requires Ys argument"):
         get_EHVI(
             model=model,
             X_observed=x,
             objective_weights=weights,
             ref_point=ref_point,
         )
Example #12
    def test_status_quo_for_non_monolithic_data(self):
        exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        exp.new_batch_trial(sobol_run).set_status_quo_and_optimize_power(
            status_quo=exp.status_quo).run()

        # create data where metrics vary in start and end times
        data = get_non_monolithic_branin_moo_data()

        bridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            experiment=exp,
            data=data,
            transforms=[],
        )
        self.assertEqual(bridge.status_quo.arm_name, "status_quo")
Example #13
    def test_hypervolume(self, _, cuda=False):
        for num_objectives in (2, 3):
            exp = get_branin_experiment_with_multi_objective(
                has_optimization_config=True,
                with_batch=True,
                num_objectives=num_objectives,
            )
            for trial in exp.trials.values():
                trial.mark_running(no_runner_required=True).mark_completed()
            metrics_dict = exp.optimization_config.metrics
            objective_thresholds = [
                ObjectiveThreshold(
                    metric=metrics_dict["branin_a"],
                    bound=0.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                ),
                ObjectiveThreshold(
                    metric=metrics_dict["branin_b"],
                    bound=1.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                ),
            ]
            if num_objectives == 3:
                objective_thresholds.append(
                    ObjectiveThreshold(
                        metric=metrics_dict["branin_c"],
                        bound=2.0,
                        relative=False,
                        op=ComparisonOp.GEQ,
                    )
                )
            optimization_config = exp.optimization_config.clone_with_args(
                objective_thresholds=objective_thresholds
            )
            exp.attach_data(
                get_branin_data_multi_objective(
                    trial_indices=exp.trials.keys(), num_objectives=num_objectives
                )
            )
            modelbridge = TorchModelBridge(
                search_space=exp.search_space,
                model=MultiObjectiveBotorchModel(),
                optimization_config=optimization_config,
                transforms=[],
                experiment=exp,
                data=exp.fetch_data(),
                torch_device=torch.device("cuda" if cuda else "cpu"),
                objective_thresholds=objective_thresholds,
            )
            with patch(
                PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
            ) as wrapped_frontier_evaluator:
                modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
                hv = observed_hypervolume(
                    modelbridge=modelbridge, objective_thresholds=objective_thresholds
                )
                expected_hv = 20 if num_objectives == 2 else 60  # 5 * 4 (* 3)
                wrapped_frontier_evaluator.assert_called_once()
                self.assertEqual(expected_hv, hv)
                if num_objectives == 3:
                    # Test selected_metrics
                    hv = observed_hypervolume(
                        modelbridge=modelbridge,
                        objective_thresholds=objective_thresholds,
                        selected_metrics=["branin_a", "branin_c"],
                    )
                    expected_hv = 15  # (5 - 0) * (5 - 2)
                    self.assertEqual(expected_hv, hv)
                    # test that non-objective outcome raises value error
                    with self.assertRaises(ValueError):
                        hv = observed_hypervolume(
                            modelbridge=modelbridge,
                            objective_thresholds=objective_thresholds,
                            selected_metrics=["tracking"],
                        )

            with self.assertRaises(ValueError):
                predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=[],
                )

            observation_features = [
                ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),
                ObservationFeatures(parameters={"x1": 2.0, "x2": 1.0}),
            ]
            predicted_hv = predicted_hypervolume(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
            )
            self.assertTrue(predicted_hv >= 0)
            if num_objectives == 3:
                # Test selected_metrics
                predicted_hv = predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=observation_features,
                    selected_metrics=["branin_a", "branin_c"],
                )
                self.assertTrue(predicted_hv >= 0)
Example #14
    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        data = exp.fetch_data()
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        search_space.add_parameter_constraints(param_constraints)
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                )
            )
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            # check that transforms have been applied (at least UnitX)
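            # Branin's x1 ranges over [-5, 10], so UnitX maps the constraint
            # 1.0 * x1 <= 10 to 15.0 * u1 <= 15.0 and x1 = 0.0 to u1 = 1/3.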
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
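        # branin_a was flipped to a minimization objective above, so negate it here.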
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )
        # test using MTGP
        sobol_generator = get_sobol(
            search_space=exp.search_space,
            seed=TEST_SOBOL_SEED,
            # set initial position equal to the number of sobol arms generated
            # so far. This means that new sobol arms will complement the previous
            # arms in a space-filling fashion
            init_position=len(exp.arms_by_name) - 1,
        )
        sobol_run = sobol_generator.gen(n=2)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        torch.manual_seed(0)  # make model fitting deterministic
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
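        # With ST_MTGP_trans the trial index becomes an extra model feature,
        # so the fixed trial shows up as {2: 1.0} below.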
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack([df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )
Example #15
    def test_BotorchMOOModel_with_qehvi(self,
                                        dtype=torch.float,
                                        cuda=False,
                                        use_qnehvi=False):
        if use_qnehvi:
            acqf_constructor = get_NEHVI
            partitioning_path = NEHVI_PARTITIONING_PATH
        else:
            acqf_constructor = get_EHVI
            partitioning_path = EHVI_PARTITIONING_PATH
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        obj_t = torch.tensor([1.0, 1.0], **tkwargs)
        model = MultiObjectiveBotorchModel(acqf_constructor=acqf_constructor)

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()
        with ExitStack() as es:
            if use_qnehvi:
                _mock_acqf = es.enter_context(
                    mock.patch(
                        NEHVI_ACQF_PATH,
                        wraps=moo_monte_carlo.qNoisyExpectedHypervolumeImprovement,
                    ))
            else:
                _mock_acqf = es.enter_context(
                    mock.patch(
                        EHVI_ACQF_PATH,
                        wraps=moo_monte_carlo.qExpectedHypervolumeImprovement,
                    ))
            es.enter_context(
                mock.patch(
                    "ax.models.torch.botorch_defaults.optimize_acqf",
                    return_value=(X_dummy, acqfv_dummy),
                ))
            _mock_partitioning = es.enter_context(
                mock.patch(
                    partitioning_path,
                    wraps=moo_monte_carlo.FastNondominatedPartitioning,
                ))
            _, _, gen_metadata, _ = model.gen(
                n,
                bounds,
                objective_weights,
                objective_thresholds=obj_t,
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs()
                },
            )
            # the acquisition function should be created only once.
            self.assertEqual(1, _mock_acqf.call_count)
            # check partitioning strategy
            # NEHVI should call FastNondominatedPartitioning 1 time
            # since a batched partitioning is used for 2 objectives
            _mock_partitioning.assert_called_once()
            self.assertTrue(
                torch.equal(gen_metadata["objective_thresholds"], obj_t.cpu()))
            _mock_fit_model = es.enter_context(mock.patch(FIT_MODEL_MO_PATH))
            # 3 objectives
            model.fit(
                Xs=Xs1 + Xs2 + Xs2,
                Ys=Ys1 + Ys2 + Ys2,
                Yvars=Yvars1 + Yvars2 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            model.gen(
                n,
                bounds,
                torch.tensor([1.0, 1.0, 1.0], **tkwargs),
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs()
                },
                objective_thresholds=torch.tensor([1.0, 1.0, 1.0], **tkwargs),
            )
            # check partitioning strategy
            # NEHVI should call FastNondominatedPartitioning 129 times because
            # we have called gen twice: The first time, a batch partitioning is used
            # so there is one call to _mock_partitioning. The second time gen() is
            # called with three objectives so 128 calls are made to _mock_partitioning
            # because a BoxDecompositionList is used. qEHVI will only make 2 calls.
            self.assertEqual(len(_mock_partitioning.mock_calls),
                             129 if use_qnehvi else 2)

            # test inferred objective thresholds in gen()
            # create several data points
            Xs1 = [torch.cat([Xs1[0], Xs1[0] - 0.1], dim=0)]
            Ys1 = [torch.cat([Ys1[0], Ys1[0] - 0.5], dim=0)]
            Ys2 = [torch.cat([Ys2[0], Ys2[0] + 0.5], dim=0)]
            Yvars1 = [torch.cat([Yvars1[0], Yvars1[0] + 0.2], dim=0)]
            Yvars2 = [torch.cat([Yvars2[0], Yvars2[0] + 0.1], dim=0)]
            model.fit(
                Xs=Xs1 + Xs1,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns + ["dummy_metric"],
            )
            _mock_model_infer_objective_thresholds = es.enter_context(
                mock.patch(
                    "ax.models.torch.botorch_moo.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            _mock_infer_reference_point = es.enter_context(
                mock.patch(
                    "ax.models.torch.botorch_moo_defaults.infer_reference_point",
                    wraps=infer_reference_point,
                ))
            # after subsetting, the model will only have two outputs
            _mock_num_outputs = es.enter_context(
                mock.patch(
                    "botorch.utils.testing.MockModel.num_outputs",
                    new_callable=mock.PropertyMock,
                ))
            _mock_num_outputs.return_value = 3
            preds = torch.tensor(
                [
                    [11.0, 2.0],
                    [9.0, 3.0],
                ],
                **tkwargs,
            )
            model.model = MockModel(MockPosterior(mean=preds, samples=preds))
            subset_mock_model = MockModel(MockPosterior(mean=preds, samples=preds))
            es.enter_context(
                mock.patch.object(
                    model.model,
                    "subset_output",
                    return_value=subset_mock_model,
                ))
            outcome_constraints = (
                torch.tensor([[1.0, 0.0, 0.0]], **tkwargs),
                torch.tensor([[10.0]], **tkwargs),
            )
            _, _, gen_metadata, _ = model.gen(
                n,
                bounds,
                objective_weights=torch.tensor([-1.0, -1.0, 0.0], **tkwargs),
                outcome_constraints=outcome_constraints,
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                    # do not use the cached root decomposition since
                    # MockPosterior does not have an mvn attribute
                    "acquisition_function_kwargs": {
                        "cache_root": False
                    },
                },
            )
            # the acquisition function has been constructed three times, once per gen() call.
            self.assertEqual(_mock_acqf.call_count, 3)
            ckwargs = _mock_model_infer_objective_thresholds.call_args[1]
            X_observed = ckwargs["X_observed"]
            sorted_idcs = X_observed[:, 0].argsort()
            expected_X_observed = torch.tensor(
                [[1.0, 2.0, 3.0], [0.9, 1.9, 2.9]], **tkwargs)
            sorted_idcs2 = expected_X_observed[:, 0].argsort()
            self.assertTrue(
                torch.equal(
                    X_observed[sorted_idcs],
                    expected_X_observed[sorted_idcs2],
                ))
            self.assertTrue(
                torch.equal(
                    ckwargs["objective_weights"],
                    torch.tensor([-1.0, -1.0, 0.0], **tkwargs),
                ))
            oc = ckwargs["outcome_constraints"]
            self.assertTrue(torch.equal(oc[0], outcome_constraints[0]))
            self.assertTrue(torch.equal(oc[1], outcome_constraints[1]))
            self.assertIs(ckwargs["model"], subset_mock_model)
            self.assertTrue(
                torch.equal(
                    ckwargs["subset_idcs"],
                    torch.tensor([0, 1], device=tkwargs["device"]),
                ))
            _mock_infer_reference_point.assert_called_once()
            ckwargs = _mock_infer_reference_point.call_args[1]
            self.assertEqual(ckwargs["scale"], 0.1)
            self.assertTrue(
                torch.equal(ckwargs["pareto_Y"],
                            torch.tensor([[-9.0, -3.0]], **tkwargs)))
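            # With the single Pareto point [-9, -3] and scale=0.1, the inferred
            # reference point is [-9.9, -3.3]; negating by the objective weights
            # gives the thresholds [9.9, 3.3] checked below.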
            self.assertIn("objective_thresholds", gen_metadata)
            obj_t = gen_metadata["objective_thresholds"]
            self.assertTrue(
                torch.equal(obj_t[:2],
                            torch.tensor([9.9, 3.3], dtype=tkwargs["dtype"])))
            self.assertTrue(np.isnan(obj_t[2]))
            # test providing model with extra tracking metrics and objective thresholds
            provided_obj_t = torch.tensor([10.0, 4.0, float("nan")], **tkwargs)
            _, _, gen_metadata, _ = model.gen(
                n,
                bounds,
                objective_weights=torch.tensor([-1.0, -1.0, 0.0], **tkwargs),
                outcome_constraints=outcome_constraints,
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                    # do not use the cached root decomposition since
                    # MockPosterior does not have an mvn attribute
                    "acquisition_function_kwargs": {
                        "cache_root": False
                    },
                },
                objective_thresholds=provided_obj_t,
            )
            self.assertIn("objective_thresholds", gen_metadata)
            obj_t = gen_metadata["objective_thresholds"]
            self.assertTrue(torch.equal(obj_t[:2], provided_obj_t[:2].cpu()))
            self.assertTrue(np.isnan(obj_t[2]))
Example #16
    def test_BotorchMOOModel_with_qehvi_and_outcome_constraints(
            self, dtype=torch.float, cuda=False, use_qnehvi=False):
        acqf_constructor = get_NEHVI if use_qnehvi else get_EHVI
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs3, Ys3, Yvars3, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0, 0.0], **tkwargs)
        obj_t = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
        model = MultiObjectiveBotorchModel(acqf_constructor=acqf_constructor)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2 + Xs3,
                Ys=Ys1 + Ys2 + Ys3,
                Yvars=Yvars1 + Yvars2 + Yvars3,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()

        # test wrong number of objective thresholds
        with self.assertRaises(AxError):
            model.gen(
                n,
                bounds,
                objective_weights,
                objective_thresholds=torch.tensor([1.0, 1.0], **tkwargs),
            )
        # test that objective thresholds and weights are properly subsetted
        obj_t = torch.tensor([1.0, 1.0, 1.0], **tkwargs)
        with mock.patch.object(
                model,
                "acqf_constructor",
                wraps=botorch_moo_defaults.get_NEHVI,
        ) as mock_get_nehvi:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs()
                },
                objective_thresholds=obj_t,
            )
            mock_get_nehvi.assert_called_once()
            _, ckwargs = mock_get_nehvi.call_args
            self.assertEqual(ckwargs["model"].num_outputs, 2)
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            objective_weights[:-1]))
            self.assertTrue(
                torch.equal(ckwargs["objective_thresholds"], obj_t[:-1]))
            self.assertIsNone(ckwargs["outcome_constraints"])
            # the second datapoint is out of bounds
            self.assertTrue(torch.equal(ckwargs["X_observed"], Xs1[0][:1]))
            self.assertIsNone(ckwargs["X_pending"])
        # test that outcome constraints are passed properly
        oc = (
            torch.tensor([[0.0, 0.0, 1.0]], **tkwargs),
            torch.tensor([[10.0]], **tkwargs),
        )
        with mock.patch.object(
                model,
                "acqf_constructor",
                wraps=botorch_moo_defaults.get_NEHVI,
        ) as mock_get_nehvi:
            model.gen(
                n,
                bounds,
                objective_weights,
                outcome_constraints=oc,
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs()
                },
                objective_thresholds=obj_t,
            )
            mock_get_nehvi.assert_called_once()
            _, ckwargs = mock_get_nehvi.call_args
            self.assertEqual(ckwargs["model"].num_outputs, 3)
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], objective_weights))
            self.assertTrue(torch.equal(ckwargs["objective_thresholds"],
                                        obj_t))
            self.assertTrue(
                torch.equal(ckwargs["outcome_constraints"][0], oc[0]))
            self.assertTrue(
                torch.equal(ckwargs["outcome_constraints"][1], oc[1]))
            # the second datapoint is out of bounds
            self.assertTrue(torch.equal(ckwargs["X_observed"], Xs1[0][:1]))
            self.assertIsNone(ckwargs["X_pending"])
Example #17
    def test_GetPosteriorMean(self):

        model = BotorchModel(acqf_constructor=get_PosteriorMean)
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
            ),
            metric_names=self.metric_names,
        )

        # test model.gen() with no outcome_constraints. Analytic.
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        Xgen, wgen, _, __ = model.gen(
            n=1,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            linear_constraints=None,
        )
        self.assertTrue(torch.equal(wgen, torch.ones(1, dtype=self.dtype)))

        # test model.gen() works with outcome_constraints. qSimpleRegret.
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        Xgen, w, _, __ = model.gen(
            n=1,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=None,
        )

        # test model.gen() works with chebyshev scalarization.
        model = MultiObjectiveBotorchModel(acqf_constructor=get_PosteriorMean)
        model.fit(
            Xs=self.Xs * 2,
            Ys=self.Ys * 2,
            Yvars=self.Yvars * 2,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
            ),
            metric_names=["m1", "m2"],
        )
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        Xgen, w, _, __ = model.gen(
            n=1,
            bounds=self.bounds,
            objective_weights=torch.ones(2, dtype=self.dtype, device=self.device),
            outcome_constraints=(
                torch.tensor([[1.0, 0.0]], dtype=self.dtype, device=self.device),
                torch.tensor([[5.0]], dtype=self.dtype, device=self.device),
            ),
            objective_thresholds=torch.zeros(2, dtype=self.dtype, device=self.device),
            linear_constraints=None,
            model_gen_options={
                "acquisition_function_kwargs": {"chebyshev_scalarization": True}
            },
        )

        # ValueError with empty X_observed
        with self.assertRaises(ValueError):
            get_PosteriorMean(
                model=model, objective_weights=self.objective_weights, X_observed=None
            )

        # test model.predict()
        new_X_dummy = torch.rand(1, 1, 3, dtype=self.dtype, device=self.device)
        model.predict(new_X_dummy)
Example #18
    def test_BotorchMOOModel_with_random_scalarization(self,
                                                       dtype=torch.float,
                                                       cuda=False):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        obj_t = torch.tensor([1.0, 1.0], **tkwargs)

        model = MultiObjectiveBotorchModel(acqf_constructor=get_NEI)
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                search_space_digest=SearchSpaceDigest(
                    feature_names=fns,
                    bounds=bounds,
                    task_features=tfs,
                ),
                metric_names=mns,
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(
                SAMPLE_SIMPLEX_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.7, 0.3], **tkwargs),
        ) as _mock_sample_simplex:
            model.gen(
                n,
                bounds,
                objective_weights,
                objective_thresholds=obj_t,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # sample_simplex should be called once per generated candidate.
            self.assertEqual(n, _mock_sample_simplex.call_count)

        with mock.patch(
                SAMPLE_HYPERSPHERE_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.6, 0.8], **tkwargs),
        ) as _mock_sample_hypersphere:
            model.gen(
                n,
                bounds,
                objective_weights,
                objective_thresholds=obj_t,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True,
                        "random_scalarization_distribution": HYPERSPHERE,
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # sample_hypersphere should be called once per generated candidate.
            self.assertEqual(n, _mock_sample_hypersphere.call_count)

        # test input warping
        self.assertFalse(model.use_input_warping)
        model = MultiObjectiveBotorchModel(acqf_constructor=get_NEI,
                                           use_input_warping=True)
        model.fit(
            Xs=Xs1 + Xs2,
            Ys=Ys1 + Ys2,
            Yvars=Yvars1 + Yvars2,
            search_space_digest=SearchSpaceDigest(
                feature_names=fns,
                bounds=bounds,
                task_features=tfs,
            ),
            metric_names=mns,
        )
        self.assertTrue(model.use_input_warping)
        self.assertIsInstance(model.model, ModelListGP)
        for m in model.model.models:
            self.assertTrue(hasattr(m, "input_transform"))
            self.assertIsInstance(m.input_transform, Warp)
        self.assertFalse(hasattr(model.model, "input_transform"))

        # test loocv pseudo likelihood
        self.assertFalse(model.use_loocv_pseudo_likelihood)
        model = MultiObjectiveBotorchModel(acqf_constructor=get_NEI,
                                           use_loocv_pseudo_likelihood=True)
        model.fit(
            Xs=Xs1 + Xs2,
            Ys=Ys1 + Ys2,
            Yvars=Yvars1 + Yvars2,
            search_space_digest=SearchSpaceDigest(
                feature_names=fns,
                bounds=bounds,
                task_features=tfs,
            ),
            metric_names=mns,
        )
        self.assertTrue(model.use_loocv_pseudo_likelihood)
Example #19
    def test_hypervolume(self):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            hv = modelbridge.observed_hypervolume(
                objective_thresholds=objective_thresholds)
            expected_hv = 25  # (5 - 0) * (5 - 0)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(expected_hv, hv)

        with self.assertRaises(ValueError):
            modelbridge.predicted_hypervolume(
                objective_thresholds=objective_thresholds,
                observation_features=[])

        observation_features = [
            ObservationFeatures(parameters={
                "x1": 1.0,
                "x2": 2.0
            }),
            ObservationFeatures(parameters={
                "x1": 2.0,
                "x2": 1.0
            }),
        ]
        predicted_hv = modelbridge.predicted_hypervolume(
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(predicted_hv >= 0)
Example #20
class FrontierEvaluatorTest(TestCase):
    def setUp(self):
        self.X = torch.tensor([[1.0, 0.0], [1.0, 1.0], [1.0, 3.0], [2.0, 2.0],
                               [3.0, 1.0]])
        self.Y = torch.tensor([
            [1.0, 0.0, 0.0],
            [1.0, 1.0, 1.0],
            [1.0, 3.0, 3.0],
            [2.0, 2.0, 4.0],
            [3.0, 1.0, 3.0],
        ])
        self.Yvar = torch.zeros(5, 3)
        self.outcome_constraints = (
            torch.tensor([[0.0, 0.0, 1.0]]),
            torch.tensor([[3.5]]),
        )
        self.objective_thresholds = torch.tensor([0.5, 1.5])
        self.objective_weights = torch.tensor([1.0, 1.0])
        bounds = [(0.0, 4.0), (0.0, 4.0)]
        self.model = MultiObjectiveBotorchModel(model_predictor=dummy_predict)
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            self.model.fit(
                Xs=[self.X],
                Ys=[self.Y],
                Yvars=[self.Yvar],
                search_space_digest=SearchSpaceDigest(
                    feature_names=["x1", "x2"],
                    bounds=bounds,
                ),
                metric_names=["a", "b", "c"],
            )
            _mock_fit_model.assert_called_once()

    def test_pareto_frontier_raise_error_when_missing_data(self):
        with self.assertRaises(ValueError):
            pareto_frontier_evaluator(
                model=self.model,
                objective_thresholds=self.objective_thresholds,
                objective_weights=self.objective_weights,
                Yvar=self.Yvar,
            )

    def test_pareto_frontier_evaluator_raw(self):
        Yvar = torch.diag_embed(self.Yvar)
        Y, cov, indx = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            Y=self.Y,
            Yvar=Yvar,
        )
        pred = self.Y[2:4]
        self.assertTrue(torch.allclose(Y, pred), f"{Y} does not match {pred}")
        expected_cov = Yvar[2:4]
        self.assertTrue(torch.allclose(expected_cov, cov))
        self.assertTrue(torch.equal(torch.arange(2, 4), indx))

        # Omit objective_thresholds
        Y, cov, indx = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            Y=self.Y,
            Yvar=Yvar,
        )
        pred = self.Y[2:]
        self.assertTrue(torch.allclose(Y, pred), f"{Y} does not match {pred}")
        expected_cov = Yvar[2:]
        self.assertTrue(torch.allclose(expected_cov, cov))
        self.assertTrue(torch.equal(torch.arange(2, 5), indx))

        # Change objective_weights so goal is to minimize b
        Y, cov, indx = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=torch.tensor([1.0, -1.0]),
            objective_thresholds=self.objective_thresholds,
            Y=self.Y,
            Yvar=Yvar,
        )
        pred = self.Y[[0, 4]]
        self.assertTrue(torch.allclose(Y, pred),
                        f"actual {Y} does not match pred {pred}")
        expected_cov = Yvar[[0, 4]]
        self.assertTrue(torch.allclose(expected_cov, cov))

        # Test the case where no point is better than the reference point.
        Y, cov, indx = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=torch.full_like(self.objective_thresholds,
                                                 100.0),
            Y=self.Y,
            Yvar=Yvar,
        )
        self.assertTrue(torch.equal(Y, self.Y[:0]))
        self.assertTrue(torch.equal(cov, torch.zeros(0, 3, 3)))
        self.assertTrue(torch.equal(torch.tensor([], dtype=torch.long), indx))

    def test_pareto_frontier_evaluator_predict(self):
        Y, cov, indx = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            X=self.X,
        )
        pred = self.Y[2:4]
        self.assertTrue(torch.allclose(Y, pred),
                        f"actual {Y} does not match pred {pred}")
        self.assertTrue(torch.equal(torch.arange(2, 4), indx))

    def test_pareto_frontier_evaluator_with_outcome_constraints(self):
        Y, cov, indx = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            Y=self.Y,
            Yvar=self.Yvar,
            outcome_constraints=self.outcome_constraints,
        )
        pred = self.Y[2, :]
        self.assertTrue(torch.allclose(Y, pred),
                        f"actual {Y} does not match pred {pred}")
        self.assertTrue(torch.equal(torch.tensor([2], dtype=torch.long), indx))
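
The raw-evaluator expectations above can be reproduced by hand: keep points whose objectives ("a" and "b", the first two columns of self.Y) exceed the thresholds (0.5, 1.5), then keep the non-dominated survivors. A standalone sketch using botorch's is_non_dominated utility (assumes botorch is installed):

import torch
from botorch.utils.multi_objective.pareto import is_non_dominated

Y_obj = torch.tensor(
    [[1.0, 0.0], [1.0, 1.0], [1.0, 3.0], [2.0, 2.0], [3.0, 1.0]]
)
thresholds = torch.tensor([0.5, 1.5])
# Non-dominated under maximization, intersected with threshold feasibility.
mask = is_non_dominated(Y_obj) & (Y_obj > thresholds).all(dim=-1)
print(mask.nonzero().flatten())  # tensor([2, 3]), matching indx in the test
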
Example #21
class FrontierEvaluatorTest(TestCase):
    def setUp(self):
        self.X = torch.tensor([[1.0, 0.0], [1.0, 1.0], [1.0, 3.0], [2.0, 2.0],
                               [3.0, 1.0]])
        self.Y = torch.tensor([
            [1.0, 0.0, 0.0],
            [1.0, 1.0, 1.0],
            [1.0, 3.0, 3.0],
            [2.0, 2.0, 4.0],
            [3.0, 1.0, 3.0],
        ])
        self.Yvar = torch.zeros(5, 3)
        self.outcome_constraints = (
            torch.tensor([[0.0, 0.0, 1.0]]),
            torch.tensor([[3.5]]),
        )
        self.objective_thresholds = torch.tensor([0.5, 1.5])
        self.objective_weights = torch.tensor([1.0, 1.0])
        bounds = [(0.0, 4.0), (0.0, 4.0)]
        self.model = MultiObjectiveBotorchModel(model_predictor=dummy_predict)
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            self.model.fit(
                Xs=[self.X],
                Ys=[self.Y],
                Yvars=[self.Yvar],
                bounds=bounds,
                task_features=[],
                feature_names=["x1", "x2"],
                metric_names=["a", "b", "c"],
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

    def test_pareto_frontier_raise_error_when_missing_data(self):
        with self.assertRaises(ValueError):
            pareto_frontier_evaluator(
                model=self.model,
                objective_thresholds=self.objective_thresholds,
                objective_weights=self.objective_weights,
                Yvar=self.Yvar,
            )

    def test_pareto_frontier_evaluator_raw(self):
        Y, cov = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            Y=self.Y,
            Yvar=self.Yvar,
        )
        pred = self.Y[2:4]
        self.assertTrue(torch.allclose(Y, pred), f"{Y} does not match {pred}")

        # Omit objective_thresholds
        Y, cov = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            Y=self.Y,
            Yvar=self.Yvar,
        )
        pred = self.Y[2:]
        self.assertTrue(torch.allclose(Y, pred), f"{Y} does not match {pred}")

        # Change objective_weights so goal is to minimize b
        Y, cov = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=torch.tensor([1.0, -1.0]),
            objective_thresholds=self.objective_thresholds,
            Y=self.Y,
            Yvar=self.Yvar,
        )
        pred = self.Y[[0, 4]]
        self.assertTrue(torch.allclose(Y, pred),
                        f"actual {Y} does not match pred {pred}")

    def test_pareto_frontier_evaluator_predict(self):
        Y, cov = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            X=self.X,
        )
        pred = self.Y[2:4]
        self.assertTrue(torch.allclose(Y, pred),
                        f"actual {Y} does not match pred {pred}")

    def test_pareto_frontier_evaluator_with_outcome_constraints(self):
        Y, cov = pareto_frontier_evaluator(
            model=self.model,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            Y=self.Y,
            Yvar=self.Yvar,
            outcome_constraints=self.outcome_constraints,
        )
        pred = self.Y[2, :]
        self.assertTrue(torch.allclose(Y, pred),
                        f"actual {Y} does not match pred {pred}")
Example #22
    def test_BotorchMOOModel_with_random_scalarization(self,
                                                       dtype=torch.float,
                                                       cuda=False):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        model = MultiObjectiveBotorchModel(acqf_constructor=get_NEI)
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(
                SAMPLE_SIMPLEX_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.7, 0.3], **tkwargs),
        ) as _mock_sample_simplex, mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # sample_simplex should be called once per generated candidate.
            self.assertEqual(n, _mock_sample_simplex.call_count)

        with mock.patch(
                SAMPLE_HYPERSPHERE_UTIL_PATH,
                autospec=True,
                return_value=torch.tensor([0.6, 0.8], **tkwargs),
        ) as _mock_sample_hypersphere, mock.patch(
                "ax.models.torch.botorch_defaults.optimize_acqf",
                return_value=(X_dummy, acqfv_dummy),
        ) as _:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "acquisition_function_kwargs": {
                        "random_scalarization": True,
                        "random_scalarization_distribution": HYPERSPHERE,
                    },
                    "optimizer_kwargs": _get_optimizer_kwargs(),
                },
            )
            # sample_hypersphere should be called once per generated candidate.
            self.assertEqual(n, _mock_sample_hypersphere.call_count)
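
The mocked sample_simplex above is a real botorch utility that draws convex weights on the probability simplex; the test asserts it is invoked once per generated candidate. A hedged sketch of the scalarization it enables (the two-point Y here is illustrative, not the test's data):

import torch
from botorch.utils.sampling import sample_simplex

# Draw one random weight vector over two objectives, e.g. tensor([0.7, 0.3])
# as in the mock above, then collapse each outcome row to a single scalar.
weights = sample_simplex(d=2).squeeze(0)
Y = torch.tensor([[1.0, 3.0], [2.0, 2.0]])
scalarized = Y @ weights  # shape: (2,), one scalarized value per point
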
Example #23
    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        outcome_constraints = [
            OutcomeConstraint(
                metric=exp.metrics["branin_a"],
                op=ComparisonOp.GEQ,
                bound=-40.0,
                relative=False,
            )
        ]
        search_space.add_parameter_constraints(param_constraints)
        exp.optimization_config.outcome_constraints = outcome_constraints
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                ))
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            oc = ckwargs["outcome_constraints"]
            self.assertTrue(torch.equal(oc[0], torch.tensor([[-1.0, 0.0]])))
            self.assertTrue(torch.equal(oc[1], torch.tensor([[45.0]])))
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))
        # test using MTGP
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with self.assertRaises(ValueError):
            # Check that a ValueError is raised when MTGP is being used
            # and trial_index is not specified as a fixed feature.
            # Note: this error is raised by StratifiedStandardizeY
            modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
            )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack(
            [df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))
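
The nadir comparison at the end of this test can be read in isolation: inferred objective thresholds should sit below the component-wise minimum of the Pareto front. A toy sketch under the same maximization convention (the data here is made up):

import torch
from botorch.utils.multi_objective.pareto import is_non_dominated

Y = torch.tensor([[1.0, 4.0], [3.0, 3.0], [2.0, 1.0]])  # toy outcomes
pareto_Y = Y[is_non_dominated(Y)]   # keeps (1, 4) and (3, 3)
nadir = pareto_Y.min(dim=0).values  # tensor([1., 3.])
# Any sensible inferred thresholds t satisfy t < nadir component-wise,
# which is exactly what the assertions above verify against the experiment data.
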
Example #24
    def test_pareto_frontier(self, _):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp.optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            observed_frontier = observed_pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertIsNone(wrapped_frontier_evaluator.call_args.kwargs["X"])
            self.assertEqual(1, len(observed_frontier))
            self.assertEqual(observed_frontier[0].arm_name, "0_0")

        with self.assertRaises(ValueError):
            predicted_pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=[],
            )

        predicted_frontier = predicted_pareto_frontier(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=None,
        )
        self.assertEqual(predicted_frontier[0].arm_name, "0_0")

        observation_features = [
            ObservationFeatures(parameters={
                "x1": 0.0,
                "x2": 1.0
            }),
            ObservationFeatures(parameters={
                "x1": 1.0,
                "x2": 0.0
            }),
        ]
        observation_data = [
            ObservationData(
                metric_names=["branin_b", "branin_a"],
                means=np.array([1.0, 2.0]),
                covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            ),
            ObservationData(
                metric_names=["branin_a", "branin_b"],
                means=np.array([3.0, 4.0]),
                covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            ),
        ]
        predicted_frontier = predicted_pareto_frontier(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(len(predicted_frontier) <= 2)
        self.assertIsNone(predicted_frontier[0].arm_name)

        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            observed_frontier = pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
                observation_data=observation_data,
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertTrue(
                torch.equal(
                    wrapped_frontier_evaluator.call_args.kwargs["X"],
                    torch.tensor([[1.0, 4.0], [4.0, 1.0]]),
                ))

        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            observed_frontier = pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
                observation_data=observation_data,
                use_model_predictions=False,
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertIsNone(wrapped_frontier_evaluator.call_args.kwargs["X"])
            self.assertTrue(
                torch.equal(
                    wrapped_frontier_evaluator.call_args.kwargs["Y"],
                    torch.tensor([[9.0, 4.0], [16.0, 25.0]]),
                ))
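
Note how the two ObservationData entries above list their metrics in different orders; the bridge must align outcomes by metric name before stacking them into Y. A minimal standalone sketch of that alignment (names and data are illustrative, not Ax internals):

import numpy as np

records = [
    (["branin_b", "branin_a"], np.array([1.0, 2.0])),
    (["branin_a", "branin_b"], np.array([3.0, 4.0])),
]
order = ["branin_a", "branin_b"]
# Reindex each mean vector into a common metric order, then stack.
aligned = np.stack(
    [means[[names.index(m) for m in order]] for names, means in records]
)
print(aligned)  # [[2. 1.] [3. 4.]]
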
Example #25
    def test_BotorchMOOModel_with_ehvi(self, dtype=torch.float, cuda=False):
        tkwargs = {
            "device": torch.device("cuda") if cuda else torch.device("cpu"),
            "dtype": dtype,
        }
        Xs1, Ys1, Yvars1, bounds, tfs, fns, mns = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        Xs2, Ys2, Yvars2, _, _, _, _ = _get_torch_test_data(
            dtype=dtype, cuda=cuda, constant_noise=True)
        n = 3
        objective_weights = torch.tensor([1.0, 1.0], **tkwargs)
        model = MultiObjectiveBotorchModel(acqf_constructor=get_EHVI)

        X_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)
        acqfv_dummy = torch.tensor([[[1.0, 2.0, 3.0]]], **tkwargs)

        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2,
                Ys=Ys1 + Ys2,
                Yvars=Yvars1 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )
            _mock_fit_model.assert_called_once()

        with mock.patch(EHVI_ACQF_PATH,
                        wraps=moo_monte_carlo.qExpectedHypervolumeImprovement
                        ) as _mock_ehvi_acqf, mock.patch(
                            "ax.models.torch.botorch_defaults.optimize_acqf",
                            return_value=(X_dummy, acqfv_dummy),
                        ) as _, mock.patch(
                            PARTITIONING_PATH,
                            wraps=moo_monte_carlo.NondominatedPartitioning
                        ) as _mock_partitioning:
            model.gen(
                n,
                bounds,
                objective_weights,
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs()
                },
                objective_thresholds=torch.tensor([1.0, 1.0]),
            )
            # the EHVI acquisition function should be created only once.
            self.assertEqual(1, _mock_ehvi_acqf.call_count)
            # check partitioning strategy
            self.assertEqual(_mock_partitioning.call_args[1]["alpha"], 0.0)

        # Three objectives
        with mock.patch(FIT_MODEL_MO_PATH) as _mock_fit_model:
            model.fit(
                Xs=Xs1 + Xs2 + Xs2,
                Ys=Ys1 + Ys2 + Ys2,
                Yvars=Yvars1 + Yvars2 + Yvars2,
                bounds=bounds,
                task_features=tfs,
                feature_names=fns,
                metric_names=mns,
                fidelity_features=[],
            )

        with mock.patch(EHVI_ACQF_PATH,
                        wraps=moo_monte_carlo.qExpectedHypervolumeImprovement
                        ) as _mock_ehvi_acqf, mock.patch(
                            "ax.models.torch.botorch_defaults.optimize_acqf",
                            return_value=(X_dummy, acqfv_dummy),
                        ) as _, mock.patch(
                            PARTITIONING_PATH,
                            wraps=moo_monte_carlo.NondominatedPartitioning
                        ) as _mock_partitioning:
            model.gen(
                n,
                bounds,
                torch.tensor([1.0, 1.0, 1.0], **tkwargs),
                model_gen_options={
                    "optimizer_kwargs": _get_optimizer_kwargs()
                },
                objective_thresholds=torch.tensor([1.0, 1.0, 1.0]),
            )
            # check partitioning strategy
            self.assertEqual(_mock_partitioning.call_args[1]["alpha"], 1e-5)
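
The alpha assertions check the box-decomposition strategy: exact partitioning (alpha=0.0) for two objectives, approximate partitioning (alpha=1e-5) once a third objective is added. A sketch of the partitioning itself, assuming a recent botorch where NondominatedPartitioning lives under box_decompositions (the import path has moved between versions):

import torch
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
    NondominatedPartitioning,
)

ref_point = torch.tensor([0.0, 0.0])
Y = torch.tensor([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]])
# alpha=0.0 requests an exact decomposition of the dominated region.
part = NondominatedPartitioning(ref_point=ref_point, Y=Y, alpha=0.0)
print(part.compute_hypervolume())  # 6.0 for this toy front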