Example #1
    def test_transform_ref_point(self, _mock_fit, _mock_predict, _mock_unwrap):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics = exp.optimization_config.objective.metrics
        ref_point = {metrics[0].name: 0.0, metrics[1].name: 0.0}
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNone(modelbridge._transformed_ref_point)
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(2, len(modelbridge._transformed_ref_point))

        mixed_objective_constraints_optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[get_branin_metric(name="branin_b")], minimize=False),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="branin_a"),
                                  op=ComparisonOp.LEQ,
                                  bound=1)
            ],
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=mixed_objective_constraints_optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point={"branin_b": 0.0},
        )
        self.assertEqual({"branin_a", "branin_b"}, modelbridge._metric_names)
        self.assertEqual(["branin_b"], modelbridge._objective_metric_names)
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(1, len(modelbridge._transformed_ref_point))
Example #2
    def test_is_moo_problem(self):
        exp = get_branin_experiment()
        self.assertFalse(exp.is_moo_problem)
        exp = get_branin_experiment_with_multi_objective()
        self.assertTrue(exp.is_moo_problem)
        exp._optimization_config = None
        self.assertFalse(exp.is_moo_problem)
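
A short sketch of what the property plausibly checks (a hypothetical reconstruction, not the verbatim Ax source): an experiment is a MOO problem only when it has an optimization config whose objective is multi-objective.

class ExperimentSketch:
    def __init__(self, optimization_config=None):
        self.optimization_config = optimization_config

    @property
    def is_moo_problem(self) -> bool:
        # No optimization config => cannot be a MOO problem.
        if self.optimization_config is None:
            return False
        return self.optimization_config.is_moo_problem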
Example #3
    def test_MOO_RS(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_RS(experiment=single_obj_exp,
                       data=single_obj_exp.fetch_data())

        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_RS(experiment=multi_obj_exp,
                       data=multi_obj_exp.fetch_data())

        multi_obj_exp.trials[0].run()
        moo_rs = get_MOO_RS(experiment=multi_obj_exp,
                            data=multi_obj_exp.fetch_data())
        self.assertIsInstance(moo_rs, TorchModelBridge)
        self.assertEqual(
            {
                "acquisition_function_kwargs": {
                    "random_scalarization": True,
                    "sequential": True,
                }
            },
            moo_rs._default_model_gen_options,
        )
        moo_rs_run = moo_rs.gen(n=5)
        self.assertEqual(len(moo_rs_run.arms), 5)
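
The options asserted above enable random scalarization. A rough sketch of the idea (plain NumPy, not Ax's implementation): each generation step draws a random weight vector and collapses the objective matrix into a single scalarized score, so different draws steer the search toward different parts of the Pareto front.

import numpy as np

def random_linear_scalarization(Y: np.ndarray, rng: np.random.Generator) -> np.ndarray:
    """Collapse an (n, m) objective matrix to (n,) scores with random weights."""
    weights = rng.dirichlet(np.ones(Y.shape[1]))  # random point on the simplex
    return Y @ weights

scores = random_linear_scalarization(
    np.array([[1.0, 2.0], [2.0, 1.0]]), np.random.default_rng(0))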
Example #4
    def test_status_quo_for_non_monolithic_data(self, mock_gen):
        mock_gen.return_value = (
            [
                ObservationFeatures(
                    parameters={"x1": float(i), "x2": float(i)},
                    trial_index=np.int64(1),
                )
                for i in range(5)
            ],
            [1] * 5,
            None,
            {},
        )
        exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
        sobol = Models.SOBOL(search_space=exp.search_space)
        exp.new_batch_trial(sobol.gen(5)).set_status_quo_and_optimize_power(
            status_quo=exp.status_quo).run()

        # create data where metrics vary in start and end times
        data = get_non_monolithic_branin_moo_data()
        with warnings.catch_warnings(record=True) as ws:
            bridge = ModelBridge(
                experiment=exp,
                data=data,
                model=Model(),
                search_space=exp.search_space,
            )
        # just testing it doesn't error
        bridge.gen(5)
        self.assertTrue(any("start_time" in str(w.message) for w in ws))
        self.assertTrue(any("end_time" in str(w.message) for w in ws))
        self.assertEqual(bridge.status_quo.arm_name, "status_quo")
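
For reference, a minimal sketch of what "non-monolithic" data can look like: each metric is observed over a different time window, via the optional start_time / end_time columns that Ax's Data supports. The exact contents of get_non_monolithic_branin_moo_data are not shown here, so the values below are illustrative only.

import pandas as pd
from ax.core.data import Data

non_monolithic_df = pd.DataFrame({
    "arm_name": ["0_0", "0_0"],
    "metric_name": ["branin_a", "branin_b"],
    "mean": [1.0, 2.0],
    "sem": [0.1, 0.1],
    "trial_index": [0, 0],
    "start_time": ["2021-01-01", "2021-01-02"],  # metrics vary in start time
    "end_time": ["2021-01-02", "2021-01-03"],    # ...and in end time
})
data = Data(df=non_monolithic_df)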
Example #5
    def setUp(self):
        self.branin_experiment = get_branin_experiment_with_multi_objective()
        sobol = Models.SOBOL(search_space=self.branin_experiment.search_space)
        sobol_run = sobol.gen(n=20)
        self.branin_experiment.new_batch_trial().add_generator_run(
            sobol_run
        ).run().mark_completed()
        data = self.branin_experiment.fetch_data()

        ms_gpei = ModelSpec(model_enum=Models.GPEI)
        ms_gpei.fit(experiment=self.branin_experiment, data=data)

        ms_gpkg = ModelSpec(model_enum=Models.GPKG)
        ms_gpkg.fit(experiment=self.branin_experiment, data=data)

        self.fitted_model_specs = [ms_gpei, ms_gpkg]

        self.model_selection_node = GenerationNode(
            model_specs=self.fitted_model_specs,
            best_model_selector=SingleDiagnosticBestModelSelector(
                diagnostic="Fisher exact test p",
                criterion=DiagnosticCriterion.MIN,
                metric_aggregation=MetricAggregation.MEAN,
            ),
        )
Example #6
    def test_MOO_PAREGO(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_PAREGO(experiment=single_obj_exp, data=single_obj_exp.fetch_data())

        multi_obj_exp = get_branin_experiment_with_multi_objective(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_PAREGO(experiment=multi_obj_exp, data=multi_obj_exp.fetch_data())

        multi_obj_exp.trials[0].run().mark_completed()
        moo_parego = get_MOO_PAREGO(
            experiment=multi_obj_exp, data=multi_obj_exp.fetch_data()
        )
        self.assertIsInstance(moo_parego, TorchModelBridge)
        self.assertEqual(
            {
                "acquisition_function_kwargs": {
                    "chebyshev_scalarization": True,
                    "sequential": True,
                }
            },
            moo_parego._default_model_gen_options,
        )
        moo_parego_run = moo_parego.gen(n=2)
        self.assertEqual(len(moo_parego_run.arms), 2)
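
The options asserted above request Chebyshev scalarization instead. A minimal sketch of the augmented Chebyshev form for objectives being maximized (plain NumPy, not the Ax/BoTorch internals):

import numpy as np

def chebyshev_scalarization(Y: np.ndarray, weights: np.ndarray,
                            ref: np.ndarray, rho: float = 0.05) -> np.ndarray:
    """Augmented Chebyshev score of an (n, m) objective matrix, to maximize."""
    diff = weights * (Y - ref)  # weighted improvement over the reference point
    return diff.min(axis=1) + rho * diff.sum(axis=1)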
Example #7
    def test_pareto_frontier(self, _):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp.optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds
        )
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
            PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
        ) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            observed_frontier_data = modelbridge.observed_pareto_frontier(
                objective_thresholds=objective_thresholds
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(1, len(observed_frontier_data))

        with self.assertRaises(ValueError):
            modelbridge.predicted_pareto_frontier(
                objective_thresholds=objective_thresholds, observation_features=[]
            )

        observation_features = [
            ObservationFeatures(parameters={"x1": 0.0, "x2": 1.0}),
            ObservationFeatures(parameters={"x1": 1.0, "x2": 0.0}),
        ]
        predicted_frontier_data = modelbridge.predicted_pareto_frontier(
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(len(predicted_frontier_data) <= 2)
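
Underlying both frontier calls is a non-domination test. A minimal NumPy sketch of that test (not Ax's pareto_frontier_evaluator), assuming every objective is maximized:

import numpy as np

def non_dominated_mask(Y: np.ndarray) -> np.ndarray:
    """Mask over rows of an (n, m) objective matrix that no other row dominates."""
    mask = np.ones(len(Y), dtype=bool)
    for i, y in enumerate(Y):
        others = np.delete(Y, i, axis=0)
        # y is dominated if some other point is >= everywhere and > somewhere
        mask[i] = not np.any(np.all(others >= y, axis=1) & np.any(others > y, axis=1))
    return mask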
Example #8
    def test_MOO_EHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                ref_point=[0, 0],
            )
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                ref_point={
                    metrics[0].name: 0.0,
                    metrics[1].name: 0.0
                },
            )

        multi_obj_exp.trials[0].run()
        moo_ehvi = get_MOO_EHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            ref_point={
                metrics[0].name: 0.0,
                metrics[1].name: 0.0
            },
        )
        self.assertIsInstance(moo_ehvi, MultiObjectiveTorchModelBridge)
        moo_ehvi_run = moo_ehvi.gen(n=1)
        self.assertEqual(len(moo_ehvi_run.arms), 1)
Example #9
    def testPlotMultipleParetoFrontiers(self):
        experiment = get_branin_experiment_with_multi_objective(
            has_objective_thresholds=True)
        sobol = Models.SOBOL(search_space=experiment.search_space)
        a = sobol.gen(5)
        experiment.new_batch_trial(generator_run=a).run()
        pfrs = get_observed_pareto_frontiers(experiment=experiment)
        pfrs2 = copy.deepcopy(pfrs)
        pfr_lists = {"pfrs 1": pfrs, "pfrs 2": pfrs2}
        self.assertIsNotNone(interact_multiple_pareto_frontier(pfr_lists))
Example #10
    def test_MOO_with_more_outcomes_than_thresholds(self):
        experiment = get_branin_experiment_with_multi_objective(
            has_optimization_config=False)
        metric_c = Metric(name="c", lower_is_better=False)
        metric_a = Metric(name="a", lower_is_better=False)
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metric_c,
                bound=2.0,
                relative=False,
            ),
            ObjectiveThreshold(
                metric=metric_a,
                bound=1.0,
                relative=False,
            ),
        ]
        experiment.optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(objectives=[
                Objective(metric=metric_a),
                Objective(metric=metric_c),
            ]),
            objective_thresholds=objective_thresholds,
        )
        experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
        sobol = get_sobol(search_space=experiment.search_space)
        sobol_run = sobol.gen(1)
        experiment.new_batch_trial().add_generator_run(
            sobol_run).run().mark_completed()
        data = Data(
            pd.DataFrame(
                data={
                    "arm_name": ["0_0", "0_0", "0_0"],
                    "metric_name": ["a", "b", "c"],
                    "mean": [1.0, 2.0, 3.0],
                    "trial_index": [0, 0, 0],
                    "sem": [0, 0, 0],
                }))
        test_names_to_fns = {
            "MOO_NEHVI": get_MOO_NEHVI,
            "MOO_EHVI": get_MOO_EHVI,
            "MOO_PAREGO": get_MOO_PAREGO,
            "MOO_RS": get_MOO_RS,
        }
        for test_name, factory_fn in test_names_to_fns.items():
            with self.subTest(test_name):
                moo_model = factory_fn(
                    experiment=experiment,
                    data=data,
                )
                moo_gr = moo_model.gen(n=1)
                obj_t = moo_gr.gen_metadata["objective_thresholds"]
                self.assertEqual(obj_t[0], objective_thresholds[1])
                self.assertEqual(obj_t[1], objective_thresholds[0])
                self.assertEqual(len(obj_t), 2)
Example #11
    def test_ST_MTGP_NEHVI(self):
        """Tests single type MTGP NEHVI instantiation."""
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True,
            with_status_quo=True,
        )
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
            ObjectiveThreshold(metric=metrics[1],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
        ]
        sobol = Models.SOBOL(search_space=multi_obj_exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(2):
            sobol_run = sobol.gen(n=1)
            t = multi_obj_exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        status_quo_features = ObservationFeatures(
            parameters=multi_obj_exp.trials[0].status_quo.parameters,
            trial_index=0,
        )
        mtgp = Models.ST_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            status_quo_features=status_quo_features,
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(mtgp, TorchModelBridge)
        self.assertIsInstance(mtgp.model, MultiObjectiveBotorchModel)

        # test it can generate
        mtgp_run = mtgp.gen(n=1,
                            fixed_features=ObservationFeatures(parameters={},
                                                               trial_index=1))
        self.assertEqual(len(mtgp_run.arms), 1)

        # test a generated trial can be completed
        t = multi_obj_exp.new_batch_trial().add_generator_run(mtgp_run)
        t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
        t.run().mark_completed()
Example #12
    def test_MOO_EHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ)
        ]
        # ValueError: Multi-objective optimization requires multiple objectives.
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
            ObjectiveThreshold(metric=metrics[1],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
        ]
        # ValueError: MultiObjectiveOptimization requires non-empty data.
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run().mark_completed()
        moo_ehvi = get_MOO_EHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(moo_ehvi, MultiObjectiveTorchModelBridge)
        moo_ehvi_run = moo_ehvi.gen(n=1)
        self.assertEqual(len(moo_ehvi_run.arms), 1)
Example #13
    def test_status_quo_for_non_monolithic_data(self):
        exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        exp.new_batch_trial(sobol_run).set_status_quo_and_optimize_power(
            status_quo=exp.status_quo).run()

        # create data where metrics vary in start and end times
        data = get_non_monolithic_branin_moo_data()

        bridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            experiment=exp,
            data=data,
            transforms=[],
        )
        self.assertEqual(bridge.status_quo.arm_name, "status_quo")
Example #14
File: test_factory.py  Project: dme65/Ax
    def test_MOO_EHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = True
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ)
        ]
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = False
        metrics[1].lower_is_better = True
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0], bound=0.0, relative=False),
            ObjectiveThreshold(metric=metrics[1], bound=0.0, relative=False),
        ]
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run()
        moo_ehvi = get_MOO_EHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(moo_ehvi, MultiObjectiveTorchModelBridge)
        moo_ehvi_run = moo_ehvi.gen(n=1)
        self.assertEqual(len(moo_ehvi_run.arms), 1)
Example #15
    def test_get_standard_plots(self):
        exp = get_branin_experiment()
        self.assertEqual(
            len(
                get_standard_plots(experiment=exp,
                                   model=get_generation_strategy().model)),
            0,
        )
        exp = get_branin_experiment(with_batch=True, minimize=True)
        exp.trials[0].run()
        gs = choose_generation_strategy(search_space=exp.search_space)
        gs._model = Models.BOTORCH(experiment=exp, data=exp.fetch_data())
        plots = get_standard_plots(experiment=exp, model=gs.model)
        self.assertEqual(len(plots), 5)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        exp = get_branin_experiment_with_multi_objective(with_batch=True)
        exp.trials[0].run()
        gs = choose_generation_strategy(
            search_space=exp.search_space,
            optimization_config=exp.optimization_config)
        gs._model = Models.BOTORCH(experiment=exp, data=exp.fetch_data())
        plots = get_standard_plots(experiment=exp, model=gs.model)
        self.assertEqual(len(plots), 6)
Example #16
    def test_get_best_trial_moo(self):
        experiment = get_branin_experiment_with_multi_objective()
        experiment.runner = self.runner

        scheduler = Scheduler(
            experiment=experiment,
            generation_strategy=self.sobol_GPEI_GS,
            options=SchedulerOptions(init_seconds_between_polls=0.1),
        )

        scheduler.run_n_trials(max_trials=1)

        with self.assertRaisesRegex(
                NotImplementedError,
                "Please use `get_pareto_optimal_parameters`"):
            scheduler.get_best_trial()

        with self.assertRaisesRegex(
                NotImplementedError,
                "Please use `get_pareto_optimal_parameters`"):
            scheduler.get_best_parameters()

        self.assertIsNotNone(scheduler.get_pareto_optimal_parameters())
Example #17
    def test_pareto_frontier(self, _):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp.optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            observed_frontier = observed_pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertIsNone(wrapped_frontier_evaluator.call_args.kwargs["X"])
            self.assertEqual(1, len(observed_frontier))
            self.assertEqual(observed_frontier[0].arm_name, "0_0")

        with self.assertRaises(ValueError):
            predicted_pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=[],
            )

        predicted_frontier = predicted_pareto_frontier(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=None,
        )
        self.assertEqual(predicted_frontier[0].arm_name, "0_0")

        observation_features = [
            ObservationFeatures(parameters={
                "x1": 0.0,
                "x2": 1.0
            }),
            ObservationFeatures(parameters={
                "x1": 1.0,
                "x2": 0.0
            }),
        ]
        observation_data = [
            ObservationData(
                metric_names=["branin_b", "branin_a"],
                means=np.array([1.0, 2.0]),
                covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            ),
            ObservationData(
                metric_names=["branin_a", "branin_b"],
                means=np.array([3.0, 4.0]),
                covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            ),
        ]
        predicted_frontier = predicted_pareto_frontier(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(len(predicted_frontier) <= 2)
        self.assertIsNone(predicted_frontier[0].arm_name)

        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            observed_frontier = pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
                observation_data=observation_data,
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertTrue(
                torch.equal(
                    wrapped_frontier_evaluator.call_args.kwargs["X"],
                    torch.tensor([[1.0, 4.0], [4.0, 1.0]]),
                ))

        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            observed_frontier = pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
                observation_data=observation_data,
                use_model_predictions=False,
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertIsNone(wrapped_frontier_evaluator.call_args.kwargs["X"])
            self.assertTrue(
                torch.equal(
                    wrapped_frontier_evaluator.call_args.kwargs["Y"],
                    torch.tensor([[9.0, 4.0], [16.0, 25.0]]),
                ))
Example #18
    def test_hypervolume(self, _, cuda=False):
        for num_objectives in (2, 3):
            exp = get_branin_experiment_with_multi_objective(
                has_optimization_config=True,
                with_batch=True,
                num_objectives=num_objectives,
            )
            for trial in exp.trials.values():
                trial.mark_running(no_runner_required=True).mark_completed()
            metrics_dict = exp.optimization_config.metrics
            objective_thresholds = [
                ObjectiveThreshold(
                    metric=metrics_dict["branin_a"],
                    bound=0.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                ),
                ObjectiveThreshold(
                    metric=metrics_dict["branin_b"],
                    bound=1.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                ),
            ]
            if num_objectives == 3:
                objective_thresholds.append(
                    ObjectiveThreshold(
                        metric=metrics_dict["branin_c"],
                        bound=2.0,
                        relative=False,
                        op=ComparisonOp.GEQ,
                    )
                )
            optimization_config = exp.optimization_config.clone_with_args(
                objective_thresholds=objective_thresholds
            )
            exp.attach_data(
                get_branin_data_multi_objective(
                    trial_indices=exp.trials.keys(), num_objectives=num_objectives
                )
            )
            modelbridge = TorchModelBridge(
                search_space=exp.search_space,
                model=MultiObjectiveBotorchModel(),
                optimization_config=optimization_config,
                transforms=[],
                experiment=exp,
                data=exp.fetch_data(),
                torch_device=torch.device("cuda" if cuda else "cpu"),
                objective_thresholds=objective_thresholds,
            )
            with patch(
                PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
            ) as wrapped_frontier_evaluator:
                modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
                hv = observed_hypervolume(
                    modelbridge=modelbridge, objective_thresholds=objective_thresholds
                )
                expected_hv = 20 if num_objectives == 2 else 60  # 5 * 4 (* 3)
                wrapped_frontier_evaluator.assert_called_once()
                self.assertEqual(expected_hv, hv)
                if num_objectives == 3:
                    # Test selected_metrics
                    hv = observed_hypervolume(
                        modelbridge=modelbridge,
                        objective_thresholds=objective_thresholds,
                        selected_metrics=["branin_a", "branin_c"],
                    )
                    expected_hv = 15  # (5 - 0) * (5 - 2)
                    self.assertEqual(expected_hv, hv)
                    # test that non-objective outcome raises value error
                    with self.assertRaises(ValueError):
                        hv = observed_hypervolume(
                            modelbridge=modelbridge,
                            objective_thresholds=objective_thresholds,
                            selected_metrics=["tracking"],
                        )

            with self.assertRaises(ValueError):
                predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=[],
                )

            observation_features = [
                ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),
                ObservationFeatures(parameters={"x1": 2.0, "x2": 1.0}),
            ]
            predicted_hv = predicted_hypervolume(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
            )
            self.assertTrue(predicted_hv >= 0)
            if num_objectives == 3:
                # Test selected_metrics
                predicted_hv = predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=observation_features,
                    selected_metrics=["branin_a", "branin_c"],
                )
                self.assertTrue(predicted_hv >= 0)
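
As the arithmetic comments above suggest, the hypervolume here is the area (or volume) jointly dominated by the Pareto front relative to the objective thresholds. A minimal 2D sketch for maximization (plain NumPy, not the BoTorch routine Ax calls):

import numpy as np

def hypervolume_2d(pareto_Y: np.ndarray, ref: np.ndarray) -> float:
    """Area dominated by (n, 2) points, to be maximized, above a reference point."""
    pts = pareto_Y[np.all(pareto_Y > ref, axis=1)]
    pts = pts[np.argsort(-pts[:, 0])]  # sweep in decreasing first objective
    hv, prev_y2 = 0.0, ref[1]
    for y1, y2 in pts:
        if y2 > prev_y2:  # skip dominated points in the sweep
            hv += (y1 - ref[0]) * (y2 - prev_y2)
            prev_y2 = y2
    return hv

# A single front point at (5, 5) with thresholds (0, 1) gives (5 - 0) * (5 - 1) = 20.
assert hypervolume_2d(np.array([[5.0, 5.0]]), np.array([0.0, 1.0])) == 20.0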
Example #19
    def test_MTGP_NEHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = True
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0], bound=0.0, relative=False)
        ]
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )

        multi_obj_exp = get_branin_experiment_with_multi_objective(with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics[0], bound=0.0, relative=False, op=ComparisonOp.GEQ
            ),
            ObjectiveThreshold(
                metric=metrics[1], bound=0.0, relative=False, op=ComparisonOp.GEQ
            ),
        ]
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run()
        sobol_generator = get_sobol(search_space=multi_obj_exp.search_space)
        sobol_run = sobol_generator.gen(n=3)
        multi_obj_exp.new_batch_trial(optimize_for_power=False).add_generator_run(
            sobol_run
        )
        multi_obj_exp.trials[1].run()
        mt_ehvi = get_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
            trial_index=1,
        )
        self.assertIsInstance(mt_ehvi, TorchModelBridge)
        self.assertIsInstance(mt_ehvi.model.model.models[0], MultiTaskGP)
        task_covar_factor = mt_ehvi.model.model.models[0].task_covar_module.covar_factor
        self.assertEqual(task_covar_factor.shape, torch.Size([2, 2]))
        mt_ehvi_run = mt_ehvi.gen(
            n=1, fixed_features=ObservationFeatures(parameters={}, trial_index=1)
        )
        self.assertEqual(len(mt_ehvi_run.arms), 1)

        # Bad index given
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
                trial_index=999,
            )

        # Multi-type + multi-objective experiment
        multi_type_multi_obj_exp = get_multi_type_experiment_with_multi_objective(
            add_trials=True
        )
        data = multi_type_multi_obj_exp.fetch_data()
        mt_ehvi = get_MTGP_NEHVI(
            experiment=multi_type_multi_obj_exp,
            data=data,
            objective_thresholds=multi_objective_thresholds,
        )
Example #20
    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        data = exp.fetch_data()
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        search_space.add_parameter_constraints(param_constraints)
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                )
            )
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"], expected_obj_weights)
            )
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )
        # test using MTGP
        sobol_generator = get_sobol(
            search_space=exp.search_space,
            seed=TEST_SOBOL_SEED,
            # set initial position equal to the number of sobol arms generated
            # so far. This means that new sobol arms will complement the previous
            # arms in a space-filling fashion
            init_position=len(exp.arms_by_name) - 1,
        )
        sobol_run = sobol_generator.gen(n=2)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        torch.manual_seed(0)  # make model fitting deterministic
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                )
            )
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "_untransform_objective_thresholds",
                    wraps=modelbridge._untransform_objective_thresholds,
                )
            )
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack([df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound])
                < nadir.numpy()
            )
        )
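
The assertions above compare the inferred thresholds to the nadir point of the observed Pareto front. A compact sketch of that reference computation, assuming every objective is maximized (is_non_dominated is the same BoTorch helper the test imports):

import torch
from botorch.utils.multi_objective.pareto import is_non_dominated

def nadir_point(Y: torch.Tensor) -> torch.Tensor:
    """Componentwise minimum over the non-dominated rows of an (n, m) tensor."""
    pareto_Y = Y[is_non_dominated(Y)]
    return pareto_Y.min(dim=0).values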
Example #21
    def testObservedParetoFrontiers(self):
        experiment = get_branin_experiment_with_multi_objective(
            with_batch=True,
            has_optimization_config=False,
            with_status_quo=True)

        # Optimization config is not optional
        with self.assertRaises(ValueError):
            get_observed_pareto_frontiers(experiment=experiment, data=Data())

        objectives = [
            Objective(
                metric=BraninMetric(name="m1",
                                    param_names=["x1", "x2"],
                                    lower_is_better=True),
                minimize=True,
            ),
            Objective(
                metric=NegativeBraninMetric(name="m2",
                                            param_names=["x1", "x2"],
                                            lower_is_better=True),
                minimize=True,
            ),
            Objective(
                metric=BraninMetric(name="m3",
                                    param_names=["x1", "x2"],
                                    lower_is_better=True),
                minimize=True,
            ),
        ]
        bounds = [0, -100, 0]
        objective_thresholds = [
            ObjectiveThreshold(
                metric=objective.metric,
                bound=bounds[i],
                relative=True,
                op=ComparisonOp.LEQ,
            ) for i, objective in enumerate(objectives)
        ]
        objective = MultiObjective(objectives=objectives)
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=objective,
            objective_thresholds=objective_thresholds,
        )
        experiment.optimization_config = optimization_config
        experiment.trials[0].run()

        # For the check below, compute which arms are better than SQ
        df = experiment.fetch_data().df
        df["sem"] = np.nan
        data = Data(df)
        sq_val = df[(df["arm_name"] == "status_quo")
                    & (df["metric_name"] == "m1")]["mean"].values[0]
        pareto_arms = sorted(
            df[(df["mean"] <= sq_val)
               & (df["metric_name"] == "m1")]["arm_name"].unique().tolist())

        pfrs = get_observed_pareto_frontiers(experiment=experiment, data=data)
        # We have all pairs of metrics
        self.assertEqual(len(pfrs), 3)
        true_pairs = [("m1", "m2"), ("m1", "m3"), ("m2", "m3")]
        for i, pfr in enumerate(pfrs):
            self.assertEqual(pfr.primary_metric, true_pairs[i][0])
            self.assertEqual(pfr.secondary_metric, true_pairs[i][1])
            self.assertEqual(pfr.absolute_metrics, [])
            self.assertEqual(list(pfr.means.keys()), ["m1", "m2", "m3"])
            self.assertEqual(len(pfr.means["m1"]), len(pareto_arms))
            self.assertTrue(np.isnan(pfr.sems["m1"]).all())
            self.assertEqual(len(pfr.arm_names), len(pareto_arms))
            arm_idx = np.argsort(pfr.arm_names)
            for j, idx in enumerate(arm_idx):
                name = pareto_arms[j]
                self.assertEqual(pfr.arm_names[idx], name)
                self.assertEqual(pfr.param_dicts[idx],
                                 experiment.arms_by_name[name].parameters)
Example #22
    def test_get_standard_plots(self):
        exp = get_branin_experiment()
        self.assertEqual(
            len(
                get_standard_plots(experiment=exp,
                                   model=get_generation_strategy().model)),
            0,
        )
        exp = get_branin_experiment(with_batch=True, minimize=True)
        exp.trials[0].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
        )
        self.assertEqual(len(plots), 6)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        exp = get_branin_experiment_with_multi_objective(with_batch=True)
        exp.optimization_config.objective.objectives[0].minimize = False
        exp.optimization_config.objective.objectives[1].minimize = True
        exp.optimization_config._objective_thresholds = [
            ObjectiveThreshold(metric=exp.metrics["branin_a"],
                               op=ComparisonOp.GEQ,
                               bound=-100.0),
            ObjectiveThreshold(metric=exp.metrics["branin_b"],
                               op=ComparisonOp.LEQ,
                               bound=100.0),
        ]
        exp.trials[0].run()
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        # All plots are successfully created when objective thresholds are absent
        exp.optimization_config._objective_thresholds = []
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        exp = get_branin_experiment_with_timestamp_map_metric(
            with_status_quo=True)
        exp.new_trial().add_arm(exp.status_quo)
        exp.trials[0].run()
        exp.new_trial(generator_run=Models.SOBOL(
            search_space=exp.search_space).gen(n=1))
        exp.trials[1].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
            true_objective_metric_name="branin",
        )

        self.assertEqual(len(plots), 9)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        self.assertIn(
            "Objective branin_map vs. True Objective Metric branin",
            [p.layout.title.text for p in plots],
        )

        with self.assertRaisesRegex(
                ValueError, "Please add a valid true_objective_metric_name"):
            plots = get_standard_plots(
                experiment=exp,
                model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
                true_objective_metric_name="not_present",
            )

    def test_hypervolume(self):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            hv = modelbridge.observed_hypervolume(
                objective_thresholds=objective_thresholds)
            expected_hv = 25  # (5 - 0) * (5 - 0)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(expected_hv, hv)

        with self.assertRaises(ValueError):
            modelbridge.predicted_hypervolume(
                objective_thresholds=objective_thresholds,
                observation_features=[])

        observation_features = [
            ObservationFeatures(parameters={
                "x1": 1.0,
                "x2": 2.0
            }),
            ObservationFeatures(parameters={
                "x1": 2.0,
                "x2": 1.0
            }),
        ]
        predicted_hv = modelbridge.predicted_hypervolume(
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(predicted_hv >= 0)

    def test_infer_objective_thresholds(self, _, cuda=False):
        # lightweight test
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            with_status_quo=True,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=Cont_X_trans + Y_trans,
            torch_device=torch.device("cuda" if cuda else "cpu"),
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={"x1": 0.0})
        search_space = exp.search_space.clone()
        param_constraints = [
            ParameterConstraint(constraint_dict={"x1": 1.0}, bound=10.0)
        ]
        outcome_constraints = [
            OutcomeConstraint(
                metric=exp.metrics["branin_a"],
                op=ComparisonOp.GEQ,
                bound=-40.0,
                relative=False,
            )
        ]
        search_space.add_parameter_constraints(param_constraints)
        exp.optimization_config.outcome_constraints = outcome_constraints
        oc = exp.optimization_config.clone()
        oc.objective._objectives[0].minimize = True
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=oc,
            fixed_features=fixed_features,
        )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_get_transformed_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_gen_args",
                    wraps=modelbridge._get_transformed_gen_args,
                ))
            mock_get_transformed_model_gen_args = es.enter_context(
                patch.object(
                    modelbridge,
                    "_get_transformed_model_gen_args",
                    wraps=modelbridge._get_transformed_model_gen_args,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=oc,
                fixed_features=fixed_features,
            )
            expected_obj_weights = torch.tensor([-1.0, 1.0])
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            # check that transforms have been applied (at least UnitX)
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            oc = ckwargs["outcome_constraints"]
            self.assertTrue(torch.equal(oc[0], torch.tensor([[-1.0, 0.0]])))
            self.assertTrue(torch.equal(oc[1], torch.tensor([[45.0]])))
            lc = ckwargs["linear_constraints"]
            self.assertTrue(torch.equal(lc[0], torch.tensor([[15.0, 0.0]])))
            self.assertTrue(torch.equal(lc[1], torch.tensor([[15.0]])))
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
            mock_get_transformed_gen_args.assert_called_once()
            mock_get_transformed_model_gen_args.assert_called_once_with(
                search_space=expected_base_gen_args.search_space,
                fixed_features=expected_base_gen_args.fixed_features,
                pending_observations=expected_base_gen_args.pending_observations,
                optimization_config=expected_base_gen_args.optimization_config,
            )
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]

            self.assertTrue(
                torch.equal(ckwargs["objective_weights"],
                            expected_obj_weights))
            self.assertEqual(ckwargs["bounds"], [(0.0, 1.0), (0.0, 1.0)])
            self.assertEqual(ckwargs["fixed_features"], {0: 1.0 / 3.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.LEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        Y = np.stack([df.branin_a.values, df.branin_b.values]).T
        Y = torch.from_numpy(Y)
        Y[:, 0] *= -1
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([-obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))
        # test using MTGP
        sobol_generator = get_sobol(search_space=exp.search_space)
        sobol_run = sobol_generator.gen(n=5)
        trial = exp.new_batch_trial(optimize_for_power=True)
        trial.add_generator_run(sobol_run)
        trial.mark_running(no_runner_required=True).mark_completed()
        data = exp.fetch_data()
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=ST_MTGP_trans,
            experiment=exp,
            data=data,
        )
        fixed_features = ObservationFeatures(parameters={}, trial_index=1)
        expected_base_gen_args = modelbridge._get_transformed_gen_args(
            search_space=search_space.clone(),
            optimization_config=exp.optimization_config,
            fixed_features=fixed_features,
        )
        with self.assertRaises(ValueError):
            # Check that a ValueError is raised when MTGP is being used
            # and trial_index is not specified as a fixed feature.
            # Note: this error is raised by StratifiedStandardizeY
            modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
            )
        with ExitStack() as es:
            mock_model_infer_obj_t = es.enter_context(
                patch(
                    "ax.modelbridge.multi_objective_torch.infer_objective_thresholds",
                    wraps=infer_objective_thresholds,
                ))
            mock_untransform_objective_thresholds = es.enter_context(
                patch.object(
                    modelbridge,
                    "untransform_objective_thresholds",
                    wraps=modelbridge.untransform_objective_thresholds,
                ))
            obj_thresholds = modelbridge.infer_objective_thresholds(
                search_space=search_space,
                optimization_config=exp.optimization_config,
                fixed_features=fixed_features,
            )
            ckwargs = mock_model_infer_obj_t.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
            mock_untransform_objective_thresholds.assert_called_once()
            ckwargs = mock_untransform_objective_thresholds.call_args[1]
            self.assertEqual(ckwargs["fixed_features"], {2: 1.0})
        self.assertEqual(obj_thresholds[0].metric.name, "branin_a")
        self.assertEqual(obj_thresholds[1].metric.name, "branin_b")
        self.assertEqual(obj_thresholds[0].op, ComparisonOp.GEQ)
        self.assertEqual(obj_thresholds[1].op, ComparisonOp.GEQ)
        self.assertFalse(obj_thresholds[0].relative)
        self.assertFalse(obj_thresholds[1].relative)
        df = exp_to_df(exp)
        trial_mask = df.trial_index == 1
        Y = np.stack(
            [df.branin_a.values[trial_mask], df.branin_b.values[trial_mask]]).T
        Y = torch.from_numpy(Y)
        pareto_Y = Y[is_non_dominated(Y)]
        nadir = pareto_Y.min(dim=0).values
        self.assertTrue(
            np.all(
                np.array([obj_thresholds[0].bound, obj_thresholds[1].bound]) <
                nadir.numpy()))