Example #1
def test_best_raw_objective_point(self):
    exp = get_branin_experiment()
    # With no trials or data attached, a best point cannot be identified.
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(exp)
    self.assertIsNone(get_best_parameters(exp))
    exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    # A cloned config whose objective metric has no logged data should also raise.
    opt_conf = exp.optimization_config.clone()
    opt_conf.objective.metric._name = "not_branin"
    with self.assertRaisesRegex(ValueError, "No data has been logged"):
        get_best_raw_objective_point(exp, opt_conf)
Example #2
def test_best_raw_objective_point_scalarized(self):
    exp = get_branin_experiment()
    # Replace the default objective with a scalarized objective over the Branin metric.
    exp.optimization_config = OptimizationConfig(
        ScalarizedObjective(metrics=[get_branin_metric()], minimize=False)
    )
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(exp)
    self.assertIsNone(get_best_parameters(exp, Models))
    exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    self.assertEqual(get_best_raw_objective_point(exp)[0], {"x1": 5.0, "x2": 5.0})
Example #3
def get_best_parameters(
    self,
) -> Optional[Tuple[TParameterization, Optional[TModelPredictArm]]]:
    return best_point_utils.get_best_parameters(self.experiment)
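This wrapper delegates to best_point_utils and returns an optional (parameterization, model predictions) tuple. A minimal usage sketch, assuming client is an instance of the class that defines the wrapper (the variable name and print calls are illustrative only):

best = client.get_best_parameters()
if best is not None:
    parameters, predictions = best
    print(parameters)  # e.g. {"x1": 5.0, "x2": 5.0}
    if predictions is not None:
        # TModelPredictArm unpacks into per-metric predicted means and covariances.
        means, covariances = predictions
        print(means)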
Example #4
    def test_best_from_model_prediction(self):
        exp = get_branin_experiment()

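        # Run three quasi-random Sobol trials and attach their data to the experiment.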
        for _ in range(3):
            sobol = Models.SOBOL(search_space=exp.search_space)
            generator_run = sobol.gen(n=1)
            trial = exp.new_trial(generator_run=generator_run)
            trial.run()
            trial.mark_completed()
            exp.attach_data(exp.fetch_data())

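        # Fit a BoTorch model to the accumulated data and run one model-generated trial.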
        gpei = Models.BOTORCH(experiment=exp, data=exp.lookup_data())
        generator_run = gpei.gen(n=1)
        trial = exp.new_trial(generator_run=generator_run)
        trial.run()
        trial.mark_completed()

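        # Mock model_best_point to return a fixed arm with predicted means and covariances,
        # so the test controls what the model-based best-point path recommends.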
        with patch.object(
            ArrayModelBridge,
            "model_best_point",
            return_value=(
                (
                    Arm(
                        name="0_0",
                        parameters={"x1": -4.842811906710267, "x2": 11.887089014053345},
                    ),
                    (
                        {"branin": 34.76260622783635},
                        {"branin": {"branin": 0.00028306433439807734}},
                    ),
                )
            ),
        ) as mock_model_best_point, self.assertLogs(
            logger="ax.service.utils.best_point", level="WARN"
        ) as lg:
            # Test bad model fit causes function to resort back to raw data
            with patch(
                "ax.service.utils.best_point.assess_model_fit",
                return_value=AssessModelFitResult(
                    good_fit_metrics_to_fisher_score={},
                    bad_fit_metrics_to_fisher_score={
                        "branin": 0,
                    },
                ),
            ):
                self.assertIsNotNone(get_best_parameters(exp, Models))
                self.assertTrue(
                    any("Model fit is poor" in warning for warning in lg.output),
                    msg=lg.output,
                )
                mock_model_best_point.assert_not_called()

            # Test model best point is used when fit is good
            with patch(
                "ax.service.utils.best_point.assess_model_fit",
                return_value=AssessModelFitResult(
                    good_fit_metrics_to_fisher_score={
                        "branin": 0,
                    },
                    bad_fit_metrics_to_fisher_score={},
                ),
            ):
                self.assertIsNotNone(get_best_parameters(exp, Models))
                mock_model_best_point.assert_called()

        # Assert the non-mocked method works correctly as well
        self.assertIsNotNone(get_best_parameters(exp, Models))