Example #1
    def test_immutable_search_space_and_opt_config(self):
        mutable_exp = self._setupBraninExperiment(n=5)
        self.assertFalse(mutable_exp.immutable_search_space_and_opt_config)
        immutable_exp = Experiment(
            name="test4",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            optimization_config=get_branin_optimization_config(),
            runner=SyntheticRunner(),
            properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},
        )
        self.assertTrue(immutable_exp.immutable_search_space_and_opt_config)
        with self.assertRaises(UnsupportedError):
            immutable_exp.optimization_config = get_branin_optimization_config()
        immutable_exp.new_batch_trial()
        with self.assertRaises(UnsupportedError):
            immutable_exp.search_space = get_branin_search_space()

        # Check that passing the property as just a string is processed
        # correctly.
        immutable_exp_2 = Experiment(
            name="test4",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
            properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: True},
        )
        self.assertTrue(immutable_exp_2.immutable_search_space_and_opt_config)
Example #2
 def setUp(self):
     self.branin_experiment = get_branin_experiment()
     self.branin_experiment._properties[
         Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF] = True
     self.branin_experiment_no_impl_metrics = Experiment(
         search_space=get_branin_search_space(),
         optimization_config=OptimizationConfig(objective=Objective(
             metric=Metric(name="branin"))),
     )
     self.sobol_GPEI_GS = choose_generation_strategy(
         search_space=get_branin_search_space())
     # Contrived GS to ensure that `DataRequiredError` is properly handled in the
     # scheduler. This error is raised when not enough trials have been observed
     # to proceed to the next generation step.
     self.two_sobol_steps_GS = GenerationStrategy(
         steps=[
             GenerationStep(
                 model=Models.SOBOL,
                 num_trials=5,
                 min_trials_observed=3,
                 max_parallelism=2,
             ),
             GenerationStep(
                 model=Models.SOBOL,
                 num_trials=-1,
                 max_parallelism=3,
             ),
         ]
     )
     # GS to force the scheduler to poll completed trials after each ran trial.
     self.sobol_GS_no_parallelism = GenerationStrategy(steps=[
         GenerationStep(
             model=Models.SOBOL, num_trials=-1, max_parallelism=1)
     ])
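
A minimal sketch (not part of the original setUp) of how the fixtures above could drive trial generation; it assumes `GenerationStrategy.gen` and `Experiment.new_trial` are used as in the other examples in this collection, and the helper name is hypothetical.

 def _sketch_generate_one_trial(self):
     # Hypothetical helper, for illustration only: generate a single trial on the
     # Branin experiment from the contrived two-step Sobol strategy above.
     generator_run = self.two_sobol_steps_GS.gen(experiment=self.branin_experiment)
     return self.branin_experiment.new_trial(generator_run=generator_run)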
Example #3
 def test_max_parallelism_adjustments(self):
     # No adjustment.
     sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
     self.assertIsNone(sobol_gpei._steps[0].max_parallelism)
     self.assertEqual(
         sobol_gpei._steps[1].max_parallelism, DEFAULT_BAYESIAN_PARALLELISM
     )
     # Impose a cap of 1 on max parallelism for all steps.
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), max_parallelism_cap=1
     )
     self.assertEqual(sobol_gpei._steps[0].max_parallelism, 1)
     self.assertEqual(sobol_gpei._steps[1].max_parallelism, 1)
     # Disable enforcing max parallelism for all steps.
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), max_parallelism_override=-1
     )
     self.assertIsNone(sobol_gpei._steps[0].max_parallelism)
     self.assertIsNone(sobol_gpei._steps[1].max_parallelism)
     # Override max parallelism for all steps.
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), max_parallelism_override=10
     )
     self.assertEqual(sobol_gpei._steps[0].max_parallelism, 10)
     self.assertEqual(sobol_gpei._steps[1].max_parallelism, 10)
Example #4
 def test_basic(self):
     """Run through the benchmarking loop."""
     results = full_benchmark_run(
         problem_groups={
             self.CATEGORY_NAME: [
                 SimpleBenchmarkProblem(branin, noise_sd=0.4),
                 BenchmarkProblem(
                     name="Branin",
                     search_space=get_branin_search_space(),
                     optimization_config=get_branin_optimization_config(),
                 ),
                 BenchmarkProblem(
                     search_space=get_branin_search_space(),
                     optimization_config=get_optimization_config(),
                 ),
             ]
         },
         method_groups={
             self.CATEGORY_NAME: [
                 GenerationStrategy(steps=[
                     GenerationStep(model=Models.SOBOL, num_trials=-1)
                 ])
             ]
         },
         num_replications=3,
         num_trials=5,
         # Just to have it be more telling if something is broken
         raise_all_exceptions=True,
         batch_size=[[1], [3], [1]],
     )
     self.assertEqual(len(results["Branin"]["Sobol"]), 3)
Example #5
 def test_enforce_sequential_optimization(self):
     sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
     self.assertEqual(sobol_gpei._steps[0].num_arms, 5)
     self.assertTrue(sobol_gpei._steps[0].enforce_num_arms)
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(),
         enforce_sequential_optimization=False,
     )
     self.assertEqual(sobol_gpei._steps[0].num_arms, 5)
     self.assertFalse(sobol_gpei._steps[0].enforce_num_arms)
Example #6
 def test_max_parallelism_adjustments(self):
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), max_parallelism_cap=1)
     self.assertEqual(sobol_gpei._steps[0].max_parallelism, 1)
     self.assertEqual(sobol_gpei._steps[1].max_parallelism, 1)
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), no_max_parallelism=True)
     self.assertIsNone(sobol_gpei._steps[0].max_parallelism)
     self.assertIsNone(sobol_gpei._steps[1].max_parallelism)
Example #7
 def test_enforce_sequential_optimization(self):
     sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
     self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
     self.assertTrue(sobol_gpei._steps[0].enforce_num_trials)
     self.assertIsNotNone(sobol_gpei._steps[1].max_parallelism)
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(),
         enforce_sequential_optimization=False,
     )
     self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
     self.assertFalse(sobol_gpei._steps[0].enforce_num_trials)
     self.assertIsNone(sobol_gpei._steps[1].max_parallelism)
Example #8
 def test_choose_generation_strategy(self):
     sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
     self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
     self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
     self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
     sobol = choose_generation_strategy(search_space=get_factorial_search_space())
     self.assertEqual(sobol._steps[0].model.value, "Sobol")
     self.assertEqual(len(sobol._steps), 1)
     sobol_gpei_batched = choose_generation_strategy(
         search_space=get_branin_search_space(), use_batch_trials=3
     )
     self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
Example #9
 def test_choose_generation_strategy(self):
     with self.subTest("GPEI"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space()
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
     with self.subTest("MOO"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(),
             optimization_config=MultiObjectiveOptimizationConfig(
                 objective=MultiObjective(objectives=[])
             ),
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
     with self.subTest("Sobol (we can try every option)"):
         sobol = choose_generation_strategy(
             search_space=get_factorial_search_space(), num_trials=1000
         )
         self.assertEqual(sobol._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol._steps), 1)
     with self.subTest("Sobol (because of too many categories)"):
         sobol_large = choose_generation_strategy(
             search_space=get_large_factorial_search_space()
         )
         self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol_large._steps), 1)
     with self.subTest("GPEI-Batched"):
         sobol_gpei_batched = choose_generation_strategy(
             search_space=get_branin_search_space(), use_batch_trials=3
         )
         self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
     with self.subTest("BO_MIXED (purely categorical)"):
         bo_mixed = choose_generation_strategy(
             search_space=get_factorial_search_space()
         )
         self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
     with self.subTest("BO_MIXED (mixed search space)"):
         bo_mixed_2 = choose_generation_strategy(
             search_space=get_branin_search_space(with_choice_parameter=True)
         )
         self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
Example #10
 def test_set_should_deduplicate(self):
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(),
         use_batch_trials=True,
         num_initialization_trials=3,
     )
     self.assertListEqual([s.should_deduplicate for s in sobol_gpei._steps],
                          [False] * 2)
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(),
         use_batch_trials=True,
         num_initialization_trials=3,
         should_deduplicate=True,
     )
     self.assertListEqual([s.should_deduplicate for s in sobol_gpei._steps],
                          [True] * 2)
Example #11
def get_branin_benchmark_problem() -> BenchmarkProblem:
    return BenchmarkProblem(
        search_space=get_branin_search_space(),
        optimization_config=get_branin_optimization_config(),
        optimal_value=branin.fmin,
        evaluate_suggested=False,
    )
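
Usage sketch only: a problem built this way can be fed to the callable-based benchmark API from Example #13; the call signatures and the trial/iteration counts below are assumed to match that example.

# Sketch, assuming the signatures shown in Example #13 (not part of the original source).
import numpy as np
from scipy.optimize import minimize

problem = get_branin_benchmark_problem()
experiment, f = benchmark_minimize_callable(
    problem=problem, num_trials=20, method_name="scipy"
)
res = minimize(
    fun=f,
    x0=np.zeros(2),
    bounds=[(-5, 10), (0, 15)],  # Branin bounds, as in Example #13
    options={"maxiter": 3},
    method="Nelder-Mead",
)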
Example #12
 def test_fixed_num_initialization_trials(self):
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(),
         use_batch_trials=True,
         num_initialization_trials=3,
     )
     self.assertEqual(sobol_gpei._steps[0].num_trials, 3)
Example #13
    def test_minimize_callable(self):
        problem = BenchmarkProblem(
            name="Branin",
            search_space=get_branin_search_space(),
            optimization_config=get_branin_optimization_config(),
        )

        experiment, f = benchmark_minimize_callable(problem=problem,
                                                    num_trials=20,
                                                    method_name="scipy",
                                                    replication_index=2)
        res = minimize(
            fun=f,
            x0=np.zeros(2),
            bounds=[(-5, 10), (0, 15)],
            options={"maxiter": 3},
            method="Nelder-Mead",
        )
        self.assertTrue(res.fun < 0)  # maximization problem
        self.assertEqual(len(experiment.trials), res.nfev)
        self.assertEqual(len(experiment.fetch_data().df), res.nfev)
        self.assertEqual(experiment.name, "scipy_on_Branin__v2")
        with self.assertRaises(ValueError):
            minimize(fun=f,
                     x0=np.zeros(2),
                     bounds=[(-5, 10), (0, 15)],
                     method="Nelder-Mead")
Example #14
 def setUp(self) -> None:
     self.experiment = SimpleExperiment(
         name="test_branin",
         search_space=get_branin_search_space(),
         evaluation_function=sum_evaluation_function,
         objective_name="sum",
     )
     self.arms = [
         Arm(parameters={"x1": 0.75, "x2": 1}),
         Arm(parameters={"x1": 2, "x2": 7}),
         Arm(parameters={"x1": 10, "x2": 8}),
         Arm(parameters={"x1": -2, "x2": 10}),
     ]
Example #15
 def test_winsorization(self):
     winsorized = choose_generation_strategy(
         search_space=get_branin_search_space(),
         winsorization_config=WinsorizationConfig(upper_quantile_margin=2),
     )
     self.assertIn(
         "Winsorize",
         winsorized._steps[1].model_kwargs.get("transform_configs"))
Example #16
 def testDeprecation(self) -> None:
     with patch.object(warnings, "warn") as mock_warn:
         SimpleExperiment(
             name="test_branin",
             search_space=get_branin_search_space(),
             objective_name="sum",
         )
         mock_warn.assert_called_once()
Example #17
 def test_winsorization(self):
     winsorized = choose_generation_strategy(
         search_space=get_branin_search_space(),
         winsorize_botorch_model=True,
         winsorization_limits=(None, 0.2),
     )
     self.assertIn(
         "Winsorize", winsorized._steps[1].model_kwargs.get("transform_configs")
     )
Example #18
    def testUnimplementedEvaluationFunction(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
        )
        with self.assertRaises(Exception):
            experiment.evaluation_function(parameterization={})

        experiment.evaluation_function = sum_evaluation_function

    def testOptionalObjectiveName(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)

    def testEvaluationFunctionV4Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
Example #21
 def testExperimentWithoutName(self):
     exp = Experiment(
         search_space=get_branin_search_space(),
         tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
         runner=SyntheticRunner(),
     )
     self.assertEqual("Experiment(None)", str(exp))
     batch = exp.new_batch_trial()
     batch.add_arms_and_weights(arms=get_branin_arms(n=5, seed=0))
     batch.run()
     self.assertEqual(batch.run_metadata, {"name": "0"})
Example #22
    def test_fetch_as_class(self):
        class MyMetric(Metric):
            @property
            def fetch_multi_group_by_metric(self) -> Type[Metric]:
                return Metric

        m = MyMetric(name="test_metric")
        exp = Experiment(
            name="test",
            search_space=get_branin_search_space(),
            tracking_metrics=[m],
            runner=SyntheticRunner(),
        )
        self.assertEqual(exp._metrics_by_class(), {Metric: [m]})
Example #23
    def _setupBraninExperiment(self, n: int) -> Experiment:
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))
        batch.run()

        batch_2 = exp.new_batch_trial()
        batch_2.add_arms_and_weights(arms=get_branin_arms(n=3 * n, seed=1))
        batch_2.run()
        return exp

 def testLowerBound(self):
     branin_lb = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(
                     name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0
                 ),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.GEQ,
                     bound=5.0,
                     relative=False,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         batch_size=2,
         total_iterations=4,
         bo_strategies=[
             GenerationStrategy([GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_lb],
     )
     suite.generate_report(include_individual=True)

 def testRelativeConstraint(self):
     branin_rel = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(
                     name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0
                 ),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.LEQ,
                     bound=5.0,
                     relative=True,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         total_iterations=5,
         bo_strategies=[
             GenerationStrategy([GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_rel],
     )
     with self.assertRaises(ValueError):
         suite.generate_report()
Example #26
    def testStatusQuoSetter(self):
        sq_parameters = self.experiment.status_quo.parameters
        self.experiment.status_quo = None
        self.assertIsNone(self.experiment.status_quo)

        # Verify normal update
        sq_parameters["w"] = 3.5
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(self.experiment.status_quo.parameters["w"], 3.5)
        self.assertEqual(self.experiment.status_quo.name, "status_quo")
        self.assertTrue("status_quo" in self.experiment.arms_by_name)

        # Verify all None values
        self.experiment.status_quo = Arm({n: None for n in sq_parameters.keys()})
        self.assertIsNone(self.experiment.status_quo.parameters["w"])

        # Try extra param
        sq_parameters["a"] = 4
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Try wrong type
        sq_parameters.pop("a")
        sq_parameters["w"] = "hello"
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Verify arms_by_signature, arms_by_name only contains status_quo
        self.assertEqual(len(self.experiment.arms_by_signature), 1)
        self.assertEqual(len(self.experiment.arms_by_name), 1)

        # Change status quo, verify still just 1 arm
        sq_parameters["w"] = 3.6
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(len(self.experiment.arms_by_signature), 1)
        self.assertEqual(len(self.experiment.arms_by_name), 1)

        # Make a batch, add status quo to it, then change exp status quo, verify 2 arms
        batch = self.experiment.new_batch_trial()
        batch.set_status_quo_with_weight(self.experiment.status_quo, 1)
        sq_parameters["w"] = 3.7
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(len(self.experiment.arms_by_signature), 2)
        self.assertEqual(len(self.experiment.arms_by_name), 2)
        self.assertEqual(self.experiment.status_quo.name, "status_quo_e0")
        self.assertTrue("status_quo_e0" in self.experiment.arms_by_name)

        # Try missing param
        sq_parameters.pop("w")
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Actually name the status quo.
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        arms = get_branin_arms(n=1, seed=0)
        batch.add_arms_and_weights(arms=arms)
        self.assertIsNone(exp.status_quo)
        exp.status_quo = arms[0]
        self.assertEqual(exp.status_quo.name, "0_0")

        # Try setting sq to existing arm with different name
        with self.assertRaises(ValueError):
            exp.status_quo = Arm(arms[0].parameters, name="new_name")
Example #27
    name: str
    fbest: float
    optimization_config: OptimizationConfig
    search_space: SearchSpace


# Branin problems
branin = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmin,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=BraninMetric(
            name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=True,
    )),
    search_space=get_branin_search_space(),
)

branin_max = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmax,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=NegativeBraninMetric(
            name="neg_branin", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=False,
    )),
    search_space=get_branin_search_space(),
)

# Hartmann 6 problems
Example #28
from ax.utils.testing.core_stubs import (
    get_augmented_branin_optimization_config,
    get_augmented_hartmann_optimization_config,
    get_branin_search_space,
    get_hartmann_search_space,
)
from botorch.test_functions.synthetic import Ackley, Branin


# Initialize the single-fidelity problems
ackley = SimpleBenchmarkProblem(f=from_botorch(Ackley()), noise_sd=0.0, minimize=True)
branin = SimpleBenchmarkProblem(f=from_botorch(Branin()), noise_sd=0.0, minimize=True)
single_fidelity_problem_group = [ackley, branin]

# Initialize the multi-fidelity problems
augmented_branin = BenchmarkProblem(
    search_space=get_branin_search_space(with_fidelity_parameter=True),
    optimization_config=get_augmented_branin_optimization_config(),
)
augmented_hartmann = BenchmarkProblem(
    search_space=get_hartmann_search_space(with_fidelity_parameter=True),
    optimization_config=get_augmented_hartmann_optimization_config(),
)
multi_fidelity_problem_group = [augmented_branin, augmented_hartmann]

# Gather all of the problems
MODULAR_BOTORCH_PROBLEM_GROUPS = {
    "single_fidelity_models": single_fidelity_problem_group,
    "multi_fidelity_models": multi_fidelity_problem_group,
}
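
A sketch of how these problem groups might be passed into the benchmarking loop, assuming the `full_benchmark_run` signature and the `GenerationStrategy`/`GenerationStep`/`Models` imports shown in Example #4; the Sobol-only method group is purely illustrative.

# Sketch only; assumes full_benchmark_run and the generation-strategy imports from Example #4.
sobol_only = [
    GenerationStrategy(steps=[GenerationStep(model=Models.SOBOL, num_trials=-1)])
]
results = full_benchmark_run(
    problem_groups=MODULAR_BOTORCH_PROBLEM_GROUPS,
    method_groups={
        "single_fidelity_models": sobol_only,
        "multi_fidelity_models": sobol_only,
    },
    num_replications=2,
    num_trials=5,
)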
Example #29
 def test_use_batch_trials(self):
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), use_batch_trials=True
     )
     self.assertEqual(sobol_gpei._steps[0].num_trials, 1)
Example #30
 def test_max_parallelism_override(self):
     sobol_gpei = choose_generation_strategy(
         search_space=get_branin_search_space(), max_parallelism_override=10
     )
     self.assertTrue(all(s.max_parallelism == 10 for s in sobol_gpei._steps))