Example #1
    def test_basic(self):
        """Run through the benchmarking loop."""
        results = full_benchmark_run(
            problem_groups={
                self.CATEGORY_NAME: [
                    SimpleBenchmarkProblem(branin, noise_sd=0.4),
                    BenchmarkProblem(
                        name="Branin",
                        search_space=get_branin_search_space(),
                        optimization_config=get_branin_optimization_config(),
                    ),
                    BenchmarkProblem(
                        search_space=get_branin_search_space(),
                        optimization_config=get_optimization_config(),
                    ),
                ]
            },
            method_groups={
                self.CATEGORY_NAME: [
                    GenerationStrategy(steps=[
                        GenerationStep(model=Models.SOBOL, num_trials=-1)
                    ])
                ]
            },
            num_replications=3,
            num_trials=5,
            # Just to have it be more telling if something is broken
            raise_all_exceptions=True,
            batch_size=[[1], [3], [1]],
        )
        self.assertEqual(len(results["Branin"]["Sobol"]), 3)
Example #2
    def test_minimize_callable(self):
        problem = BenchmarkProblem(
            name="Branin",
            search_space=get_branin_search_space(),
            optimization_config=get_branin_optimization_config(),
        )

        experiment, f = benchmark_minimize_callable(problem=problem,
                                                    num_trials=20,
                                                    method_name="scipy",
                                                    replication_index=2)
        res = minimize(
            fun=f,
            x0=np.zeros(2),
            bounds=[(-5, 10), (0, 15)],
            options={"maxiter": 3},
            method="Nelder-Mead",
        )
        self.assertTrue(res.fun < 0)  # maximization problem
        self.assertEqual(len(experiment.trials), res.nfev)
        self.assertEqual(len(experiment.fetch_data().df), res.nfev)
        self.assertEqual(experiment.name, "scipy_on_Branin__v2")
        with self.assertRaises(ValueError):
            minimize(fun=f,
                     x0=np.zeros(2),
                     bounds=[(-5, 10), (0, 15)],
                     method="Nelder-Mead")
Example #3
def get_branin_benchmark_problem() -> BenchmarkProblem:
    return BenchmarkProblem(
        search_space=get_branin_search_space(),
        optimization_config=get_branin_optimization_config(),
        optimal_value=branin.fmin,
        evaluate_suggested=False,
    )
Example #4
    def testRelativeConstraint(self):
        branin_rel = BenchmarkProblem(
            name="constrained_branin",
            fbest=0.397887,
            optimization_config=OptimizationConfig(
                objective=Objective(
                    metric=BraninMetric(
                        name="branin_objective",
                        param_names=["x1", "x2"],
                        noise_sd=5.0,
                    ),
                    minimize=True,
                ),
                outcome_constraints=[
                    OutcomeConstraint(
                        metric=L2NormMetric(
                            name="branin_constraint",
                            param_names=["x1", "x2"],
                            noise_sd=5.0,
                        ),
                        op=ComparisonOp.LEQ,
                        bound=5.0,
                        relative=True,
                    )
                ],
            ),
            search_space=get_branin_search_space(),
        )
        suite = BOBenchmarkingSuite()
        suite.run(
            num_runs=1,
            total_iterations=5,
            bo_strategies=[
                GenerationStrategy(
                    [GenerationStep(model=Models.SOBOL, num_arms=5)]
                )
            ],
            bo_problems=[branin_rel],
        )
        with self.assertRaises(ValueError):
            suite.generate_report()
Example #5
    def testLowerBound(self):
        branin_lb = BenchmarkProblem(
            name="constrained_branin",
            fbest=0.397887,
            optimization_config=OptimizationConfig(
                objective=Objective(
                    metric=BraninMetric(
                        name="branin_objective",
                        param_names=["x1", "x2"],
                        noise_sd=5.0,
                    ),
                    minimize=True,
                ),
                outcome_constraints=[
                    OutcomeConstraint(
                        metric=L2NormMetric(
                            name="branin_constraint",
                            param_names=["x1", "x2"],
                            noise_sd=5.0,
                        ),
                        op=ComparisonOp.GEQ,
                        bound=5.0,
                        relative=False,
                    )
                ],
            ),
            search_space=get_branin_search_space(),
        )
        suite = BOBenchmarkingSuite()
        suite.run(
            num_runs=1,
            batch_size=2,
            total_iterations=4,
            bo_strategies=[
                GenerationStrategy(
                    [GenerationStep(model=Models.SOBOL, num_arms=5)]
                )
            ],
            bo_problems=[branin_lb],
        )
        suite.generate_report(include_individual=True)
Example #6
from ax.utils.testing.core_stubs import (
    get_augmented_branin_optimization_config,
    get_augmented_hartmann_optimization_config,
    get_branin_search_space,
    get_hartmann_search_space,
)
from botorch.test_functions.synthetic import Ackley, Branin


# Initialize the single-fidelity problems
ackley = SimpleBenchmarkProblem(f=from_botorch(Ackley()), noise_sd=0.0, minimize=True)
branin = SimpleBenchmarkProblem(f=from_botorch(Branin()), noise_sd=0.0, minimize=True)
single_fidelity_problem_group = [ackley, branin]

# Initialize the multi-fidelity problems
augmented_branin = BenchmarkProblem(
    search_space=get_branin_search_space(with_fidelity_parameter=True),
    optimization_config=get_augmented_branin_optimization_config(),
)
augmented_hartmann = BenchmarkProblem(
    search_space=get_hartmann_search_space(with_fidelity_parameter=True),
    optimization_config=get_augmented_hartmann_optimization_config(),
)
multi_fidelity_problem_group = [augmented_branin, augmented_hartmann]

# Gather all of the problems
MODULAR_BOTORCH_PROBLEM_GROUPS = {
    "single_fidelity_models": single_fidelity_problem_group,
    "multi_fidelity_models": multi_fidelity_problem_group,
}
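
To show how a problem-group dict like MODULAR_BOTORCH_PROBLEM_GROUPS is typically consumed, here is a minimal sketch reusing the full_benchmark_run / GenerationStrategy API from Example #1; the import paths, the Sobol-only strategy, and the replication and trial counts are illustrative assumptions rather than part of the original file.

# Sketch only: assumed import paths, adjust to the installed Ax version.
from ax.benchmark.benchmark import full_benchmark_run
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models

# Pair each problem group with a Sobol-only strategy, as in Example #1.
method_groups = {
    group_name: [
        GenerationStrategy(
            steps=[GenerationStep(model=Models.SOBOL, num_trials=-1)]
        )
    ]
    for group_name in MODULAR_BOTORCH_PROBLEM_GROUPS
}

results = full_benchmark_run(
    problem_groups=MODULAR_BOTORCH_PROBLEM_GROUPS,
    method_groups=method_groups,
    num_replications=2,
    num_trials=10,
)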
Example #7
# x = np.arange(100)
# np.random.seed(10)
# np.random.shuffle(x)
# print(x[:6])  # [19 14 43 37 66  3]

hartmann6_100 = BenchmarkProblem(
    name="Hartmann6, D=100",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
                noise_sd=0.0,
            ),
            minimize=True,
        )
    ),
    search_space=SearchSpace(
        parameters=[
            RangeParameter(
                name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
            )
            for i in range(100)
        ]
    ),
)


hartmann6_1000 = BenchmarkProblem(
    name="Hartmann6, D=1000",
    optimal_value=-3.32237,