Example #1
    def testUnimplementedEvaluationFunction(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
        )
        with self.assertRaises(Exception):
            experiment.evaluation_function(parameterization={})

        experiment.evaluation_function = sum_evaluation_function
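
Example #1 relies on `sum_evaluation_function`, which is defined elsewhere in the test module. A minimal sketch of what such a function could look like, assuming the usual SimpleExperiment contract of mapping a parameterization to `{metric_name: (mean, SEM)}` (the body and the zero SEM are assumptions, not the original source):

def sum_evaluation_function(parameterization, weight=None):
    # Hypothetical reconstruction: sum the two Branin parameters and
    # report the result under the "sum" metric with zero noise (SEM = 0.0).
    sum_ = parameterization["x1"] + parameterization["x2"]
    return {"sum": (sum_, 0.0)}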
Example #2
    def testOptionalObjectiveName(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function_v2,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
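
`sum_evaluation_function_v2` is likewise not shown. Since the experiment is built without an `objective_name`, SimpleExperiment presumably falls back to a default objective name, which is compatible with an evaluation function that returns a bare (mean, SEM) tuple rather than a named-metric dict. A sketch under that assumption:

def sum_evaluation_function_v2(parameterization, weight=None):
    # Hypothetical reconstruction: return an unnamed (mean, SEM) tuple;
    # the experiment attributes it to its single objective.
    sum_ = parameterization["x1"] + parameterization["x2"]
    return (sum_, 0.0)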
Example #3
    def testEvaluationFunctionV4Numpy(self) -> None:
        experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            objective_name="sum",
            evaluation_function=sum_evaluation_function_v4_numpy,
        )

        for i in range(len(self.arms)):
            experiment.new_trial(generator_run=GeneratorRun(arms=[self.arms[i]]))
        self.assertFalse(experiment.eval().df.empty)
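
The `_numpy` suffix on `sum_evaluation_function_v4_numpy` suggests a variant that returns NumPy scalars rather than Python floats, exercising SimpleExperiment's type handling. A hedged sketch (the exact return shape is an assumption):

import numpy as np

def sum_evaluation_function_v4_numpy(parameterization, weight=None):
    # Hypothetical reconstruction: the mean comes back as a NumPy scalar
    # and is attributed to the "sum" objective named by the experiment.
    sum_ = parameterization["x1"] + parameterization["x2"]
    return np.float64(sum_)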
Example #4
    def setUp(self) -> None:
        self.experiment = SimpleExperiment(
            name="test_branin",
            search_space=get_branin_search_space(),
            evaluation_function=sum_evaluation_function,
            objective_name="sum",
        )
        self.arms = [
            Arm(parameters={"x1": 0.75, "x2": 1}),
            Arm(parameters={"x1": 2, "x2": 7}),
            Arm(parameters={"x1": 10, "x2": 8}),
            Arm(parameters={"x1": -2, "x2": 10}),
        ]
Example #5
    def testExperimentWithoutName(self) -> None:
        exp = Experiment(
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
        )
        self.assertEqual("Experiment(None)", str(exp))
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=5, seed=0))
        batch.run()
        self.assertEqual(batch.run_metadata, {"name": "0"})
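
The final assertion appears to reflect SyntheticRunner recording the deployed trial's index as the "name" entry in its run metadata; the first trial of the experiment has index 0, hence {"name": "0"}.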
Example #6
    def _setupBraninExperiment(self, n: int) -> Experiment:
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        batch.add_arms_and_weights(arms=get_branin_arms(n=n, seed=0))
        batch.run()

        (exp.new_batch_trial().add_arms_and_weights(
            arms=get_branin_arms(n=3 * n, seed=1)).run())
        return exp
Example #7
    def testRelativeConstraint(self):
        branin_rel = BenchmarkProblem(
            name="constrained_branin",
            fbest=0.397887,
            optimization_config=OptimizationConfig(
                objective=Objective(
                    metric=BraninMetric(name="branin_objective",
                                        param_names=["x1", "x2"],
                                        noise_sd=5.0),
                    minimize=True,
                ),
                outcome_constraints=[
                    OutcomeConstraint(
                        metric=L2NormMetric(
                            name="branin_constraint",
                            param_names=["x1", "x2"],
                            noise_sd=5.0,
                        ),
                        op=ComparisonOp.LEQ,
                        bound=5.0,
                        relative=True,
                    )
                ],
            ),
            search_space=get_branin_search_space(),
        )
        suite = BOBenchmarkingSuite()
        suite.run(
            num_runs=1,
            total_iterations=5,
            bo_strategies=[
                GenerationStrategy(
                    [GenerationStep(model=Models.SOBOL, num_arms=5)])
            ],
            bo_problems=[branin_rel],
        )
        with self.assertRaises(ValueError):
            suite.generate_report()
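
The expected ValueError is plausibly due to the outcome constraint being relative (its bound is interpreted as a change against a status quo arm): the benchmark problem defines no status quo, so the report's relativization step cannot be carried out.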
Example #8
    def testLowerBound(self):
        branin_lb = BenchmarkProblem(
            name="constrained_branin",
            fbest=0.397887,
            optimization_config=OptimizationConfig(
                objective=Objective(
                    metric=BraninMetric(name="branin_objective",
                                        param_names=["x1", "x2"],
                                        noise_sd=5.0),
                    minimize=True,
                ),
                outcome_constraints=[
                    OutcomeConstraint(
                        metric=L2NormMetric(
                            name="branin_constraint",
                            param_names=["x1", "x2"],
                            noise_sd=5.0,
                        ),
                        op=ComparisonOp.GEQ,
                        bound=5.0,
                        relative=False,
                    )
                ],
            ),
            search_space=get_branin_search_space(),
        )
        suite = BOBenchmarkingSuite()
        suite.run(
            num_runs=1,
            batch_size=2,
            total_iterations=4,
            bo_strategies=[
                GenerationStrategy(
                    [GenerationStep(model=Models.SOBOL, num_arms=5)])
            ],
            bo_problems=[branin_lb],
        )
        suite.generate_report(include_individual=True)
Example #9
    def testStatusQuoSetter(self):
        sq_parameters = self.experiment.status_quo.parameters
        self.experiment.status_quo = None
        self.assertIsNone(self.experiment.status_quo)

        # Verify normal update
        sq_parameters["w"] = 3.5
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(self.experiment.status_quo.parameters["w"], 3.5)
        self.assertEqual(self.experiment.status_quo.name, "status_quo")

        # Verify all None values
        self.experiment.status_quo = Arm(
            {n: None for n in sq_parameters.keys()}
        )
        self.assertIsNone(self.experiment.status_quo.parameters["w"])

        # Try extra param
        sq_parameters["a"] = 4
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Try wrong type
        sq_parameters.pop("a")
        sq_parameters["w"] = "hello"
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Verify arms_by_signature only contains status_quo
        self.assertEqual(len(self.experiment.arms_by_signature), 1)

        # Change status quo, verify still just 1 arm
        sq_parameters["w"] = 3.6
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(len(self.experiment.arms_by_signature), 1)

        # Make a batch, then change exp status quo, verify 2 arms
        self.experiment.new_batch_trial()
        sq_parameters["w"] = 3.7
        self.experiment.status_quo = Arm(sq_parameters)
        self.assertEqual(len(self.experiment.arms_by_signature), 2)
        self.assertEqual(self.experiment.status_quo.name, "status_quo_e0")

        # Try missing param
        sq_parameters.pop("w")
        with self.assertRaises(ValueError):
            self.experiment.status_quo = Arm(sq_parameters)

        # Actually name the status quo.
        exp = Experiment(
            name="test3",
            search_space=get_branin_search_space(),
            tracking_metrics=[
                BraninMetric(name="b", param_names=["x1", "x2"])
            ],
            runner=SyntheticRunner(),
        )
        batch = exp.new_batch_trial()
        arms = get_branin_arms(n=1, seed=0)
        batch.add_arms_and_weights(arms=arms)
        self.assertIsNone(exp.status_quo)
        exp.status_quo = arms[0]
        self.assertEqual(exp.status_quo.name, "0_0")

        # Try setting sq to existing arm with different name
        with self.assertRaises(ValueError):
            exp.status_quo = Arm(arms[0].parameters, name="new_name")
Example #10
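    # Fields of the BenchmarkProblem container (the enclosing class
    # statement falls outside this snippet):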
    name: str
    fbest: float
    optimization_config: OptimizationConfig
    search_space: SearchSpace


# Branin problems
branin = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmin,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=BraninMetric(
            name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=True,
    )),
    search_space=get_branin_search_space(),
)

branin_max = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmax,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=NegativeBraninMetric(
            name="neg_branin", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=False,
    )),
    search_space=get_branin_search_space(),
)
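
`branin_function` is defined outside this snippet; its usage above implies an object exposing `name`, `fmin`, and `fmax` attributes for the synthetic Branin function. A hypothetical stand-in (the class name is invented; 0.397887 is the well-known Branin global minimum, and roughly 308.13 is its maximum on the standard [-5, 10] x [0, 15] domain):

from typing import NamedTuple

class SyntheticFunctionSpec(NamedTuple):
    # Hypothetical container; the real object likely carries more metadata.
    name: str
    fmin: float
    fmax: float

branin_function = SyntheticFunctionSpec(name="Branin", fmin=0.397887, fmax=308.13)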

# Hartmann 6 problems