Example #1
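Exercises the Experiment metric setters: swapping in a new optimization config, adding single and multiple tracking metrics, updating a tracking metric in place, and the ValueError cases for duplicate metrics, metrics already in the optimization config, and updates or removals of metrics not on the experiment.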
    def testMetricSetters(self):
        # Establish current metrics size
        self.assertEqual(
            len(get_optimization_config().metrics) + 1,
            len(self.experiment.metrics))

        # Add optimization config with 1 different metric
        opt_config = get_optimization_config()
        opt_config.outcome_constraints[0].metric = Metric(name="m3")
        self.experiment.optimization_config = opt_config

        # Verify total metrics size is the same.
        self.assertEqual(
            len(get_optimization_config().metrics) + 1,
            len(self.experiment.metrics))

        # Test adding new tracking metric
        self.experiment.add_tracking_metric(Metric(name="m4"))
        self.assertEqual(
            len(get_optimization_config().metrics) + 2,
            len(self.experiment.metrics))

        # Test adding new tracking metrics
        self.experiment.add_tracking_metrics([Metric(name="z1")])
        self.assertEqual(
            len(get_optimization_config().metrics) + 3,
            len(self.experiment.metrics))

        # Verify update_tracking_metric updates the metric definition
        self.assertIsNone(self.experiment.metrics["m4"].lower_is_better)
        self.experiment.update_tracking_metric(
            Metric(name="m4", lower_is_better=True))
        self.assertTrue(self.experiment.metrics["m4"].lower_is_better)

        # Verify unable to add existing metric
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(Metric(name="m4"))

        # Verify unable to bulk-add metrics when one already exists
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metrics(
                [Metric(name="z1"), Metric(name="m4")])

        # Verify unable to add metric in optimization config
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(Metric(name="m1"))

        # Verify unable to bulk-add metrics when one is in the optimization config
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metrics(
                [Metric(name="z2"), Metric(name="m1")])

        # Cannot update metric not already on experiment
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(Metric(name="m5"))

        # Cannot remove metric not already on experiment
        with self.assertRaises(ValueError):
            self.experiment.remove_tracking_metric(metric_name="m5")
Example #2
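Splits a GeneratorRun into one single-arm run per arm and checks that each split run keeps the right arm, weight, and shared bookkeeping fields while dropping the optimization config, search space, and generation metadata.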
    def test_split_by_arm(self):
        gm = {"hello": "world"}
        generator_run = GeneratorRun(
            arms=self.arms,
            weights=self.weights,
            optimization_config=get_optimization_config(),
            search_space=get_search_space(),
            gen_metadata=gm,
        )
        generator_runs = generator_run.split_by_arm()
        self.assertEqual(len(generator_runs), len(self.arms))
        for a, w, gr in zip(self.arms, self.weights, generator_runs):
            with self.subTest(a=a, w=w, gr=gr):
                # Make sure correct arms and weights appear in split
                # generator runs.
                self.assertEqual(gr.arms, [a])
                self.assertEqual(gr.weights, [w])
                self.assertEqual(
                    gr._generator_run_type, generator_run._generator_run_type
                )
                self.assertEqual(gr._model_key, generator_run._model_key)
                self.assertEqual(
                    gr._generation_step_index, generator_run._generation_step_index
                )
                self.assertIsNone(gr._optimization_config)
                self.assertIsNone(gr._search_space)
                self.assertIsNone(gr._gen_metadata)
Example #3
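Storage round-trip test: saves an experiment, updates its objective in place, then replaces the objective (the old one becomes a tracking metric), asserting at each step that the SQAMetric row count matches the experiment's metrics and that the reloaded experiment equals the original.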
    def testExperimentObjectiveUpdates(self):
        experiment = get_experiment_with_batch_trial()
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(),
                         len(experiment.metrics))

        # update objective
        # (should perform update in place)
        optimization_config = get_optimization_config()
        objective = get_objective()
        objective.minimize = True
        optimization_config.objective = objective
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(),
                         len(experiment.metrics))

        # replace objective
        # (old one should become tracking metric)
        optimization_config.objective = Objective(metric=Metric(
            name="objective"))
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(),
                         len(experiment.metrics))

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)
Example #4
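Runs the full benchmarking loop over three Branin-based problems with a Sobol-only generation strategy and checks that three replications of results are produced.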
    def test_basic(self):
        """Run through the benchmarking loop."""
        results = full_benchmark_run(
            problem_groups={
                self.CATEGORY_NAME: [
                    SimpleBenchmarkProblem(branin, noise_sd=0.4),
                    BenchmarkProblem(
                        name="Branin",
                        search_space=get_branin_search_space(),
                        optimization_config=get_branin_optimization_config(),
                    ),
                    BenchmarkProblem(
                        search_space=get_branin_search_space(),
                        optimization_config=get_optimization_config(),
                    ),
                ]
            },
            method_groups={
                self.CATEGORY_NAME: [
                    GenerationStrategy(steps=[
                        GenerationStep(model=Models.SOBOL, num_trials=-1)
                    ])
                ]
            },
            num_replications=3,
            num_trials=5,
            # Just to have it be more telling if something is broken
            raise_all_exceptions=True,
            batch_size=[[1], [3], [1]],
        )
        self.assertEqual(len(results["Branin"]["Sobol"]), 3)
Example #5
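Checks that the gen_metadata passed to a GeneratorRun is returned unchanged by its gen_metadata property.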
    def testGenMetadata(self):
        gm = {"hello": "world"}
        generator_run = GeneratorRun(
            arms=self.arms,
            weights=self.weights,
            optimization_config=get_optimization_config(),
            search_space=get_search_space(),
            gen_metadata=gm,
        )
        self.assertEqual(generator_run.gen_metadata, gm)
Example #6
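Checks that tracking metrics overlapping with optimization config metrics are merged, so only the non-overlapping tracking metric adds to the experiment's metric count.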
    def testTrackingMetricsMerge(self):
        # Tracking and optimization metrics should get merged
        # m1 is on optimization_config while m3 is not
        exp = Experiment(
            name="test2",
            search_space=get_search_space(),
            optimization_config=get_optimization_config(),
            tracking_metrics=[Metric(name="m1"), Metric(name="m3")],
        )
        self.assertEqual(len(exp.optimization_config.metrics) + 1, len(exp.metrics))
Example #7
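Checks Experiment equality against itself and inequality against a second, differently configured experiment.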
    def testEq(self):
        self.assertEqual(self.experiment, self.experiment)

        experiment2 = Experiment(
            name="test2",
            search_space=get_search_space(),
            optimization_config=get_optimization_config(),
            status_quo=get_arm(),
            description="test description",
        )
        self.assertNotEqual(self.experiment, experiment2)
Example #8
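Checks that best_arm_predictions passed to a GeneratorRun round-trips through the corresponding property.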
    def testBestArm(self):
        generator_run = GeneratorRun(
            arms=self.arms,
            weights=self.weights,
            optimization_config=get_optimization_config(),
            search_space=get_search_space(),
            best_arm_predictions=(self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}})),
        )
        self.assertEqual(
            generator_run.best_arm_predictions,
            (self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}})),
        )
Example #9
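Checks the model_predictions and model_predictions_by_arm properties, and that both are None when no predictions are supplied.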
    def testModelPredictions(self):
        self.assertEqual(self.unweighted_run.model_predictions, get_model_predictions())
        self.assertEqual(
            self.unweighted_run.model_predictions_by_arm,
            get_model_predictions_per_arm(),
        )
        run_no_model_predictions = GeneratorRun(
            arms=self.arms,
            weights=self.weights,
            optimization_config=get_optimization_config(),
            search_space=get_search_space(),
        )
        self.assertIsNone(run_no_model_predictions.model_predictions)
        self.assertIsNone(run_no_model_predictions.model_predictions_by_arm)
Example #10
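Storage round-trip test for outcome constraints: updates a constraint in place, adds a second one, then removes it again (the removed metric becomes a tracking metric), asserting SQAMetric row counts and reload equality throughout.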
    def testExperimentOutcomeConstraintUpdates(self):
        experiment = get_experiment_with_batch_trial()
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # update outcome constraint
        # (should perform update in place)
        optimization_config = get_optimization_config()
        outcome_constraint = get_outcome_constraint()
        outcome_constraint.bound = -1.0
        optimization_config.outcome_constraints = [outcome_constraint]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # add outcome constraint
        outcome_constraint2 = OutcomeConstraint(
            metric=Metric(name="outcome"), op=ComparisonOp.GEQ, bound=-0.5
        )
        optimization_config.outcome_constraints = [
            outcome_constraint,
            outcome_constraint2,
        ]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # remove outcome constraint
        # (old one should become tracking metric)
        optimization_config.outcome_constraints = [outcome_constraint]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)
Example #11
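Test fixture building an unweighted and a weighted GeneratorRun from the same arms, optimization config, search space, and model predictions.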
    def setUp(self):
        self.model_predictions = get_model_predictions()
        self.optimization_config = get_optimization_config()
        self.search_space = get_search_space()

        self.arms = get_arms()
        self.weights = [2, 1, 1]
        self.unweighted_run = GeneratorRun(
            arms=self.arms,
            optimization_config=self.optimization_config,
            search_space=self.search_space,
            model_predictions=self.model_predictions,
            fit_time=4.0,
            gen_time=10.0,
        )
        self.weighted_run = GeneratorRun(
            arms=self.arms,
            weights=self.weights,
            optimization_config=self.optimization_config,
            search_space=self.search_space,
            model_predictions=self.model_predictions,
        )
Example #12
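Checks the basic Experiment properties: status quo, search space, optimization config, and the is_test flag.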
    def testBasicProperties(self):
        self.assertEqual(self.experiment.status_quo, get_status_quo())
        self.assertEqual(self.experiment.search_space, get_search_space())
        self.assertEqual(
            self.experiment.optimization_config, get_optimization_config()
        )
        self.assertEqual(self.experiment.is_test, True)
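All of the examples above lean on the get_optimization_config test stub. For orientation only, below is a minimal sketch of what such a helper could look like, built from the Ax core classes the examples themselves use (Metric, Objective, OutcomeConstraint, OptimizationConfig, ComparisonOp). The import paths, metric names, and bound value here are assumptions for illustration, not the actual stub from Ax's testing utilities.

# A minimal sketch of a get_optimization_config()-style helper.
# Import paths, metric names, and bound are assumed; the real Ax stub may differ.
from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import OutcomeConstraint
from ax.core.types import ComparisonOp


def make_optimization_config() -> OptimizationConfig:
    # One objective metric ("m1") plus one constrained metric ("m2"),
    # matching the two-metric shape the tests above rely on.
    objective = Objective(metric=Metric(name="m1"), minimize=False)
    constraint = OutcomeConstraint(
        metric=Metric(name="m2"), op=ComparisonOp.GEQ, bound=-0.25
    )
    return OptimizationConfig(
        objective=objective, outcome_constraints=[constraint]
    )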