def testExperimentObjectiveUpdates(self):
    """Objective edits should keep saved SQAMetric rows in sync with the experiment."""
    experiment = get_experiment_with_batch_trial()
    save_experiment(experiment)

    def assert_db_metric_count_matches():
        # The number of metric rows persisted must mirror the experiment's metrics.
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

    assert_db_metric_count_matches()

    # Update the objective in place: same metric, flipped optimization direction.
    opt_config = get_optimization_config()
    obj = get_objective()
    obj.minimize = True
    opt_config.objective = obj
    experiment.optimization_config = opt_config
    save_experiment(experiment)
    assert_db_metric_count_matches()

    # Replace the objective outright; the displaced one becomes a tracking metric.
    opt_config.objective = Objective(metric=Metric(name="objective"))
    experiment.optimization_config = opt_config
    save_experiment(experiment)
    assert_db_metric_count_matches()

    # Round-trip through storage and confirm equality.
    self.assertEqual(experiment, load_experiment(experiment.name))
def testMetricSetters(self):
    """Cover tracking-metric add/update/remove, including the invalid operations."""

    def expected_total(extra_tracking):
        # Optimization-config metrics plus however many tracking metrics exist.
        return len(get_optimization_config().metrics) + extra_tracking

    # Baseline: optimization metrics plus the one pre-existing tracking metric.
    self.assertEqual(expected_total(1), len(self.experiment.metrics))

    # Swapping a constraint metric for a new name leaves the total unchanged.
    opt_config = get_optimization_config()
    opt_config.outcome_constraints[0].metric = Metric(name="m3")
    self.experiment.optimization_config = opt_config
    self.assertEqual(expected_total(1), len(self.experiment.metrics))

    # Adding a fresh tracking metric grows the total by one.
    self.experiment.add_tracking_metric(Metric(name="m4"))
    self.assertEqual(expected_total(2), len(self.experiment.metrics))

    # update_tracking_metric replaces the stored metric definition.
    self.assertIsNone(self.experiment.metrics["m4"].lower_is_better)
    self.experiment.update_tracking_metric(
        Metric(name="m4", lower_is_better=True))
    self.assertTrue(self.experiment.metrics["m4"].lower_is_better)

    # Re-adding an existing tracking metric is rejected.
    with self.assertRaises(ValueError):
        self.experiment.add_tracking_metric(Metric(name="m4"))
    # A metric already in the optimization config cannot also be tracked.
    with self.assertRaises(ValueError):
        self.experiment.add_tracking_metric(Metric(name="m1"))
    # Cannot update a metric the experiment does not have.
    with self.assertRaises(ValueError):
        self.experiment.update_tracking_metric(Metric(name="m5"))
    # Cannot remove a metric the experiment does not have.
    with self.assertRaises(ValueError):
        self.experiment.remove_tracking_metric(metric_name="m5")
def testEq(self):
    """An experiment equals itself but not a differently configured one."""
    self.assertEqual(self.experiment, self.experiment)
    other = Experiment(
        name="test2",
        search_space=get_search_space(),
        optimization_config=get_optimization_config(),
        status_quo=get_arm(),
        description="test description",
    )
    self.assertNotEqual(self.experiment, other)
def testBestArm(self):
    """best_arm_predictions passed to the constructor are exposed unchanged."""
    run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
        best_arm_predictions=(self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}})),
    )
    # Compare against a freshly built tuple so shared-object identity can't
    # mask an accidental mutation inside GeneratorRun.
    expected = (self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}}))
    self.assertEqual(run.best_arm_predictions, expected)
def testTrackingMetricsMerge(self):
    """Tracking metrics overlapping the optimization config are merged, not doubled."""
    # "m1" already appears in the optimization config; "m3" does not.
    exp = Experiment(
        name="test2",
        search_space=get_search_space(),
        optimization_config=get_optimization_config(),
        tracking_metrics=[Metric(name="m1"), Metric(name="m3")],
    )
    # Only the non-overlapping tracking metric ("m3") adds to the total.
    self.assertEqual(
        len(exp.optimization_config.metrics) + 1, len(exp.metrics))
def testModelPredictions(self):
    """Model predictions are surfaced when provided and are None otherwise."""
    self.assertEqual(
        self.unweighted_run.model_predictions, get_model_predictions())
    self.assertEqual(
        self.unweighted_run.model_predictions_by_arm,
        get_model_predictions_per_arm(),
    )
    # A run built without predictions reports None for both views.
    bare_run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
    )
    self.assertIsNone(bare_run.model_predictions)
    self.assertIsNone(bare_run.model_predictions_by_arm)
def testExperimentOutcomeConstraintUpdates(self):
    """Constraint edits should keep saved SQAMetric rows in sync with the experiment."""
    experiment = get_experiment_with_batch_trial()
    save_experiment(experiment)

    def assert_db_metric_count_matches():
        # Persisted metric rows must mirror the experiment's metric count.
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

    assert_db_metric_count_matches()

    # Update the existing constraint in place (new bound, same metric).
    opt_config = get_optimization_config()
    constraint = get_outcome_constraint()
    constraint.bound = -1.0
    opt_config.outcome_constraints = [constraint]
    experiment.optimization_config = opt_config
    save_experiment(experiment)
    assert_db_metric_count_matches()

    # Add a second constraint on a brand-new metric.
    extra_constraint = OutcomeConstraint(
        metric=Metric(name="outcome"), op=ComparisonOp.GEQ, bound=-0.5
    )
    opt_config.outcome_constraints = [constraint, extra_constraint]
    experiment.optimization_config = opt_config
    save_experiment(experiment)
    assert_db_metric_count_matches()

    # Drop the second constraint; its metric should become a tracking metric.
    opt_config.outcome_constraints = [constraint]
    experiment.optimization_config = opt_config
    save_experiment(experiment)
    assert_db_metric_count_matches()

    # Round-trip through storage and confirm equality.
    self.assertEqual(experiment, load_experiment(experiment.name))
def setUp(self):
    """Build shared fixtures plus one unweighted and one weighted GeneratorRun."""
    self.model_predictions = get_model_predictions()
    self.optimization_config = get_optimization_config()
    self.search_space = get_search_space()
    self.arms = get_arms()
    self.weights = [2, 1, 1]
    # Keyword arguments common to both runs.
    shared_kwargs = dict(
        arms=self.arms,
        optimization_config=self.optimization_config,
        search_space=self.search_space,
        model_predictions=self.model_predictions,
    )
    # Unweighted run also records timing metadata.
    self.unweighted_run = GeneratorRun(
        fit_time=4.0, gen_time=10.0, **shared_kwargs)
    self.weighted_run = GeneratorRun(weights=self.weights, **shared_kwargs)
def testBasicProperties(self):
    """Constructor-supplied attributes are reflected on the experiment fixture."""
    exp = self.experiment
    self.assertEqual(exp.status_quo, get_status_quo())
    self.assertEqual(exp.search_space, get_search_space())
    self.assertEqual(exp.optimization_config, get_optimization_config())
    # Strict equality against True (not just truthiness), as the fixture sets it.
    self.assertEqual(exp.is_test, True)