def testSearchSpaceSetter(self):
    """Search-space reassignment is only validated once trials exist."""
    single_param_space = SearchSpace(
        parameters=[get_search_space().parameters["w"]]
    )

    # With no trials attached, any valid search space may be assigned.
    self.experiment.search_space = single_param_space
    self.assertEqual(len(self.experiment.parameters), 1)

    # Restore the full space and create a batch so validation kicks in.
    self.experiment.search_space = get_search_space()
    self.experiment.new_batch_trial()

    # A space missing parameters is now rejected.
    with self.assertRaises(ValueError):
        self.experiment.search_space = single_param_space

    # A space whose parameter changed type is rejected.
    mismatched_type_space = get_search_space()
    mismatched_type_space.parameters["x"]._parameter_type = ParameterType.FLOAT
    with self.assertRaises(ValueError):
        self.experiment.search_space = mismatched_type_space

    # A space carrying an extra parameter is rejected as well.
    augmented_space = get_search_space()
    augmented_space.add_parameter(
        FixedParameter("l", ParameterType.FLOAT, 0.5)
    )
    with self.assertRaises(ValueError):
        self.experiment.search_space = augmented_space
def testNumArmsNoDeduplication(self):
    """Trial sizes count duplicate arms; the name registry deduplicates."""
    experiment = Experiment(
        name="test_experiment", search_space=get_search_space()
    )
    shared_arm = get_arm()
    experiment.new_batch_trial().add_arm(shared_arm)
    second_trial = experiment.new_batch_trial().add_arm(shared_arm)
    # The same arm in two trials is counted twice across trial sizes...
    self.assertEqual(experiment.sum_trial_sizes, 2)
    # ...but appears only once in the name-keyed arm registry.
    self.assertEqual(len(experiment.arms_by_name), 1)
    second_trial.mark_arm_abandoned(second_trial.arms[0].name)
    self.assertEqual(experiment.num_abandoned_arms, 1)
def testEq(self):
    """An experiment equals itself and differs from a distinct experiment."""
    self.assertEqual(self.experiment, self.experiment)
    other_experiment = Experiment(
        name="test2",
        search_space=get_search_space(),
        optimization_config=get_optimization_config(),
        status_quo=get_arm(),
        description="test description",
    )
    self.assertNotEqual(self.experiment, other_experiment)
def testBestArm(self):
    """best_arm_predictions round-trips through the GeneratorRun constructor."""
    expected_predictions = (self.arms[0], ({"a": 1.0}, {"a": {"a": 2.0}}))
    run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
        best_arm_predictions=expected_predictions,
    )
    self.assertEqual(run.best_arm_predictions, expected_predictions)
def testTrackingMetricsMerge(self):
    """Tracking metrics merge with optimization metrics without duplication.

    "m1" already lives on the optimization config while "m3" does not, so
    the merged metric set is exactly one larger than the config's own.
    """
    experiment = Experiment(
        name="test2",
        search_space=get_search_space(),
        optimization_config=get_optimization_config(),
        tracking_metrics=[Metric(name="m1"), Metric(name="m3")],
    )
    self.assertEqual(
        len(experiment.optimization_config.metrics) + 1,
        len(experiment.metrics),
    )
def testEmptyMetrics(self):
    """Fetching data without metrics raises; attaching data resolves it."""
    experiment = Experiment(
        name="test_experiment", search_space=get_search_space()
    )
    self.assertEqual(experiment.num_trials, 0)
    # No metrics defined yet, so fetching at the experiment level fails.
    with self.assertRaises(ValueError):
        experiment.fetch_data()
    batch = experiment.new_batch_trial()
    self.assertEqual(experiment.num_trials, 1)
    # Same failure at the trial level.
    with self.assertRaises(ValueError):
        batch.fetch_data()
    # Once a metric exists and data is attached, fetching yields rows.
    experiment.add_tracking_metric(Metric(name="some_metric"))
    experiment.attach_data(get_data())
    self.assertFalse(experiment.fetch_data().df.empty)
def testModelPredictions(self):
    """Predictions are exposed per-run and per-arm, and are None when omitted."""
    self.assertEqual(
        self.unweighted_run.model_predictions, get_model_predictions()
    )
    self.assertEqual(
        self.unweighted_run.model_predictions_by_arm,
        get_model_predictions_per_arm(),
    )
    # A run constructed without predictions reports None for both views.
    bare_run = GeneratorRun(
        arms=self.arms,
        weights=self.weights,
        optimization_config=get_optimization_config(),
        search_space=get_search_space(),
    )
    self.assertIsNone(bare_run.model_predictions)
    self.assertIsNone(bare_run.model_predictions_by_arm)
def testExperimentParameterUpdates(self):
    """Parameter update, add, and remove all persist correctly via SQA."""
    experiment = get_experiment_with_batch_trial()
    save_experiment(experiment)

    def assert_db_parameter_count():
        # DB row count must mirror the in-memory search space.
        self.assertEqual(
            get_session().query(SQAParameter).count(),
            len(experiment.search_space.parameters),
        )

    assert_db_parameter_count()

    # Update an existing parameter (should be an in-place DB update).
    search_space = get_search_space()
    choice_parameter = get_choice_parameter()
    choice_parameter.add_values(["foobar"])
    search_space.update_parameter(choice_parameter)
    experiment.search_space = search_space
    save_experiment(experiment)
    assert_db_parameter_count()

    # Add a brand-new parameter.
    search_space.add_parameter(
        RangeParameter(
            name="x1", parameter_type=ParameterType.FLOAT, lower=-5, upper=10
        )
    )
    experiment.search_space = search_space
    save_experiment(experiment)
    assert_db_parameter_count()

    # Remove a parameter (its old row should be deleted).
    del search_space._parameters["x1"]
    experiment.search_space = search_space
    save_experiment(experiment)
    assert_db_parameter_count()

    # The persisted experiment round-trips intact.
    loaded_experiment = load_experiment(experiment.name)
    self.assertEqual(experiment, loaded_experiment)
def get_modelbridge(mock_gen_arms, mock_observations_from_data,
                    status_quo_name: Optional[str] = None) -> ModelBridge:
    """Build a ModelBridge over the test experiment with a stubbed ``_predict``.

    The first two parameters receive the patched mocks injected by the
    caller's ``mock.patch`` decorators; they are not used directly here.
    """
    experiment = get_experiment()
    bridge = ModelBridge(
        search_space=get_search_space(),
        model=FullFactorialGenerator(),
        experiment=experiment,
        data=get_data(),
        status_quo_name=status_quo_name,
    )
    # Replace prediction with a mock that always returns canned observation
    # data. NOTE(review): the positional string here becomes MagicMock's
    # `spec`, and `autospec` is just an attribute on the mock — presumably
    # the intent was mock.patch-style autospeccing; confirm before changing.
    bridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        return_value=[get_observation().data],
    )
    return bridge
def setUp(self):
    """Create unweighted and weighted GeneratorRuns over shared fixtures."""
    self.model_predictions = get_model_predictions()
    self.optimization_config = get_optimization_config()
    self.search_space = get_search_space()
    self.arms = get_arms()
    self.weights = [2, 1, 1]
    # Both runs share the same arms, config, space, and predictions.
    shared_kwargs = dict(
        arms=self.arms,
        optimization_config=self.optimization_config,
        search_space=self.search_space,
        model_predictions=self.model_predictions,
    )
    self.unweighted_run = GeneratorRun(
        fit_time=4.0, gen_time=10.0, **shared_kwargs
    )
    self.weighted_run = GeneratorRun(weights=self.weights, **shared_kwargs)
def testBasicProperties(self):
    """The experiment fixture exposes the expected core properties."""
    experiment = self.experiment
    self.assertEqual(experiment.status_quo, get_status_quo())
    self.assertEqual(experiment.search_space, get_search_space())
    self.assertEqual(experiment.optimization_config, get_optimization_config())
    self.assertEqual(experiment.is_test, True)