def test_MTGP(self):
    """Tests MTGP instantiation."""
    # Multi-type experiment: the factory should hand back a TorchModelBridge.
    multi_type_exp = get_multi_type_experiment(add_trials=True)
    multi_type_mtgp = get_MTGP(
        experiment=multi_type_exp, data=multi_type_exp.fetch_data()
    )
    self.assertIsInstance(multi_type_mtgp, TorchModelBridge)

    # Single-type experiment: seed several Sobol batch trials first, each
    # carrying a status quo arm.
    single_type_exp = get_branin_experiment()
    # Check that factory generates a valid sobol modelbridge.
    sobol_bridge = get_sobol(search_space=single_type_exp.search_space)
    self.assertIsInstance(sobol_bridge, RandomModelBridge)
    for _ in range(5):
        generator_run = sobol_bridge.gen(n=1)
        batch = single_type_exp.new_batch_trial().add_generator_run(generator_run)
        batch.set_status_quo_with_weight(status_quo=batch.arms[0], weight=0.5)
        batch.run().mark_completed()
    single_type_mtgp = get_MTGP(
        experiment=single_type_exp,
        data=single_type_exp.fetch_data(),
        trial_index=0,
    )
    self.assertIsInstance(single_type_mtgp, TorchModelBridge)
    # mtgp_run = mtgp.gen(
    #     n=1
    # )  # TODO[T110948251]: This is broken at the ChoiceEncode level

    # A trial index that does not exist on the experiment is rejected.
    with self.assertRaises(ValueError):
        get_MTGP(
            experiment=single_type_exp,
            data=single_type_exp.fetch_data(),
            trial_index=9,
        )

    # NOTE(review): this variant raises — likely because the batch trial has
    # no status quo set; confirm against get_MTGP's validation.
    no_sq_exp = get_branin_experiment()
    sobol_bridge = get_sobol(search_space=no_sq_exp.search_space)
    self.assertIsInstance(sobol_bridge, RandomModelBridge)
    batch = no_sq_exp.new_batch_trial().add_generator_run(sobol_bridge.gen(n=1))
    batch.run().mark_completed()
    with self.assertRaises(ValueError):
        get_MTGP(experiment=no_sq_exp, data=no_sq_exp.fetch_data(), trial_index=0)
def setUp(self):
    """Builds a multi-type experiment fixture and its derived observations.

    Exposes the experiment, its fetched data, the observations extracted
    from that data (split into data/features lists), and the transform
    config derived from the experiment.
    """
    self.experiment = get_multi_type_experiment(add_trials=True)
    self.data = self.experiment.fetch_data()
    self.observations = observations_from_data(self.experiment, self.data)
    self.observation_data = [obs.data for obs in self.observations]
    self.observation_features = [obs.features for obs in self.observations]
    self.tconfig = tconfig_from_mt_experiment(self.experiment)
def testEq(self):
    """Equality should track the experiments' tracking-metric definitions."""
    other = get_multi_type_experiment()
    # Freshly built copies start out equal.
    self.assertTrue(self.experiment == other)

    # Adding a metric to only one side breaks equality.
    self.experiment.add_tracking_metric(
        BraninMetric("m3", ["x2", "x1"]), trial_type="type1", canonical_name="m4"
    )
    self.assertFalse(self.experiment == other)

    # Same metric name but a different canonical name is still unequal.
    other.add_tracking_metric(
        BraninMetric("m3", ["x2", "x1"]), trial_type="type1", canonical_name="m5"
    )
    self.assertFalse(self.experiment == other)

    # Updating the metric to match restores equality.
    other.update_tracking_metric(
        BraninMetric("m3", ["x2", "x1"]), trial_type="type1", canonical_name="m4"
    )
    self.assertTrue(self.experiment == other)

    # Removing the metric from one side breaks equality again.
    other.remove_tracking_metric("m3")
    self.assertFalse(self.experiment == other)
def testMTExperimentSaveAndLoad(self):
    """Round-trips a multi-type experiment through storage and spot-checks it."""
    experiment = get_multi_type_experiment(add_trials=True)
    save_experiment(experiment)
    loaded = load_experiment(experiment.name)
    # Trial-type plumbing survives the round trip.
    self.assertEqual(loaded.default_trial_type, "type1")
    self.assertEqual(len(loaded._trial_type_to_runner), 2)
    # Metric-to-trial-type and canonical-name mappings are preserved.
    self.assertEqual(loaded.metric_to_trial_type["m1"], "type1")
    self.assertEqual(loaded.metric_to_trial_type["m2"], "type2")
    self.assertEqual(loaded._metric_to_canonical_name["m2"], "m1")
    # Both trials come back.
    self.assertEqual(len(loaded.trials), 2)
def test_multi_type_experiment(self):
    """Constructing the MOO torch bridge on a multi-type experiment must fail."""
    experiment = get_multi_type_experiment()
    with self.assertRaises(NotImplementedError):
        MultiObjectiveTorchModelBridge(
            experiment=experiment,
            search_space=experiment.search_space,
            model=MultiObjectiveBotorchModel(),
            transforms=[],
            data=experiment.fetch_data(),
            objective_thresholds={"branin_b": 0.0},
        )
def test_exp_to_df(self):
    """End-to-end checks of exp_to_df: rejection of multi-type experiments,
    empty output for trial-less experiments, expected columns/values for a
    batch experiment, mocked fetch_data behavior, and manual-arm labeling.
    """
    # MultiTypeExperiment should fail
    exp = get_multi_type_experiment()
    with self.assertRaisesRegex(ValueError, "MultiTypeExperiment"):
        exp_to_df(exp=exp)

    # exp with no trials should return empty results
    exp = get_branin_experiment()
    df = exp_to_df(exp=exp)
    self.assertEqual(len(df), 0)

    # set up experiment
    exp = get_branin_experiment(with_batch=True)

    # check that pre-run experiment returns all columns except objective
    df = exp_to_df(exp)
    self.assertEqual(set(EXPECTED_COLUMNS) - set(df.columns), {OBJECTIVE_NAME})
    self.assertEqual(len(df.index), len(exp.arms_by_name))

    exp.trials[0].run()

    # assert result is df with expected columns and length
    df = exp_to_df(exp=exp)
    self.assertIsInstance(df, pd.DataFrame)
    self.assertListEqual(sorted(df.columns), sorted(EXPECTED_COLUMNS))
    self.assertEqual(len(df.index), len(exp.arms_by_name))

    # test with run_metadata_fields and trial_properties_fields not empty
    # add source to properties
    for _, trial in exp.trials.items():
        trial._properties["source"] = DUMMY_SOURCE
    df = exp_to_df(
        exp, run_metadata_fields=["name"], trial_properties_fields=["source"]
    )
    self.assertIn("name", df.columns)
    self.assertIn("trial_properties_source", df.columns)

    # test column values or types
    self.assertTrue(all(x == 0 for x in df.trial_index))
    self.assertTrue(all(x == "RUNNING" for x in df.trial_status))
    self.assertTrue(all(x == "Sobol" for x in df.generation_method))
    self.assertTrue(all(x == DUMMY_SOURCE for x in df.trial_properties_source))
    self.assertTrue(all(x == "branin_test_experiment_0" for x in df.name))
    for float_column in FLOAT_COLUMNS:
        self.assertTrue(all(isinstance(x, float) for x in df[float_column]))

    # works correctly for failed trials (will need to mock)
    # Single-row fake fetch_data payload shaped like an Ax Data result.
    dummy_struct = namedtuple("dummy_struct", "df")
    mock_results = dummy_struct(
        df=pd.DataFrame(
            {
                "arm_name": ["0_0"],
                "metric_name": [OBJECTIVE_NAME],
                "mean": [DUMMY_OBJECTIVE_MEAN],
                "sem": [0],
                "trial_index": [0],
                "n": [123],
                "frac_nonnull": [1],
            }
        )
    )
    with patch.object(
        Experiment, "fetch_data", lambda self, metrics: mock_results
    ):
        df = exp_to_df(exp=exp)

    # all but one row should have a metric value of NaN
    self.assertEqual(pd.isna(df[OBJECTIVE_NAME]).sum(), len(df.index) - 1)

    # an experiment with more results than arms raises an error
    with patch.object(
        Experiment, "fetch_data", lambda self, metrics: mock_results
    ), self.assertRaisesRegex(ValueError, "inconsistent experimental state"):
        exp_to_df(exp=get_branin_experiment())

    # custom added trial has a generation_method of Manual
    custom_arm = Arm(name="custom", parameters={"x1": 0, "x2": 0})
    exp.new_trial().add_arm(custom_arm)
    df = exp_to_df(exp)
    self.assertEqual(
        df[df.arm_name == "custom"].iloc[0].generation_method, "Manual"
    )
def setUp(self):
    """Creates a fresh multi-type experiment fixture for each test."""
    self.experiment = get_multi_type_experiment()