def test_get_standard_plots(self):
    exp = get_branin_experiment()
    self.assertEqual(
        len(
            get_standard_plots(
                experiment=exp, generation_strategy=get_generation_strategy()
            )
        ),
        0,
    )
def test_get_standard_plots(self):
    # An experiment with no completed trials yields no standard plots.
    exp = get_branin_experiment()
    self.assertEqual(
        len(
            get_standard_plots(
                experiment=exp, model=get_generation_strategy().model
            )
        ),
        0,
    )

    # After running a batch trial and fitting a BoTorch model, plots are produced.
    exp = get_branin_experiment(with_batch=True, minimize=True)
    exp.trials[0].run()
    gs = choose_generation_strategy(search_space=exp.search_space)
    gs._model = Models.BOTORCH(
        experiment=exp,
        data=exp.fetch_data(),
    )
    plots = get_standard_plots(experiment=exp, model=gs.model)
    self.assertEqual(len(plots), 3)
    self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
def test_get_standard_plots(self):
    # An experiment with no completed trials yields no standard plots.
    exp = get_branin_experiment()
    self.assertEqual(
        len(
            get_standard_plots(
                experiment=exp, model=get_generation_strategy().model
            )
        ),
        0,
    )

    # Single-objective Branin experiment with a fitted BoTorch model.
    exp = get_branin_experiment(with_batch=True, minimize=True)
    exp.trials[0].run()
    plots = get_standard_plots(
        experiment=exp,
        model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
    )
    self.assertEqual(len(plots), 6)
    self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))

    # Multi-objective Branin experiment with explicit objective thresholds.
    exp = get_branin_experiment_with_multi_objective(with_batch=True)
    exp.optimization_config.objective.objectives[0].minimize = False
    exp.optimization_config.objective.objectives[1].minimize = True
    exp.optimization_config._objective_thresholds = [
        ObjectiveThreshold(
            metric=exp.metrics["branin_a"], op=ComparisonOp.GEQ, bound=-100.0
        ),
        ObjectiveThreshold(
            metric=exp.metrics["branin_b"], op=ComparisonOp.LEQ, bound=100.0
        ),
    ]
    exp.trials[0].run()
    plots = get_standard_plots(
        experiment=exp, model=Models.MOO(experiment=exp, data=exp.fetch_data())
    )
    self.assertEqual(len(plots), 7)

    # All plots are successfully created when objective thresholds are absent.
    exp.optimization_config._objective_thresholds = []
    plots = get_standard_plots(
        experiment=exp, model=Models.MOO(experiment=exp, data=exp.fetch_data())
    )
    self.assertEqual(len(plots), 7)

    # Map-metric experiment with a status quo arm and a true objective metric.
    exp = get_branin_experiment_with_timestamp_map_metric(with_status_quo=True)
    exp.new_trial().add_arm(exp.status_quo)
    exp.trials[0].run()
    exp.new_trial(
        generator_run=Models.SOBOL(search_space=exp.search_space).gen(n=1)
    )
    exp.trials[1].run()
    plots = get_standard_plots(
        experiment=exp,
        model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
        true_objective_metric_name="branin",
    )
    self.assertEqual(len(plots), 9)
    self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
    self.assertIn(
        "Objective branin_map vs. True Objective Metric branin",
        [p.layout.title.text for p in plots],
    )

    # An unknown true objective metric name raises a ValueError.
    with self.assertRaisesRegex(
        ValueError, "Please add a valid true_objective_metric_name"
    ):
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
            true_objective_metric_name="not_present",
        )
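# A minimal usage sketch, assuming the same helpers used in the tests above
# (`get_branin_experiment`, `Models`, `get_standard_plots`): the figures
# returned by `get_standard_plots` are plotly `go.Figure` objects, so outside
# of a unit test they can be rendered directly, e.g. in a notebook.
#
#     exp = get_branin_experiment(with_batch=True, minimize=True)
#     exp.trials[0].run()
#     model = Models.BOTORCH(experiment=exp, data=exp.fetch_data())
#     for plot in get_standard_plots(experiment=exp, model=model):
#         plot.show()  # each entry is a plotly go.Figure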