import numpy as np

from ax.core.data import Data
from ax.modelbridge.factory import get_MOO_EHVI
from ax.modelbridge.modelbridge_utils import observed_hypervolume
from ax.plot.pareto_utils import compute_pareto_frontier
from ax.service.utils.report_utils import exp_to_df

EHVImodel = None
for i in range(N_BATCH):
    # Refit the qEHVI model on all data collected so far.
    EHVImodel = get_MOO_EHVI(
        experiment=EHVIexperiment,
        data=EHVIdata,
    )
    # Generate one candidate, attach it as a new trial, and run it.
    generatorRun = EHVImodel.gen(1)
    trial = EHVIexperiment.new_trial(generator_run=generatorRun)
    trial.run()
    EHVIdata = Data.from_multiple_data([EHVIdata, trial.fetch_data()])

    exp_df = exp_to_df(EHVIexperiment)
    outcomes = np.array(exp_df[["Accuracy", "BitCost"]], dtype=np.double)
    # Track the observed hypervolume after each iteration; fall back to 0
    # if it cannot be computed yet (e.g., no feasible points).
    try:
        hv = observed_hypervolume(modelbridge=EHVImodel)
    except Exception:
        hv = 0
        print("Failed to compute hv")
    EHVIhvList.append(hv)
    print(f"Iteration: {i}, HV: {hv}")

EHVIoutcomes = np.array(
    exp_to_df(EHVIexperiment)[["Accuracy", "BitCost"]], dtype=np.double
)
frontier = compute_pareto_frontier(
    experiment=EHVIexperiment,
    data=EHVIexperiment.fetch_data(),
    primary_objective=metric_a,
    secondary_objective=metric_b,
    absolute_metrics=["Accuracy", "BitCost"],
)
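# The loop above relies on setup that isn't shown. A minimal sketch of that
# setup follows; everything here is an assumption for illustration, not taken
# from the original: N_BATCH, the build_experiment() helper, and the initial
# Sobol data are all hypothetical.

N_BATCH = 25  # assumed number of optimization iterations
EHVIexperiment = build_experiment()  # hypothetical helper returning an Ax
                                     # Experiment with "Accuracy"/"BitCost"
                                     # objectives and a runner attached
EHVIdata = EHVIexperiment.fetch_data()  # assumes initial (e.g., Sobol) trials
EHVIhvList = []  # collects the per-iteration observed hypervolumes
metric_a = EHVIexperiment.metrics["Accuracy"]
metric_b = EHVIexperiment.metrics["BitCost"]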
def test_hypervolume(self, _):
    exp = get_branin_experiment_with_multi_objective(
        has_optimization_config=True, with_batch=True
    )
    for trial in exp.trials.values():
        trial.mark_running(no_runner_required=True).mark_completed()
    # Both objectives share a reference point of (0, 0).
    metrics_dict = exp.optimization_config.metrics
    objective_thresholds = [
        ObjectiveThreshold(
            metric=metrics_dict["branin_a"],
            bound=0.0,
            relative=False,
            op=ComparisonOp.GEQ,
        ),
        ObjectiveThreshold(
            metric=metrics_dict["branin_b"],
            bound=0.0,
            relative=False,
            op=ComparisonOp.GEQ,
        ),
    ]
    optimization_config = exp.optimization_config.clone_with_args(
        objective_thresholds=objective_thresholds
    )
    exp.attach_data(
        get_branin_data_multi_objective(trial_indices=exp.trials.keys())
    )
    modelbridge = MultiObjectiveTorchModelBridge(
        search_space=exp.search_space,
        model=MultiObjectiveBotorchModel(),
        optimization_config=optimization_config,
        transforms=[],
        experiment=exp,
        data=exp.fetch_data(),
        objective_thresholds=objective_thresholds,
    )
    with patch(
        PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
    ) as wrapped_frontier_evaluator:
        modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
        hv = observed_hypervolume(
            modelbridge=modelbridge, objective_thresholds=objective_thresholds
        )
        expected_hv = 25  # (5 - 0) * (5 - 0)
        wrapped_frontier_evaluator.assert_called_once()
        self.assertEqual(expected_hv, hv)

        # Predicted hypervolume requires at least one observation feature.
        with self.assertRaises(ValueError):
            predicted_hypervolume(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=[],
            )

        observation_features = [
            ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),
            ObservationFeatures(parameters={"x1": 2.0, "x2": 1.0}),
        ]
        predicted_hv = predicted_hypervolume(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(predicted_hv >= 0)
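# The expected_hv of 25 follows from a single Pareto point at (5, 5) measured
# against the (0, 0) reference point. A quick standalone check with BoTorch's
# Hypervolume utility (which assumes maximization); the (5, 5) frontier point
# is an assumption consistent with the test's "(5 - 0) * (5 - 0)" comment:

import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

hv_calc = Hypervolume(ref_point=torch.tensor([0.0, 0.0]))
pareto_Y = torch.tensor([[5.0, 5.0]])  # assumed frontier from the test data
assert hv_calc.compute(pareto_Y) == 25.0  # (5 - 0) * (5 - 0)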
def test_hypervolume(self, _, cuda=False):
    for num_objectives in (2, 3):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True,
            with_batch=True,
            num_objectives=num_objectives,
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=1.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        if num_objectives == 3:
            objective_thresholds.append(
                ObjectiveThreshold(
                    metric=metrics_dict["branin_c"],
                    bound=2.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                )
            )
        optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds
        )
        exp.attach_data(
            get_branin_data_multi_objective(
                trial_indices=exp.trials.keys(), num_objectives=num_objectives
            )
        )
        modelbridge = TorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=optimization_config,
            transforms=[],
            experiment=exp,
            data=exp.fetch_data(),
            torch_device=torch.device("cuda" if cuda else "cpu"),
            objective_thresholds=objective_thresholds,
        )
        with patch(
            PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
        ) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            hv = observed_hypervolume(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
            )
            expected_hv = 20 if num_objectives == 2 else 60  # 5 * 4 (* 3)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(expected_hv, hv)

            if num_objectives == 3:
                # Test selected_metrics
                hv = observed_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    selected_metrics=["branin_a", "branin_c"],
                )
                expected_hv = 15  # (5 - 0) * (5 - 2)
                self.assertEqual(expected_hv, hv)
                # test that non-objective outcome raises value error
                with self.assertRaises(ValueError):
                    hv = observed_hypervolume(
                        modelbridge=modelbridge,
                        objective_thresholds=objective_thresholds,
                        selected_metrics=["tracking"],
                    )

            with self.assertRaises(ValueError):
                predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=[],
                )

            observation_features = [
                ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),
                ObservationFeatures(parameters={"x1": 2.0, "x2": 1.0}),
            ]
            predicted_hv = predicted_hypervolume(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
            )
            self.assertTrue(predicted_hv >= 0)

            if num_objectives == 3:
                # Test selected_metrics
                predicted_hv = predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=observation_features,
                    selected_metrics=["branin_a", "branin_c"],
                )
                self.assertTrue(predicted_hv >= 0)
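# selected_metrics restricts the hypervolume computation to a subset of the
# objectives; conceptually that is just slicing the frontier and the reference
# point down to the chosen columns. A sketch of that arithmetic, again using
# BoTorch's Hypervolume utility; the (5, 5, 5) frontier point is an assumption
# consistent with the test's "(5 - 0) * (5 - 2)" comment:

import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

frontier = torch.tensor([[5.0, 5.0, 5.0]])  # branin_a, branin_b, branin_c
ref_point = torch.tensor([0.0, 1.0, 2.0])   # the three thresholds above

cols = [0, 2]  # keep branin_a and branin_c, mirroring selected_metrics
sub_hv = Hypervolume(ref_point=ref_point[cols]).compute(frontier[:, cols])
assert sub_hv == 15.0  # (5 - 0) * (5 - 2), matching expected_hv above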