def _extract_optimization_trace_from_metrics(
    experiment: Experiment,
) -> np.ndarray:
    """Build the optimization trace from noiseless metric values.

    One entry per arm evaluation (batch-trial arms are repeated according to
    their weights): the feasible hypervolume for multi-objective experiments,
    or the best feasible objective value otherwise.
    """
    # Expand arm names so batch-trial arms appear once per unit of weight.
    names = []
    for trial in experiment.trials.values():
        for i, arm in enumerate(trial.arms):
            reps = int(trial.weights[i]) if isinstance(trial, BatchTrial) else 1
            names.extend([arm.name] * reps)
    iters_df = pd.DataFrame({"arm_name": names})
    data_df = experiment.fetch_data(noisy=False).df
    metrics = data_df["metric_name"].unique()
    true_values = {}
    for metric in metrics:
        df_m = data_df[data_df["metric_name"] == metric]
        # Get one row per arm
        df_m = df_m.groupby("arm_name").first().reset_index()
        # Align metric values with the per-evaluation arm sequence.
        df_b = pd.merge(iters_df, df_m, how="left", on="arm_name")
        true_values[metric] = df_b["mean"].values
    if isinstance(experiment.optimization_config, MultiObjectiveOptimizationConfig):
        return feasible_hypervolume(
            # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
            #  `Optional[ax.core.optimization_config.OptimizationConfig]`.
            optimization_config=experiment.optimization_config,
            values=true_values,
        )
    return best_feasible_objective(
        # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
        #  `Optional[ax.core.optimization_config.OptimizationConfig]`.
        optimization_config=experiment.optimization_config,
        values=true_values,
    )
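For intuition about the single-objective branch, `best_feasible_objective` presumably returns a running best over arms that satisfy the outcome constraints. The following is a minimal standalone sketch of that idea; `running_best_feasible` is a hypothetical helper (not part of Ax), and the NaN-before-first-feasible-arm behavior is an assumption, not Ax's documented semantics.

import numpy as np


def running_best_feasible(objective, feasibility, lower_is_better=False):
    """Cumulative best objective over arms that satisfy all outcome constraints.

    Assumed behavior: infeasible arms carry the previous best forward; entries
    before the first feasible arm are NaN.
    """
    best = np.nan
    trace = []
    for value, is_feasible in zip(objective, feasibility):
        better = value < best if lower_is_better else value > best
        if is_feasible and (np.isnan(best) or better):
            best = value
        trace.append(best)
    return np.array(trace)


# Objective is maximized; the third arm violates a constraint.
print(running_best_feasible(
    objective=np.array([0.2, 0.5, 0.9, 0.7]),
    feasibility=np.array([True, True, False, True]),
))  # [0.2 0.5 0.5 0.7]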
def test_feasible_hypervolume(self):
    ma = Metric(name="a", lower_is_better=False)
    mb = Metric(name="b", lower_is_better=True)
    mc = Metric(name="c", lower_is_better=False)
    optimization_config = MultiObjectiveOptimizationConfig(
        objective=MultiObjective(metrics=[ma, mb]),
        outcome_constraints=[
            OutcomeConstraint(
                mc,
                op=ComparisonOp.GEQ,
                bound=0,
                relative=False,
            )
        ],
        # The thresholds (a >= 1.0, b <= 1.0) act as the hypervolume
        # reference point.
        objective_thresholds=[
            ObjectiveThreshold(
                ma,
                bound=1.0,
            ),
            ObjectiveThreshold(
                mb,
                bound=1.0,
            ),
        ],
    )
    feas_hv = feasible_hypervolume(
        optimization_config,
        values={
            "a": np.array([1.0, 3.0, 2.0, 2.0]),
            "b": np.array([0.0, 1.0, 0.0, 0.0]),
            "c": np.array([0.0, -0.0, 1.0, -2.0]),
        },
    )
    # The last arm violates c >= 0, so it does not increase the trace.
    self.assertEqual(list(feas_hv), [0.0, 0.0, 1.0, 1.0])
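The expected trace can be reproduced by hand: with the thresholds as the reference point, only the third arm dominates a nonzero area, and the fourth arm is excluded by the constraint. The sketch below recomputes the cumulative feasible hypervolume with plain NumPy; `hv_2d` is a hypothetical helper written for this illustration and is independent of how Ax's `feasible_hypervolume` is implemented.

import numpy as np

# Observed values from the test above.
a = np.array([1.0, 3.0, 2.0, 2.0])    # maximized; threshold a >= 1.0
b = np.array([0.0, 1.0, 0.0, 0.0])    # minimized; threshold b <= 1.0
c = np.array([0.0, -0.0, 1.0, -2.0])  # constrained to c >= 0
feasible = c >= 0.0
ref_a, ref_b = 1.0, 1.0  # objective thresholds serve as the reference point


def hv_2d(points):
    """Area dominated w.r.t. (ref_a, ref_b) when a is maximized and b is minimized."""
    # Sweep points in decreasing order of a, accumulating non-overlapping strips.
    area, cur_b = 0.0, ref_b
    for pa, pb in sorted(points, reverse=True):
        if pa >= ref_a and pb < cur_b:
            area += (pa - ref_a) * (cur_b - pb)
            cur_b = pb
    return area


trace = [
    hv_2d([(a[j], b[j]) for j in range(i + 1) if feasible[j]])
    for i in range(len(a))
]
print(trace)  # [0.0, 0.0, 1.0, 1.0] -- matches the test's expected trace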