def _get_objective_trace_plot(
    experiment: Experiment,
    metric_name: str,
    model_transitions: List[int],
    optimization_direction: Optional[str] = None,
) -> Optional[go.Figure]:
    """Return a plotly trace of the best objective value vs. iteration.

    Args:
        experiment: Experiment whose fetched data is plotted.
        metric_name: Name of the objective metric; used as the y-axis label
            and to select the relevant rows from the fetched data.
        model_transitions: Trial indices at which the generation model changed.
        optimization_direction: Optional "minimize"/"maximize" hint forwarded
            to the plotting helper.

    Returns:
        The trace figure, or None if it could not be constructed.
    """
    df = experiment.fetch_data().df
    # Restrict to the objective metric's rows. Without this filter, means for
    # every metric in the experiment would be mixed into a single trace
    # (the sibling data-based variant of this helper filters the same way).
    best_objectives = np.array([df[df["metric_name"] == metric_name]["mean"]])
    return optimization_trace_single_method_plotly(
        y=best_objectives,
        title="Best objective found vs. # of iterations",
        ylabel=metric_name,
        model_transitions=model_transitions,
        optimization_direction=optimization_direction,
        plot_trial_points=True,
    )
def _get_objective_trace_plot(
    trials: Dict[int, BaseTrial],
    metric_name: str,
    model_transitions: List[int],
    optimization_direction: Optional[str] = None,
    # pyre-ignore[11]: Annotation `go.Figure` is not defined as a type.
) -> Optional[go.Figure]:
    """Build the best-objective-vs-iteration trace from per-trial means.

    Each trial is expected to be a single-arm ``Trial``; its objective mean
    supplies one point of the trace, in trial-dict iteration order.
    """
    objective_means = [
        checked_cast(Trial, trial).objective_mean for trial in trials.values()
    ]
    # Shape (1, num_trials): one method's trace of observed objective means.
    best_objectives = np.array([objective_means])
    return optimization_trace_single_method_plotly(
        y=best_objectives,
        title="Best objective found vs. # of iterations",
        ylabel=metric_name,
        model_transitions=model_transitions,
        optimization_direction=optimization_direction,
        plot_trial_points=True,
    )
def testTraces(self):
    """Smoke-test that both trace plot flavors build from a fitted model."""
    exp = get_branin_experiment(with_batch=True)
    exp.trials[0].run()
    model = Models.BOTORCH(
        # Model bridge kwargs
        experiment=exp,
        data=exp.fetch_data(),
    )
    y_values = np.array([[1, 2, 3], [4, 5, 6]])
    first_metric = list(model.metric_names)[0]
    # Each type of plot should be constructed without raising.
    plotly_fig = optimization_trace_single_method_plotly(
        y_values,
        first_metric,
        optimization_direction="minimize",
    )
    self.assertIsInstance(plotly_fig, go.Figure)
    ax_plot = optimization_trace_single_method(
        y_values,
        first_metric,
        optimization_direction="minimize",
    )
    self.assertIsInstance(ax_plot, AxPlotConfig)
def _get_objective_trace_plot(
    experiment: Experiment,
    data: Union[Data, MapData],
    model_transitions: List[int],
) -> Optional[go.Figure]:
    """Return a plotly trace of the best objective value vs. iteration.

    For multi-objective problems, delegates to the hypervolume trace instead.

    Args:
        experiment: Experiment providing the optimization config (objective
            metric name and minimize flag).
        data: Data (or MapData) whose ``df`` supplies the observed means.
        model_transitions: Trial indices at which the generation model changed.

    Returns:
        The trace figure, or None if it could not be constructed.
    """
    if experiment.is_moo_problem:
        return _get_hypervolume_trace()
    # Hoist the (required) optimization config once instead of re-running
    # `not_none(experiment.optimization_config)` for each use below.
    optimization_config = not_none(experiment.optimization_config)
    metric_name = optimization_config.objective.metric.name
    df = data.df
    # Only rows for the objective metric belong in the trace.
    best_objectives = np.array([df[df["metric_name"] == metric_name]["mean"]])
    return optimization_trace_single_method_plotly(
        y=best_objectives,
        title="Best objective found vs. # of iterations",
        ylabel=metric_name,
        model_transitions=model_transitions,
        optimization_direction=(
            "minimize" if optimization_config.objective.minimize else "maximize"
        ),
        plot_trial_points=True,
    )
def _get_objective_trace_plot(
    experiment: Experiment,
    data: Data,
    model_transitions: List[int],
    true_objective_metric_name: Optional[str] = None,
) -> Iterable[go.Figure]:
    """Return trace plots of the objective (and optional true objective).

    For multi-objective problems a 2-D Pareto frontier scatter is returned
    instead. For single-objective problems, one trace is produced per metric
    name (the objective metric, plus ``true_objective_metric_name`` if given).

    Args:
        experiment: Experiment providing metrics and the optimization config.
        data: Data whose ``df`` supplies the observed means per metric.
        model_transitions: Trial indices at which the generation model changed.
        true_objective_metric_name: Optional extra metric to trace.

    Returns:
        A list of successfully constructed figures (may be empty).
    """
    if experiment.is_moo_problem:
        # TODO: implement `_get_hypervolume_trace()`
        return [_pareto_frontier_scatter_2d_plotly(experiment=experiment)]

    optimization_config = experiment.optimization_config
    if optimization_config is None:
        return []

    def _direction(metric_name: str) -> str:
        # Prefer the metric's own lower_is_better property; fall back on the
        # objective's minimize flag when it is unset. (Extracted from the
        # original triply nested conditional expression; `lower_is_better`
        # is Optional[bool], so truthiness is equivalent to `is True`.)
        lower_is_better = experiment.metrics[metric_name].lower_is_better
        if lower_is_better is None:
            lower_is_better = optimization_config.objective.minimize
        return "minimize" if lower_is_better else "maximize"

    metric_names = [
        metric_name
        for metric_name in (
            optimization_config.objective.metric.name,
            true_objective_metric_name,
        )
        if metric_name is not None
    ]
    plots = [
        optimization_trace_single_method_plotly(
            y=np.array([data.df[data.df["metric_name"] == metric_name]["mean"]]),
            title=f"Best {metric_name} found vs. # of iterations",
            ylabel=metric_name,
            model_transitions=model_transitions,
            optimization_direction=_direction(metric_name),
            plot_trial_points=True,
        )
        for metric_name in metric_names
    ]
    # Drop any plots the helper declined to build.
    return [plot for plot in plots if plot is not None]