def plot_progress(self, ax_client):
    """Write optimization-trace and model-interaction plots to an HTML report.

    Fits a GPEI model to the experiment's current data, renders the
    optimization trace plus either a contour plot (two or more range
    parameters) or a slice plot (a single range parameter), and writes
    the result to ``optimization_plots.html`` under
    ``self.bayes_opt_root_experiment_folder``.

    Args:
        ax_client: An Ax client holding the experiment whose progress
            should be visualized.
    """
    model = Models.GPEI(
        experiment=ax_client.experiment,
        data=ax_client.experiment.fetch_data())
    html_elements = [plot_config_to_html(ax_client.get_optimization_trace())]
    model_params = get_range_parameters(model)
    try:
        if len(model_params) > 1:
            # Multiple tunable range parameters: pairwise contour plots.
            html_elements.append(
                plot_config_to_html(
                    interact_contour(
                        model=model,
                        metric_name=self.YR.args.eval_primary_metric)))
        else:
            # Single range parameter: a 1-D slice is the only option.
            html_elements.append(
                plot_config_to_html(
                    interact_slice(
                        model=model,
                        param_name=model_params[0].name,
                        metric_name=self.YR.args.eval_primary_metric)))
    except TypeError:
        # Best-effort: presumably some Ax plot helpers raise TypeError for
        # models/search spaces they cannot visualize — skip that plot rather
        # than fail the whole report. TODO(review): confirm TypeError is the
        # only expected failure mode here.
        pass
    report_path = os.path.join(
        self.bayes_opt_root_experiment_folder, "optimization_plots.html")
    # Explicit utf-8 so non-ASCII characters in the rendered HTML cannot
    # crash the write on platforms with a non-UTF-8 default encoding.
    with open(report_path, 'w', encoding="utf-8") as f:
        f.write(render_report_elements(self.experiment_name, html_elements))
def plot_progress(self, ax_client):
    """Write the optimization trace (and, best-effort, a contour plot) to HTML.

    Fits a GPEI model to the experiment's current data and writes an HTML
    report named ``optimization_plots.html`` into
    ``self.bayes_opt_root_experiment_folder``.

    Args:
        ax_client: An Ax client holding the experiment whose progress
            should be visualized.
    """
    model = Models.GPEI(
        experiment=ax_client.experiment,
        data=ax_client.experiment.fetch_data())
    html_elements = [plot_config_to_html(ax_client.get_optimization_trace())]
    try:
        html_elements.append(
            plot_config_to_html(
                interact_contour(
                    model=model,
                    metric_name=self.YR.args.eval_primary_metric)))
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. Keep the best-effort behavior (a missing contour plot
        # should not fail the report) but only for ordinary exceptions.
        pass
    report_path = os.path.join(
        self.bayes_opt_root_experiment_folder, "optimization_plots.html")
    # Explicit utf-8 so non-ASCII characters in the rendered HTML cannot
    # crash the write on platforms with a non-UTF-8 default encoding.
    with open(report_path, 'w', encoding="utf-8") as f:
        f.write(render_report_elements(self.experiment_name, html_elements))
def plot_progress(ax_client, root_experiment_folder, experiment_name):
    """Write the optimization trace (and, best-effort, a contour plot) to HTML.

    Args:
        ax_client: An Ax client holding the experiment whose progress
            should be visualized.
        root_experiment_folder: Directory in which
            ``optimization_plots.html`` is written.
        experiment_name: Name used in the rendered report header.
    """
    html_elements = [plot_config_to_html(ax_client.get_optimization_trace())]
    try:
        html_elements.append(plot_config_to_html(ax_client.get_contour_plot()))
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. Keep the best-effort behavior (a missing contour plot
        # should not fail the report) but only for ordinary exceptions.
        pass
    report_path = os.path.join(root_experiment_folder, "optimization_plots.html")
    # Explicit utf-8 so non-ASCII characters in the rendered HTML cannot
    # crash the write on platforms with a non-UTF-8 default encoding.
    with open(report_path, 'w', encoding="utf-8") as f:
        f.write(render_report_elements(experiment_name, html_elements))
def render(plot_config: AxPlotConfig, inject_helpers=False) -> None:
    """Display the given plot config as HTML in the current frontend."""
    rendered_html = plot_config_to_html(plot_config, inject_helpers=inject_helpers)
    display({"text/html": rendered_html}, raw=True)
def generate_report(
    benchmark_results: Dict[str, BenchmarkResult],
    errors_encountered: Optional[List[str]] = None,
    include_individual_method_plots: bool = False,
    notebook_env: bool = False,
) -> str:
    """Build an HTML report for a set of benchmarking results.

    Args:
        benchmark_results: Mapping from problem name to its benchmark result.
        errors_encountered: Optional error messages to surface in the report.
        include_individual_method_plots: Whether per-method plots are included.
        notebook_env: Whether the report is rendered for a notebook frontend.

    Returns:
        The rendered HTML report as a string.
    """
    html_elements = [h2_html("Bayesian Optimization benchmarking suite report")]
    for problem_name, result in benchmark_results.items():
        html_elements.append(h3_html(f"{problem_name}:"))
        problem_plots = make_plots(
            result,
            problem_name=problem_name,
            include_individual=include_individual_method_plots,
        )
        for plot in problem_plots:
            html_elements.append(plot_config_to_html(plot))
    if not errors_encountered:
        html_elements.append(h3_html("No errors encountered!"))
    else:
        html_elements.append(h3_html("Errors encountered:"))
        for error_message in errors_encountered:
            html_elements.append(p_html(error_message))
    # Experiment name is used in header, which is disabled in this case.
    return render_report_elements(
        experiment_name="",
        html_elements=html_elements,
        header=False,
        notebook_env=notebook_env,
    )
def render(plot_config: AxPlotConfig, inject_helpers=False) -> None:
    """Display the given plot config in the current frontend."""
    # Generic plotly payloads are drawn directly; everything else is
    # converted to HTML first.
    if plot_config.plot_type == AxPlotTypes.GENERIC:
        iplot(plot_config.data)
        return
    rendered_html = plot_config_to_html(plot_config, inject_helpers=inject_helpers)
    display({"text/html": rendered_html}, raw=True)
def generate_report(self, include_individual: bool = False) -> str:
    """Render the aggregated benchmark results of this runner as HTML.

    Args:
        include_individual: Whether per-method plots are included.

    Returns:
        The rendered HTML report as a string.
    """
    aggregated = self._runner.aggregate_results()
    html_elements = [h2_html("Bayesian Optimization benchmarking suite report")]
    for problem_name, result in aggregated.items():
        html_elements.append(h3_html(f"{problem_name}:"))
        for plot in self._make_plots(
            result, problem_name=problem_name, include_individual=include_individual
        ):
            html_elements.append(plot_config_to_html(plot))
    error_messages = self._runner._error_messages
    if not error_messages:
        html_elements.append(h3_html("No errors encountered"))
    else:
        html_elements.append(h3_html("Errors encountered"))
        for message in error_messages:
            html_elements.append(p_html(message))
    return render_report_elements("bo_benchmark_suite_test", html_elements)