Example 1
# Imports for this snippet (module paths follow Ax's plotting/benchmarking layout
# and may vary across Ax versions).
from typing import List

from ax.benchmark.benchmark_result import BenchmarkResult
from ax.plot.base import AxPlotConfig
from ax.plot.trace import (
    optimization_times,
    optimization_trace_all_methods,
    optimization_trace_single_method,
)


def make_plots(benchmark_result: BenchmarkResult, problem_name: str,
               include_individual: bool) -> List[AxPlotConfig]:
    plots: List[AxPlotConfig] = []
    # Plot objective at true best
    plots.append(
        optimization_trace_all_methods(
            y_dict=benchmark_result.objective_at_true_best,
            optimum=benchmark_result.optimum,
            title=f"{problem_name}: cumulative best objective",
            ylabel="Objective at best-feasible point observed so far",
        ))
    if include_individual:
        # Plot individual plots of a single method on a single problem.
        for m, y in benchmark_result.objective_at_true_best.items():
            plots.append(
                optimization_trace_single_method(
                    y=y,
                    optimum=benchmark_result.optimum,
                    # model_transitions=benchmark_result.model_transitions[m],
                    title=f"{problem_name}, {m}: cumulative best objective",
                    ylabel="Objective at best-feasible point observed so far",
                ))
    # Plot time
    plots.append(
        optimization_times(
            fit_times=benchmark_result.fit_times,
            gen_times=benchmark_result.gen_times,
            title=f"{problem_name}: optimization times",
        ))
    return plots
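
A minimal usage sketch for the function above, assuming a BenchmarkResult has already been produced elsewhere and that plots are displayed in a notebook via Ax's render helper; the wrapper render_benchmark_plots is illustrative, not part of the source:

from ax.utils.notebook.plotting import render


def render_benchmark_plots(result: BenchmarkResult, problem_name: str) -> None:
    # Build the trace and timing plots, including per-method traces.
    plots = make_plots(result, problem_name, include_individual=True)
    # Display each AxPlotConfig (intended for a notebook environment).
    for plot_config in plots:
        render(plot_config)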
Example 2
# Imports for this snippet (module paths may vary across Ax versions).
from typing import List

from ax.benchmark.benchmark_result import BenchmarkResult
from ax.plot.base import AxPlotConfig
from ax.plot.pareto_frontier import plot_multiple_pareto_frontiers
from ax.plot.trace import (
    optimization_times,
    optimization_trace_all_methods,
    optimization_trace_single_method,
)
from ax.utils.common.typeutils import not_none


def make_plots(
    benchmark_result: BenchmarkResult, problem_name: str, include_individual: bool
) -> List[AxPlotConfig]:
    plots: List[AxPlotConfig] = []
    # Plot objective at true best
    ylabel = (
        "Feasible Hypervolume"
        if benchmark_result.is_multi_objective
        else "Objective at best-feasible point observed so far"
    )
    plots.append(
        optimization_trace_all_methods(
            y_dict=benchmark_result.true_performance,
            optimum=benchmark_result.optimum,
            title=f"{problem_name}: Optimization Performance",
            ylabel=ylabel,
        )
    )
    if include_individual:
        # Plot individual plots of a single method on a single problem.
        for m, y in benchmark_result.true_performance.items():
            plots.append(
                optimization_trace_single_method(
                    y=y,
                    optimum=benchmark_result.optimum,
                    # model_transitions=benchmark_result.model_transitions[m],
                    title=f"{problem_name}, {m}: cumulative best objective",
                    ylabel=ylabel,
                )
            )
    # Plot time
    plots.append(
        optimization_times(
            fit_times=benchmark_result.fit_times,
            gen_times=benchmark_result.gen_times,
            title=f"{problem_name}: cumulative optimization times",
        )
    )
    if benchmark_result.pareto_frontiers is not None:
        plots.append(
            plot_multiple_pareto_frontiers(
                frontiers=not_none(benchmark_result.pareto_frontiers),
                CI_level=0.0,
            )
        )
    return plots
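
In this multi-objective variant, the Pareto-frontier comparison is appended as the last entry of the returned list whenever pareto_frontiers is populated. A small illustrative helper (not part of the source), again assuming Ax's notebook render helper:

from ax.utils.notebook.plotting import render


def render_pareto_comparison(result: BenchmarkResult, problem_name: str) -> None:
    # Build all plots; the plot_multiple_pareto_frontiers output, when present,
    # is appended last by make_plots.
    plots = make_plots(result, problem_name, include_individual=False)
    if result.pareto_frontiers is not None:
        render(plots[-1])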