Example #1
def plot_learning(exp, cfg):
    objective_means = np.array([[exp.trials[trial].objective_mean]
                                for trial in exp.trials])
    cumulative = optimization_trace_single_method(
        y=np.maximum.accumulate(objective_means.T, axis=1) * 1.01,
        ylabel=cfg.metric.name,
        trace_color=(83, 78, 194),
        # optimum=-3.32237,  # Known minimum objective for Hartmann6 function.
    )
    all_trials = optimization_trace_single_method(
        y=objective_means.T,
        ylabel=cfg.metric.name,
        model_transitions=[cfg.bo.random],
        trace_color=(114, 110, 180),
        # optimum=-3.32237,  # Known minimum objective for Hartmann6 function.
    )

    layout_learn = cumulative[0]['layout']
    layout_learn['paper_bgcolor'] = 'rgba(0,0,0,0)'
    layout_learn['plot_bgcolor'] = 'rgba(0,0,0,0)'
    layout_learn['showlegend'] = False

    d1 = cumulative[0]['data']
    d2 = all_trials[0]['data']

    for t in d1:
        t['legendgroup'] = cfg.metric.name + ", cum. max"
        if 'name' in t and t['name'] == 'Generator change':
            t['name'] = 'End Random Iterations'
        else:
            t['name'] = cfg.metric.name + ", cum. max"
            t['line']['color'] = 'rgba(200,20,20,0.5)'
            t['line']['width'] = 4

    for t in d2:
        t['legendgroup'] = cfg.metric.name
        if 'name' in t and t['name'] == 'Generator change':
            t['name'] = 'End Random Iterations'
        else:
            t['name'] = cfg.metric.name
            t['line']['color'] = 'rgba(20,20,200,0.5)'
            t['line']['width'] = 4

    fig = {
        "data": d1 + d2,  # data,
        "layout": layout_learn,
    }
    return fig
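The function above assumes numpy and Ax's trace-plotting helper are already imported, and that `exp` is an Ax experiment while `cfg` is a caller-supplied config exposing `metric.name` and `bo.random`. A minimal, hedged sketch of those imports and of rendering the returned figure dict with Plotly:

import numpy as np
import plotly.graph_objects as go
from ax.plot.trace import optimization_trace_single_method

# `exp` and `cfg` are assumed to come from the surrounding script.
fig_dict = plot_learning(exp, cfg)
go.Figure(fig_dict).show()  # overlays the cumulative-max and per-trial traces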
Example #2
    def get_optimization_trace(self,
                               objective_optimum: Optional[float] = None
                               ) -> AxPlotConfig:
        """Retrieves the plot configuration for optimization trace, which shows
        the evolution of the objective mean over iterations.

        Args:
            objective_optimum: Optimal objective, if known, for display in the
                visualization.
        """
        if not self.experiment.trials:
            raise ValueError("Cannot generate plot as there are no trials.")
        objective_name = self.experiment.optimization_config.objective.metric.name
        best_objectives = np.array([[
            checked_cast(Trial, trial).objective_mean
            for trial in self.experiment.trials.values()
        ]])
        hover_labels = [
            _format_dict(not_none(checked_cast(Trial, trial).arm).parameters)
            for trial in self.experiment.trials.values()
        ]
        return optimization_trace_single_method(
            y=(np.minimum.accumulate(best_objectives, axis=1)
               if self.experiment.optimization_config.objective.minimize else
               np.maximum.accumulate(best_objectives, axis=1)),
            optimum=objective_optimum,
            title="Model performance vs. # of iterations",
            ylabel=objective_name.capitalize(),
            hover_labels=hover_labels,
        )
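This method reads like part of Ax's service-layer client; a hedged usage sketch, assuming an AxClient that has already completed a few trials:

from ax.service.ax_client import AxClient
from ax.utils.notebook.plotting import init_notebook_plotting, render

init_notebook_plotting()
ax_client = AxClient()
# ... create_experiment(...), then run trials via get_next_trial()/complete_trial() ...
render(ax_client.get_optimization_trace())  # pass objective_optimum=... if the true optimum is known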
Example #3
def make_plots(benchmark_result: BenchmarkResult, problem_name: str,
               include_individual: bool) -> List[AxPlotConfig]:
    plots: List[AxPlotConfig] = []
    # Plot objective at true best
    plots.append(
        optimization_trace_all_methods(
            y_dict=benchmark_result.objective_at_true_best,
            optimum=benchmark_result.optimum,
            title=f"{problem_name}: cumulative best objective",
            ylabel="Objective at best-feasible point observed so far",
        ))
    if include_individual:
        # Plot individual plots of a single method on a single problem.
        for m, y in benchmark_result.objective_at_true_best.items():
            plots.append(
                optimization_trace_single_method(
                    y=y,
                    optimum=benchmark_result.optimum,
                    # model_transitions=benchmark_result.model_transitions[m],
                    title=f"{problem_name}, {m}: cumulative best objective",
                    ylabel="Objective at best-feasible point observed so far",
                ))
    # Plot time
    plots.append(
        optimization_times(
            fit_times=benchmark_result.fit_times,
            gen_times=benchmark_result.gen_times,
            title=f"{problem_name}: optimization times",
        ))
    return plots
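A short usage sketch for the helper above, assuming `benchmark_result` comes from Ax's benchmarking suite and using Ax's notebook renderer:

from ax.utils.notebook.plotting import render

plots = make_plots(benchmark_result, problem_name="hartmann6", include_individual=True)
for plot_config in plots:
    render(plot_config)  # each entry is an AxPlotConfig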
Example #4
def performance_plot(experiment, best_vals):
    best_objectives = np.array([[trial.objective_mean for trial in experiment.trials.values()]])
    best_objective_plot = optimization_trace_single_method(
        y=np.minimum.accumulate(best_objectives, axis=1),
        optimum=best_vals[0]['loss'],
        title="Model performance vs. # of iterations",
        ylabel="loss")
    render(best_objective_plot)
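The function depends on names imported elsewhere in its module; a minimal sketch of those imports, with `experiment` and `best_vals` assumed to come from an earlier optimization run (e.g. `best_vals[0]['loss']` holding the best observed loss):

import numpy as np
from ax.plot.trace import optimization_trace_single_method
from ax.utils.notebook.plotting import render

performance_plot(experiment, best_vals)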
Example #5
def plot_learning(exp, cfg):
    objective_means = np.array([[exp.trials[trial].objective_mean]
                                for trial in exp.trials])
    cumulative = optimization_trace_single_method(
        y=np.maximum.accumulate(objective_means.T, axis=1),
        ylabel=cfg.metric.name,
        trace_color=(83, 78, 194),
        # optimum=-3.32237,  # Known minimum objective for Hartmann6 function.
    )
    all_trials = optimization_trace_single_method(
        y=objective_means.T,
        ylabel=cfg.metric.name,
        model_transitions=[cfg.bo.random],
        trace_color=(114, 110, 180),
        # optimum=-3.32237,  # Known minimum objective for Hartmann6 function.
    )
    layout_learn = cumulative[0]['layout']
    layout_learn['paper_bgcolor'] = 'rgba(0,0,0,0)'
    layout_learn['plot_bgcolor'] = 'rgba(0,0,0,0)'

    d1 = cumulative[0]['data']
    d2 = all_trials[0]['data']

    for t in d1:
        t['legendgroup'] = cfg.metric.name + ", cum. max"
        if 'name' in t and t['name'] == 'Generator change':
            t['name'] = 'End Random Iterations'
        else:
            t['name'] = cfg.metric.name + ", cum. max"

    for t in d2:
        t['legendgroup'] = cfg.metric.name
        if 'name' in t and t['name'] == 'Generator change':
            t['name'] = 'End Random Iterations'
        else:
            t['name'] = cfg.metric.name

    fig = {
        "data": d1 + d2,  # data,
        "layout": layout_learn,
    }
    import plotly.graph_objects as go
    return go.Figure(fig)
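Unlike Example #1, this variant wraps the figure dict in a plotly Figure object before returning it, so the caller can use Plotly's own API directly; a hedged one-liner, assuming the same `exp`/`cfg` objects as above:

plot_learning(exp, cfg).show()  # go.Figure also exposes write_html(), write_image(), etc.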
Example #6
def make_plots(
    benchmark_result: BenchmarkResult, problem_name: str, include_individual: bool
) -> List[AxPlotConfig]:
    plots: List[AxPlotConfig] = []
    # Plot objective at true best
    ylabel = (
        "Feasible Hypervolume"
        if benchmark_result.is_multi_objective
        else "Objective at best-feasible point observed so far"
    )
    plots.append(
        optimization_trace_all_methods(
            y_dict=benchmark_result.true_performance,
            optimum=benchmark_result.optimum,
            title=f"{problem_name}: Optimization Performance",
            ylabel=ylabel,
        )
    )
    if include_individual:
        # Plot individual plots of a single method on a single problem.
        for m, y in benchmark_result.true_performance.items():
            plots.append(
                optimization_trace_single_method(
                    y=y,
                    optimum=benchmark_result.optimum,
                    # model_transitions=benchmark_result.model_transitions[m],
                    title=f"{problem_name}, {m}: cumulative best objective",
                    ylabel=ylabel,
                )
            )
    # Plot time
    plots.append(
        optimization_times(
            fit_times=benchmark_result.fit_times,
            gen_times=benchmark_result.gen_times,
            title=f"{problem_name}: cumulative optimization times",
        )
    )
    if benchmark_result.pareto_frontiers is not None:
        plots.append(
            plot_multiple_pareto_frontiers(
                frontiers=not_none(benchmark_result.pareto_frontiers),
                CI_level=0.0,
            )
        )
    return plots
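Compared with Example #3, this version also covers multi-objective problems (hypervolume traces and Pareto frontiers). A sketch of the extra imports it assumes; the module paths follow Ax's layout but are hedged, since they may differ between versions:

from ax.plot.base import AxPlotConfig
from ax.plot.pareto_frontier import plot_multiple_pareto_frontiers
from ax.plot.trace import (
    optimization_times,
    optimization_trace_all_methods,
    optimization_trace_single_method,
)
from ax.utils.common.typeutils import not_none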
Example #7
    def testTraces(self):
        exp = get_branin_experiment(with_batch=True)
        exp.trials[0].run()
        model = Models.BOTORCH(
            # Model bridge kwargs
            experiment=exp,
            data=exp.fetch_data(),
        )
        # Assert that each type of plot can be constructed successfully
        plot = optimization_trace_single_method_plotly(
            np.array([[1, 2, 3], [4, 5, 6]]),
            list(model.metric_names)[0],
            optimization_direction="minimize",
        )
        self.assertIsInstance(plot, go.Figure)
        plot = optimization_trace_single_method(
            np.array([[1, 2, 3], [4, 5, 6]]),
            list(model.metric_names)[0],
            optimization_direction="minimize",
        )
        self.assertIsInstance(plot, AxPlotConfig)
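The test relies on several Ax test utilities and plotting functions; a sketch of the imports it assumes (paths reflect Ax's module layout and are hedged, as they may have moved between releases):

import numpy as np
import plotly.graph_objects as go
from ax.modelbridge.registry import Models
from ax.plot.base import AxPlotConfig
from ax.plot.trace import (
    optimization_trace_single_method,
    optimization_trace_single_method_plotly,
)
from ax.utils.testing.core_stubs import get_branin_experiment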
Example #8
                "bounds": [256, 2048],
                "log_scale": False
            },
            {
                "name": "batch_size",
                "type": "choice",
                "values": [32, 64, 128, 256, 512]
            },
        ],
        evaluation_function=train_evaluate,
        objective_name='accuracy',
        # generation_strategy=ax.models.random.sobol.SobolGenerator,
    )
    # import pdb; pdb.set_trace()

    render(
        plot_contour(model=model,
                     param_x='lr',
                     param_y='training_split',
                     metric_name='accuracy'))

    print(best_parameters, values[0])
    best_objectives = np.array(
        [[trial.objective_mean * 100 for trial in experiment.trials.values()]])
    best_objective_plot = optimization_trace_single_method(
        y=np.maximum.accumulate(best_objectives, axis=1),
        title="Model performance vs. # of iterations",
        ylabel="Classification Accuracy, %",
    )
    render(best_objective_plot)
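The snippet is cut off at the top; judging from the visible keyword arguments, it appears to wrap a call to Ax's managed optimization loop. A hedged sketch of the imports and of the call that would yield `best_parameters`, `values`, `experiment`, and `model` (the search-space list stays elided; `train_evaluate` is the user's own evaluation function):

import numpy as np
from ax.plot.contour import plot_contour
from ax.plot.trace import optimization_trace_single_method
from ax.service.managed_loop import optimize
from ax.utils.notebook.plotting import render

best_parameters, values, experiment, model = optimize(
    parameters=[...],  # full parameter list shown (partially) above
    evaluation_function=train_evaluate,
    objective_name='accuracy',
)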