Example #1
import numpy as np
import plotly.graph_objects as go

from ax.plot.contour import plot_contour
from ax.plot.trace import optimization_trace_single_method
from ax.utils.notebook.plotting import render


def performance_plot(experiment, best_vals):
    best_objectives = np.array([[trial.objective_mean for trial in experiment.trials.values()]])
    best_objective_plot = optimization_trace_single_method(
        y=np.minimum.accumulate(best_objectives, axis=1),
        optimum=best_vals[0]['loss'],
        title="Model performance vs. # of iterations",
        ylabel="loss")
    render(best_objective_plot)
    def plot_all(model, objectives, name="", rend=False):
        for o in objectives:
            plot = plot_contour(
                model=model,
                param_x="roll-p",
                param_y="roll-d",
                metric_name=o,
            )
            plot[0]['layout']['title'] = o
            data = plot[0]['data']
            lay = plot[0]['layout']

            for i, d in enumerate(data):
                if i > 1:
                    d['cliponaxis'] = False

            fig = {
                "data": data,
                "layout": lay,
            }
            go.Figure(fig).write_image(name + o + ".png")
            if rend: render(plot)
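A hypothetical call, assuming `experiment` is an Ax experiment whose trials report a "loss" objective; the shape of `best_vals` (a list of dicts with a 'loss' key) is inferred from the indexing above and is not confirmed by the source:

best_vals = [{"loss": 0.0}]  # assumed structure, not from the source
performance_plot(experiment, best_vals)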
Example #3
def mems_exp(cfg):
    log.info("============= Configuration =============")
    log.info(f"Config:\n{cfg.pretty()}")
    log.info("=========================================")

    search_space = gen_search_space(cfg.problem)
    outcome_con = gen_outcome_constraints(cfg.problem)

    exp = SimpleExperiment(
        name=cfg.problem.name,
        search_space=SearchSpace(search_space),
        evaluation_function=jumper,
        objective_name="Energy_(uJ)",
        minimize=False,
        outcome_constraints=outcome_con,
    )

    optimization_config = OptimizationConfig(
        objective=Objective(
            metric=MEMsMetric(name="Energy_(uJ)"),
            minimize=False,
        ),
    )

    class MyRunner(Runner):
        def run(self, trial):
            return {"name": str(trial.index)}

    exp.runner = MyRunner()
    exp.optimization_config = optimization_config
    from ax.utils.notebook.plotting import render, init_notebook_plotting
    from ax.plot.contour import plot_contour

    print(f"Running {cfg.bo.random} Sobol initialization trials...")
    sobol = Models.SOBOL(exp.search_space)
    num_search = cfg.bo.random
    for i in range(num_search):
        exp.new_trial(generator_run=sobol.gen(1))
        exp.trials[len(exp.trials) - 1].run()

    # data = exp.fetch_data()

    num_opt = cfg.bo.optimized
    for i in range(num_opt):
        # Fit (or refit) the GP+EI model on all data collected so far,
        # before it is used for plotting or candidate generation below.
        gpei = Models.BOTORCH(experiment=exp, data=exp.eval())

        if (i % 5) == 0 and cfg.plot_during:
            plot = plot_contour(
                model=gpei,
                param_x="N",
                param_y="L",
                metric_name="Energy_(uJ)",
            )
            render(plot)

        print(f"Running GP+EI optimization trial {i + 1}/{num_opt}...")
        exp.new_trial(generator_run=gpei.gen(1))

    # Refit once more so the final model reflects the last trial's data.
    gpei = Models.BOTORCH(experiment=exp, data=exp.eval())

    plot_learn = plot_learning(exp, cfg)
    # go.Figure(plot_learn).show()
    save_fig([plot_learn], "optimize")

    plot = plot_contour(model=gpei,
                        param_x="N",
                        param_y="L",
                        metric_name="Energy_(uJ)",
                        lower_is_better=cfg.metric.minimize)
    save_fig(plot, dir=f"N_L_Energy")
    # render(plot)

    plot = plot_contour(model=gpei,
                        param_x="N",
                        param_y="w",
                        metric_name="Energy_(uJ)",
                        lower_is_better=cfg.metric.minimize)
    # render(plot)
    save_fig(plot, dir=f"N_w_Energy")

    plot = plot_contour(model=gpei,
                        param_x="w",
                        param_y="L",
                        metric_name="Energy_(uJ)",
                        lower_is_better=cfg.metric.minimize)
    save_fig(plot, dir=f"w_L_Energy")
Example #4
def mems_exp(cfg):
    log.info("============= Configuration =============")
    log.info(f"Config:\n{cfg.pretty()}")
    log.info("=========================================")

    raise NotImplementedError("TODO: load experimental data and verify likelihood of different points")
    search_space = gen_search_space(cfg.problem)
    outcome_con = gen_outcome_constraints(cfg.problem)

    exp = SimpleExperiment(
        name=cfg.problem.name,
        search_space=SearchSpace(search_space),
        evaluation_function=jumper,
        objective_name="Energy_(uJ)",
        minimize=False,
        outcome_constraints=outcome_con,
    )

    optimization_config = OptimizationConfig(
        objective=Objective(
            metric=MEMsMetric(name="Energy_(uJ)"),
            minimize=False,
        ),
    )

    class MyRunner(Runner):
        def run(self, trial):
            return {"name": str(trial.index)}

    exp.runner = MyRunner()
    exp.optimization_config = optimization_config
    from ax.utils.notebook.plotting import render, init_notebook_plotting
    from ax.plot.contour import plot_contour

    print(f"Running {cfg.bo.random} Sobol initialization trials...")
    sobol = Models.SOBOL(exp.search_space)
    num_search = cfg.bo.random
    for i in range(num_search):
        exp.new_trial(generator_run=sobol.gen(1))
        exp.trials[len(exp.trials) - 1].run()

    # data = exp.fetch_data()

    num_opt = cfg.bo.optimized
    for i in range(num_opt):
        # Fit (or refit) the GP+EI model on all data collected so far,
        # before it is used for plotting or candidate generation below.
        gpei = Models.BOTORCH(experiment=exp, data=exp.eval())

        if (i % 5) == 0 and cfg.plot_during:
            plot = plot_contour(
                model=gpei,
                param_x="N",
                param_y="L",
                metric_name="Energy_(uJ)",
            )
            render(plot)

        print(f"Running GP+EI optimization trial {i + 1}/{num_opt}...")
        exp.new_trial(generator_run=gpei.gen(1))

    # Refit once more so the final model reflects the last trial's data.
    gpei = Models.BOTORCH(experiment=exp, data=exp.eval())

    from ax.models.torch.botorch_defaults import predict_from_model
    import torch
    X = torch.Tensor([[2, 7e-4, 1e-4], [1, 5e-4, 1e-4]]).double()
    mean, cov = predict_from_model(gpei.model.model, X)
    # X(Tensor) – n x d parameters

    ll = log_likelihood(X, mean, cov)
    plot_ll(ll)
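`log_likelihood` and `plot_ll` are project helpers not shown in this example. A minimal sketch of one plausible reading of `log_likelihood`, assuming observed targets `y` and a single-output model (`predict_from_model` returns a mean of shape (n, o) and a covariance of shape (n, o, o)); it reuses the torch import above:

def log_likelihood(y, mean, cov):
    # Per-point Gaussian log-density of observations under the GP posterior.
    std = cov[:, 0, 0].clamp_min(1e-12).sqrt()
    return torch.distributions.Normal(mean[:, 0], std).log_prob(y)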
Example #5
def hyperparam_plot(exp_model, param_x, param_y):
    render(plot_contour(exp_model, param_x, param_y, metric_name='loss'))
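Hypothetical usage, assuming a fitted Ax model bridge (e.g. from Models.BOTORCH) and parameter names that exist in its search space; the names below are illustrative only:

hyperparam_plot(gpei, "lr", "weight_decay")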
Example #6
# for i in range(25):
#     parameters, trial_index = ax_client.get_next_trial()
#     # Local evaluation here can be replaced with deployment to external system.
#     ax_client.complete_trial(trial_index=trial_index, raw_data=evaluate(parameters))
#     # _, trial_index = ax_client.get_next_trial()
#     ax_client.log_trial_failure(trial_index=trial_index)
#
# ax_client.get_trials_data_frame().sort_values('trial_index')
# best_parameters, values = ax_client.get_best_parameters()

from ax.utils.notebook.plotting import render, init_notebook_plotting
from ax.plot.contour import plot_contour
plot = plot_contour(
    model=gpei,
    param_x=opt_list[0],
    param_y=opt_list[1],
    metric_name="base",
)
render(plot)
ax_client.generation_strategy.model = gpei
init_notebook_plotting(offline=True)
# render(ax_client.get_contour_plot())
render(ax_client.get_contour_plot(param_x=opt_list[0],
                                  param_y=opt_list[1]))  # optionally: metric_name="base"
# render(ax_client.get_optimization_trace(objective_optimum=hartmann6.fmin))  # Objective_optimum is optional.

ax_client.save_to_json_file()  # For custom filepath, pass `filepath` argument.
restored_ax_client = AxClient.load_from_json_file()  # For custom filepath, pass `filepath` argument.
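The same round trip with an explicit path (the `filepath` argument is part of AxClient's API; the filename here is arbitrary):

ax_client.save_to_json_file(filepath="ax_snapshot.json")
restored_ax_client = AxClient.load_from_json_file(filepath="ax_snapshot.json")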
Example #7
    )
    generatorRun = EHVImodel.gen(1)
    trial = EHVIexperiment.new_trial(generator_run=generatorRun)
    trial.run()
    EHVIdata = Data.from_multiple_data([EHVIdata, trial.fetch_data()])

    exp_df = exp_to_df(EHVIexperiment)
    outcomes = np.array(exp_df[['Accuracy', 'BitCost']], dtype=np.double)

    try:
        hv = observed_hypervolume(modelbridge=EHVImodel)
    except Exception:
        hv = 0
        print("Failed to compute hv")
    EHVIhvList.append(hv)
    print(f"Iteration: {i}, HV: {hv}")

EHVIoutcomes = np.array(exp_to_df(EHVIexperiment)[['Accuracy', 'BitCost']],
                        dtype=np.double)

frontier = compute_pareto_frontier(
    experiment=EHVIexperiment,
    data=EHVIexperiment.fetch_data(),
    primary_objective=metric_a,
    secondary_objective=metric_b,
    absolute_metrics=["Accuracy", "BitCost"],
    num_points=25,
)

render(plot_pareto_frontier(frontier, CI_level=0.90))
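A possible way to visualize the hypervolume trace collected in EHVIhvList above (the matplotlib usage is an assumption; the source does not show how the list is plotted):

import matplotlib.pyplot as plt

plt.plot(EHVIhvList)
plt.xlabel("Iteration")
plt.ylabel("Observed hypervolume")
plt.show()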
Example #8
                "bounds": [256, 2048],
                "log_scale": False
            },
            {
                "name": "batch_size",
                "type": "choice",
                "values": [32, 64, 128, 256, 512]
            },
        ],
        evaluation_function=train_evaluate,
        objective_name='accuracy',
        # generation_strategy=ax.models.random.sobol.SobolGenerator,
    )
    # import pdb; pdb.set_trace()

    render(
        plot_contour(model=model,
                     param_x='lr',
                     param_y='training_split',
                     metric_name='accuracy'))

    print(best_parameters, values[0])
    best_objectives = np.array(
        [[trial.objective_mean * 100 for trial in experiment.trials.values()]])
    best_objective_plot = optimization_trace_single_method(
        y=np.maximum.accumulate(best_objectives, axis=1),
        title="Model performance vs. # of iterations",
        ylabel="Classification Accuracy, %",
    )
    render(best_objective_plot)
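The truncated head of this snippet looks like a call to Ax's managed-loop API, which returns the best_parameters, values, experiment, and model used above. A generic sketch of that call shape, under that assumption rather than as a reconstruction of the elided lines:

from ax.service.managed_loop import optimize

best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "lr", "type": "range", "bounds": [1e-6, 0.4], "log_scale": True},
    ],
    evaluation_function=train_evaluate,  # defined elsewhere in the source project
    objective_name="accuracy",
    total_trials=20,
)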