# Example 1 ("Exemplo n.º 1") — scraped snippet header; kept as a comment so the file parses.
    hpvis.finished_runs_over_time(all_runs)

    # This one visualizes the spearman rank correlation coefficients of the losses
    # between different budgets.
    hpvis.correlation_across_budgets(result)

    # For model based optimizers, one might wonder how much the model actually helped.
    # The next plot compares the performance of configs picked by the model vs. random ones
    hpvis.performance_histogram_model_vs_random(all_runs, id2conf)

    plt.show()

    d1 = res.get_pandas_dataframe()[0]
    loss = res.get_pandas_dataframe()[1]

    d1['loss'] = loss

# Disabled cell: interactive learning-curve visualization.
# Flip the guard to True to run it (requires `res` and hpvis in scope).
if False:
    result = res

    # every run that was executed during the optimization
    all_runs = result.get_all_runs()

    # mapping from config id to the actual configuration dict
    id2conf = result.get_id2config_mapping()

    # per-configuration learning curves
    lcs = result.get_learning_curves()

    # build the hover tool-tips first, then hand everything to the plot
    tips = hpvis.default_tool_tips(result, lcs)
    hpvis.interactive_HBS_plot(lcs, tool_tip_strings=tips)
import hpbandster.core.result as hpres
import hpbandster.visualization as hpvis

# Pull the hpbandster Result object off the search wrapper.
# NOTE(review): `_res` is a private attribute — presumably the optimizer's
# Result; confirm against the class that defines `search`.
result = search._res

# +
# get all executed runs
all_runs = result.get_all_runs()

# get the 'dict' that translates config ids to the actual configurations
id2conf = result.get_id2config_mapping()

# learning curves for each configuration, used by the interactive plot below
lcs = result.get_learning_curves()

# interactive HpBandSter plot of the learning curves (no custom tool-tips here)
hpvis.interactive_HBS_plot(lcs)
# -

result.get_all_runs()[0].info['test_score_mean'], result.get_all_runs()[0].loss

# +
# Here is how you get the incumbent (best configuration)
inc_id = result.get_incumbent_id()

# let's grab the run on the highest budget
# (assumes get_runs_by_id returns the runs ordered by ascending budget,
#  so the last element is the highest-budget run — TODO confirm)
inc_runs = result.get_runs_by_id(inc_id)
inc_run = inc_runs[-1]

# We have access to all information: the config, the loss observed during
# optimization, and all the additional information
inc_loss = inc_run.loss