Example #1
    def bohb_plot(self, bohb_result, filename='bohb_plot.png'):
        """ Import plot from bohb """
        try:
            from hpbandster.core.result import extract_HB_learning_curves
            from hpbandster.visualization import interactive_HB_plot, default_tool_tips
        except ImportError as e:
            raise ImportError(
                "To analyze BOHB-data, please install hpbandster (e.g. `pip install hpbandster`)"
            ) from e
        filename = os.path.join(self.output_dir, filename)
        # Hpbandster also contains a visualization tool to plot the
        # 'learning curves' of the sampled configurations
        incumbent_trajectory = bohb_result.get_incumbent_trajectory()
        lcs = bohb_result.get_learning_curves(
            lc_extractor=extract_HB_learning_curves)

        tool_tips = default_tool_tips(bohb_result, lcs)
        fig, ax, check, none_button, all_button = interactive_HB_plot(
            lcs, tool_tip_strings=tool_tips, show=False)
        ax.set_ylim([0.1 * incumbent_trajectory['losses'][-1], 1])
        #ax.set_yscale('log')

        fig.savefig(filename)
        plt.close(fig)
        return filename
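# A minimal standalone sketch of the same plotting flow as the method above,
# mirroring its imports and assuming the BOHB run was logged to disk with
# hpbandster's json result logger; the log directory 'bohb_logs/' and the
# output filename are hypothetical, not part of the original code.
import matplotlib.pyplot as plt
import hpbandster.core.result as hpres
from hpbandster.core.result import extract_HB_learning_curves
from hpbandster.visualization import interactive_HB_plot, default_tool_tips

bohb_result = hpres.logged_results_to_HBS_result('bohb_logs/')
lcs = bohb_result.get_learning_curves(lc_extractor=extract_HB_learning_curves)
tool_tips = default_tool_tips(bohb_result, lcs)
fig, ax, check, none_button, all_button = interactive_HB_plot(
    lcs, tool_tip_strings=tool_tips, show=False)
fig.savefig('bohb_plot.png')
plt.close(fig)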
Example #2
    hpvis.finished_runs_over_time(all_runs)

    # This one visualizes the Spearman rank correlation coefficients of the losses
    # between different budgets.
    hpvis.correlation_across_budgets(result)

    # For model based optimizers, one might wonder how much the model actually helped.
    # The next plot compares the performance of configs picked by the model vs. random ones
    hpvis.performance_histogram_model_vs_random(all_runs, id2conf)

    plt.show()

    d1, loss = res.get_pandas_dataframe()

    d1['loss'] = loss
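    # A possible use of the dataframe built above (a sketch, not part of the
    # original script): sort by loss to see the best observed configurations.
    print(d1.sort_values('loss').head())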

if False:
    result = res
    # get all executed runs
    all_runs = result.get_all_runs()

    # get the 'dict' that translates config ids to the actual configurations
    id2conf = result.get_id2config_mapping()

    lcs = result.get_learning_curves()

    hpvis.interactive_HBS_plot(lcs,
                               tool_tip_strings=hpvis.default_tool_tips(
                                   result, lcs))
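# The snippet above relies on `res`, `result`, `all_runs` and `id2conf` that
# were set up earlier in the original script; a minimal sketch of that setup,
# assuming the run was logged to the hypothetical directory 'bohb_logs/':
import matplotlib.pyplot as plt
import hpbandster.core.result as hpres
import hpbandster.visualization as hpvis

res = hpres.logged_results_to_HBS_result('bohb_logs/')
result = res
all_runs = result.get_all_runs()
id2conf = result.get_id2config_mapping()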
Example #3
            bandwidth_factor=1,
            num_samples=32,
            min_bandwidth=1e-1
        )
res = HB.run(4)

# After the optimizer run, we shutdown the master.
HB.shutdown(shutdown_workers=True)
NS.shutdown()


# BOHB will return a result object.
# It holds information about the optimization run, like the incumbent (= best) configuration.
# For further details about the result-object, see its documentation.
id2config = res.get_id2config_mapping()
print('A total of %i unique configurations were sampled.' % len(id2config.keys()))
print('A total of %i runs were executed.' % len(res.get_all_runs()))


# Hpbandster also contains a visualization tool to plot the
# 'learning curves' of the sampled configurations
incumbent_trajectory = res.get_incumbent_trajectory()
lcs = res.get_learning_curves(lc_extractor=extract_HB_learning_curves)

tool_tips = default_tool_tips(res, lcs)
fig, ax, check, none_button, all_button = interactive_HB_plot(lcs, tool_tip_strings=tool_tips, show=False)
ax.set_ylim([0.1*incumbent_trajectory['losses'][-1], 1])
ax.set_yscale('log')

plt.show()
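# A short follow-up sketch (not part of the original script): look up the
# incumbent (best) configuration via the mappings computed above, using only
# standard hpbandster result methods.
inc_id = res.get_incumbent_id()
print('Best found configuration:', id2config[inc_id]['config'])
print('Loss of the incumbent: %f' % incumbent_trajectory['losses'][-1])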
Example #4
        [(r.budget, r.info['test_error']) for r in sr],
    ]
    return value


bohb_logs_dir = '{}/search_space_{}/run{}-seed{}'.format(
    args.working_directory, args.space, args.run_id, args.seed)
res = hpres.logged_results_to_HB_result(bohb_logs_dir)

lcs_temp = res.get_learning_curves(lc_extractor=extract_HB_learning_curves)
# drop configurations for which no learning curve was recorded
lcs = dict(lcs_temp)
for key, value in lcs_temp.items():
    if value == [[]]:
        del lcs[key]

tool_tips = hpvis.default_tool_tips(res, lcs)
#embed()

inc_id = res.get_incumbent_id()

id2conf = res.get_id2config_mapping()

inc_trajectory = res.get_incumbent_trajectory()
print(inc_trajectory)
print(res.get_runs_by_id(inc_id))

all_runs = list(
    filter(lambda r: not (r.info is None or r.loss is None),
           res.get_all_runs()))

budgets = res.HB_config['budgets']
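# A possible continuation (a sketch, not part of the original script): print
# how many finished runs there are per budget, then reuse the filtered learning
# curves and tool tips from above for the interactive plot shown in the other
# examples.
for b in budgets:
    runs_on_b = [r for r in all_runs if r.budget == b]
    print('budget %s: %i finished runs' % (str(b), len(runs_on_b)))

hpvis.interactive_HBS_plot(lcs, tool_tip_strings=tool_tips)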
Example #5
import hpbandster.core.result as hpres
import hpbandster.visualization as hpvis



# load the example run from the log files
result = hpres.logged_results_to_HBS_result('example_5_run/')

# get all executed runs
all_runs = result.get_all_runs()

# get the 'dict' that translates config ids to the actual configurations
id2conf = result.get_id2config_mapping()

lcs = result.get_learning_curves()

hpvis.interactive_HBS_plot(lcs, tool_tip_strings=hpvis.default_tool_tips(result, lcs))




def realtime_learning_curves(runs):
    """
    example how to extract a different kind of learning curve.

    The x values are now the time the runs finished, not the budget anymore.
    We no longer plot the validation loss on the y axis, but now the test accuracy.

    This is just to show how to get different information into the interactive plot.

    """
    sr = sorted(runs, key=lambda r: r.budget)