def plot_hp_search_optimizer(
    eval_dir,
    plot_dir,
    hp_plot_order,
    plot_parameter_names,
    metric_scales,
    xscale,
    style,
    line_label_format,
    select,
):
    changing_hp = hp_plot_order[-1]
    for_plotting = get_optimizer_evaluation_results(eval_dir, hp_plot_order,
                                                    AVERAGING_NUMBER)
    pupil_names = sorted(list(for_plotting.keys()))
    result_types = sorted(list(for_plotting[pupil_names[0]].keys()))
    regimes = sorted(list(
        for_plotting[pupil_names[0]][result_types[0]].keys()))
    fixed_hp_tmpl = create_plot_hp_layout(plot_dir, hp_plot_order, changing_hp)
    # print("(plot_hp_search)plot_parameter_names:", plot_parameter_names)
    xlabel = plot_parameter_names[changing_hp]

    for pupil_name in pupil_names:
        for res_type in result_types:
            ylabel, yscale = get_y_specs(res_type, plot_parameter_names,
                                         metric_scales)
            for regime in sorted(regimes):
                path = os.path.join(plot_dir, pupil_name, res_type, regime)
                create_path(path)
                data = for_plotting[pupil_name][res_type][regime]
                launch_plotting(data, line_label_format, fixed_hp_tmpl, path,
                                xlabel, ylabel, xscale, yscale, style, select)
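
# Hypothetical usage sketch (not from the original code): the argument values
# below are assumptions chosen only to illustrate the expected call shape.
# The last entry of `hp_plot_order` is the hyperparameter varied along the
# x axis; `plot_parameter_names` is assumed to map both hyperparameter and
# metric names to axis labels.
plot_hp_search_optimizer(
    eval_dir='results/eval',
    plot_dir='results/plots',
    hp_plot_order=['num_unrollings', 'learning_rate'],
    plot_parameter_names={'learning_rate': 'learning rate',
                          'num_unrollings': 'number of unrollings',
                          'loss': 'loss'},
    metric_scales={'loss': 'log'},
    xscale='log',
    style='solid',
    line_label_format='{:.0e}',
    select=None,
)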

# Fragment from a separate launch script: resume the experiment numbering
# and start the optimizer hyperparameter grid search.
if biggest_idx is None:
    initial_experiment_counter_value = 0
else:
    initial_experiment_counter_value = biggest_idx + 1
env.grid_search_for_meta(
    evaluation,
    kwargs_for_pupil_building,
    kwargs_for_optimizer_building,
    build_pupil_hyperparameters=build_pupil_hyperparameters,
    build_optimizer_hyperparameters=build_optimizer_hyperparameters,
    other_hyperparameters=other_hyperparameters,
    initial_experiment_counter_value=initial_experiment_counter_value,
    **launch_kwargs
)
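
# A minimal sketch (an assumption, not code from the source) of how
# `biggest_idx` could be obtained before the resume logic above: scan the
# experiment save directory for numbered subfolders and take the largest
# index, or None when no experiment has been run yet.
import os

def find_biggest_experiment_idx(save_path):
    """Return the largest numeric subdirectory name in save_path, or None."""
    if not os.path.exists(save_path):
        return None
    indices = [int(name) for name in os.listdir(save_path) if name.isdigit()]
    return max(indices) if indices else None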


hp_names = get_hp_names_from_conf_file(parameter_set_file_name)
for_plotting = get_optimizer_evaluation_results(save_path, hp_names,
                                                AVERAGING_NUMBER)

best = get_best(for_plotting, 'optimizer')

metric_res = best['adam_prep']['loss']

best_on_valid = metric_res['validation']
print(' ' * 2 + 'loss' + ':', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(list(zip(hp_names, best_on_valid[0])))
env.build_pupil(
    batch_size=BATCH_SIZE,
    **LSTM_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True,
)
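
# The indexing above assumes best[pupil][metric][regime] is a pair of
# (hyperparameter values, metric value). A toy illustration of that layout
# and of what a print_hps-style helper presumably does (hypothetical code,
# not the library's implementation):
def print_hps_sketch(hp_names, hp_values, indent):
    # Align each hyperparameter name with its selected value.
    for name, value in zip(hp_names, hp_values):
        print(' ' * indent + '{}: {}'.format(name, value))

# e.g. best_on_valid == ([1e-3, 0.9], 1.42): hp values first, then the loss.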
Example #3
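
# Hypothetical setup for the fragment below (assumed, not shown in this
# excerpt): the script reads `eval_dir`, `model` and the optional filters
# from the command line. The flag names simply mirror the `args.*`
# attributes used in the code; the real script may define them differently.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('eval_dir', help='directory with evaluation results')
parser.add_argument('model', choices=['pupil', 'optimizer'])
parser.add_argument('--dataset_names', default=None)
parser.add_argument('--regimes', default=None)
parser.add_argument('--pupil_names', default=None)
parser.add_argument('--target_metrics', default=None)
parser.add_argument('--value_filter', default=None)
args = parser.parse_args()
eval_dir = args.eval_dir
model = args.model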
if model == 'pupil':
    hp_names, metrics = get_hp_and_metric_names_from_pupil_eval_dir(eval_dir)
    dataset_names = get_dataset_names_from_eval_dir(eval_dir)
    if args.dataset_names is not None:
        dataset_names = args.dataset_names.split(',')
    for_plotting = get_pupil_evaluation_results(args.eval_dir, hp_names)

elif model == 'optimizer':
    hp_names = get_hp_names_from_optimizer_eval_dir(eval_dir)
    metrics, regimes = get_metric_names_and_regimes_from_optimizer_eval_dir(
        eval_dir)
    if args.regimes is not None:
        regimes = args.regimes.split(',')
    pupil_names = get_pupil_names_from_eval_dir(eval_dir)
    if args.pupil_names is not None:
        pupil_names = args.pupil_names.split(',')
    for_plotting = get_optimizer_evaluation_results(eval_dir, hp_names,
                                                    AVERAGING_NUMBER)

if args.target_metrics is not None:
    metrics = args.target_metrics.split(',')

best = get_best(for_plotting, model, value_filter=args.value_filter)
indents = [4, 8, 12]
print(best)
if model == 'pupil':
    for dataset_name in dataset_names:
        print('dataset:', dataset_name)
        for metric in metrics:
            b = best[dataset_name][metric]
            print(' ' * indents[0] + metric + ':', b[1])
            print_hps(hp_names, b[0], indents[1])
else:
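    # Hypothetical continuation of the truncated optimizer branch (assumed,
    # not from the source), using the best[pupil_name][metric][regime]
    # layout and the (hp values, metric value) pairs seen earlier on this
    # page.
    for pupil_name in pupil_names:
        print('pupil:', pupil_name)
        for metric in metrics:
            print(' ' * indents[0] + metric + ':')
            for regime in regimes:
                b = best[pupil_name][metric][regime]
                print(' ' * indents[1] + regime + ':', b[1])
                print_hps(hp_names, b[0], indents[2])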