Example #1
    if args.regimes is not None:
        regimes = args.regimes.split(',')
    pupil_names = get_pupil_names_from_eval_dir(eval_dir)
    if args.pupil_names is not None:
        pupil_names = args.pupil_names.split(',')
    for_plotting = get_optimizer_evaluation_results(eval_dir, hp_names,
                                                    AVERAGING_NUMBER)

if args.target_metrics is not None:
    metrics = args.target_metrics.split(',')

best = get_best(for_plotting, model, value_filter=args.value_filter)
indents = [4, 8, 12]
print(best)
if model == 'pupil':
    for dataset_name in dataset_names:
        print('dataset:', dataset_name)
        for metric in metrics:
            b = best[dataset_name][metric]
            print(' ' * indents[0] + metric + ':', b[1])
            print_hps(hp_names, b[0], indents[1])
else:
    for pupil_name in pupil_names:
        print('pupil name:', pupil_name)
        for metric in metrics:
            print(' ' * indents[0] + metric + ':')
            for regime in regimes:
                b = best[pupil_name][metric][regime]
                print(' ' * indents[1] + regime + ':')
                print(' ' * indents[2] + 'result:', b[1])
                print_hps(hp_names, b[0], indents[2])
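
print_hps itself is not shown in these snippets; from the calls above it appears to print each hyperparameter name next to its value at the given indentation. A minimal sketch of such a helper, assuming the values arrive in the same order as hp_names (the project's actual implementation may differ):

def print_hps(hp_names, hp_values, indent):
    # Print one "name: value" pair per line, indented by `indent` spaces.
    # Assumes hp_values is ordered the same way as hp_names, as in the calls above.
    for name, value in zip(hp_names, hp_values):
        print(' ' * indent + str(name) + ': ' + str(value))
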
Example #2
        other_hyperparameters=other_hyperparameters,
        initial_experiment_counter_value=initial_experiment_counter_value,
        **launch_kwargs
    )


hp_names = get_hp_names_from_conf_file(parameter_set_file_name)
for_plotting = get_optimizer_evaluation_results(save_path, hp_names, AVERAGING_NUMBER)

best = get_best(for_plotting, 'optimizer')

metric_res = best['adam_prep']['loss']

best_on_valid = metric_res['validation']
print(' ' * 2 + 'loss' + ':', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(zip(hp_names, best_on_valid[0]))
env.build_pupil(
    batch_size=BATCH_SIZE,
    **LSTM_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True,
)

env.build_optimizer(
    **OPTIMIZER_PARAMETERS,
    optimizer_init_parameter=best_conf['optimizer_init_parameter'],
)
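
The indexing in these examples suggests the shape of what get_best returns. The sketch below is an assumption inferred from the code above, not the project's documented contract: for 'pupil' the result is keyed by dataset and metric, for 'optimizer' by pupil name, metric, and regime, and every leaf is a (hp_values, best_result) pair with hp_values ordered like hp_names.

# Assumed structure, inferred from how `best` is indexed in these examples
# (numbers are illustrative placeholders only):
best_pupil = {
    'train': {                                  # dataset name
        'loss': ([0.002, 20], 1.35),            # (hp values, best result)
    },
}
best_optimizer = {
    'adam_prep': {                              # pupil name
        'loss': {
            'validation': ([0.002, 20], 1.35),  # regime -> (hp values, best result)
        },
    },
}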

Example #3
hp_names = get_hp_names_from_conf_file(parameter_set_file_name)
for_plotting = get_pupil_evaluation_results(save_path, hp_names)

best = get_best(for_plotting, 'pupil')
env.build_pupil(batch_size=BATCH_SIZE,
                num_layers=2,
                num_hidden_nodes=[1000],
                input_shape=[3072],
                num_classes=10,
                additional_metrics=add_metrics,
                optimizer=opt)
for dataset_name, dataset_res in best.items():
    print('dataset:', dataset_name)
    for metric, b in dataset_res.items():
        print(' ' * 2 + metric + ':', b[1])
        print_hps(hp_names, b[0], 4)
        best_conf = dict(zip(hp_names, b[0]))
        training_path = os.path.join(base, metric + '_best', 'test',
                                     'training')

        env.train(
            allow_growth=True,
            # save_path='debug_grid_search',
            result_types=['loss', 'bpc', 'perplexity', 'accuracy'],
            additions_to_feed_dict=train_add_feed,
            # pupil_restore_paths=['debug_empty_meta_optimizer/not_learning_issue_es20_nn20/checkpoints/0'],
            # stop=stop_specs,
            save_path=training_path,
            restore_path=RESTORE_PATH,
            stop=1000,
            results_collect_interval=1000,