Example #1
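# Optimizer branch of the pupil/optimizer dispatch: collect hyper-parameter
# names, metrics and regimes from the evaluation directory, optionally
# override them from the command-line arguments, then load the averaged
# results. "args", "eval_dir", "dataset_names" and AVERAGING_NUMBER are
# assumed to be defined earlier in the script.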
elif model == 'optimizer':
    hp_names = get_hp_names_from_optimizer_eval_dir(eval_dir)
    metrics, regimes = get_metric_names_and_regimes_from_optimizer_eval_dir(
        eval_dir)
    if args.regimes is not None:
        regimes = args.regimes.split(',')
    pupil_names = get_pupil_names_from_eval_dir(eval_dir)
    if args.pupil_names is not None:
        pupil_names = args.pupil_names.split(',')
    for_plotting = get_optimizer_evaluation_results(eval_dir, hp_names,
                                                    AVERAGING_NUMBER)

if args.target_metrics is not None:
    metrics = args.target_metrics.split(',')

best = get_best(for_plotting, model, value_filter=args.value_filter)
indents = [4, 8, 12]
print(best)
if model == 'pupil':
    for dataset_name in dataset_names:
        print('dataset:', dataset_name)
        for metric in metrics:
            b = best[dataset_name][metric]
            print(' ' * indents[0] + metric + ':', b[1])
            print_hps(hp_names, b[0], indents[1])
else:
    for pupil_name in pupil_names:
        print('pupil name:', pupil_name)
        for metric in metrics:
            print(' ' * indents[0] + metric + ':')
            for regime in regimes:
                # The original example is truncated here; the body below is an
                # assumed completion that mirrors the pupil branch above and the
                # best[pupil_name][metric][regime] access pattern from Example #2.
                b = best[pupil_name][metric][regime]
                print(' ' * indents[1] + regime + ':', b[1])
                print_hps(hp_names, b[0], indents[2])
Example #2
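    # Launch the grid search over pupil and optimizer hyperparameters;
    # "env", "evaluation", the kwargs dicts and "launch_kwargs" are assumed
    # to be built earlier in the script.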
    env.grid_search_for_meta(
        evaluation,
        kwargs_for_pupil_building,
        kwargs_for_optimizer_building,
        build_pupil_hyperparameters=build_pupil_hyperparameters,
        build_optimizer_hyperparameters=build_optimizer_hyperparameters,
        other_hyperparameters=other_hyperparameters,
        initial_experiment_counter_value=initial_experiment_counter_value,
        **launch_kwargs
    )


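# Read the search results back: one averaged result set per hyper-parameter
# combination listed in the configuration file.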
hp_names = get_hp_names_from_conf_file(parameter_set_file_name)
for_plotting = get_optimizer_evaluation_results(save_path, hp_names, AVERAGING_NUMBER)

best = get_best(for_plotting, 'optimizer')

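# For an optimizer search, "best" is keyed as best[pupil_name][metric][regime];
# each leaf is a pair (hp_values, metric_value).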
metric_res = best['adam_prep']['loss']

best_on_valid = metric_res['validation']
print(' ' * 2 + 'loss' + ':', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(list(zip(hp_names, best_on_valid[0])))
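# Rebuild the pupil in the regime that lets it be trained with the
# meta-optimizer found above; BATCH_SIZE, LSTM_SIZE and add_metrics are
# assumed to be defined earlier in the script.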
env.build_pupil(
    batch_size=BATCH_SIZE,
    **LSTM_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True,
)
Example #3
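        # Tail of the hyper-parameter specification: the initial learning rate
        # is varied while the exponential-decay schedule (decay, period) stays fixed.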
        learning_rate=dict(varying=dict(init=conf['learning_rate/init']),
                           fixed=dict(decay=1., period=1e+6),
                           hp_type='built-in',
                           type='exponential_decay'))

    tf.set_random_seed(1)
    env.grid_search(evaluation,
                    kwargs_for_building,
                    build_hyperparameters=build_hyperparameters,
                    other_hyperparameters=other_hyperparameters,
                    **launch_kwargs)

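# After the search: load the per-dataset pupil evaluation results, rebuild the
# pupil, and print the best hyper-parameter configuration for every dataset
# and metric. "opt", "add_metrics", "base" and BATCH_SIZE are assumed to be
# defined earlier in the script.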
hp_names = get_hp_names_from_conf_file(parameter_set_file_name)
for_plotting = get_pupil_evaluation_results(save_path, hp_names)

best = get_best(for_plotting, 'pupil')
env.build_pupil(batch_size=BATCH_SIZE,
                num_layers=2,
                num_hidden_nodes=[1000],
                input_shape=[3072],
                num_classes=10,
                additional_metrics=add_metrics,
                optimizer=opt)
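# For a pupil search, "best" is keyed as best[dataset_name][metric];
# each leaf is again a pair (hp_values, metric_value).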
for dataset_name, dataset_res in best.items():
    print('dataset:', dataset_name)
    for metric, b in dataset_res.items():
        print(' ' * 2 + metric + ':', b[1])
        print_hps(hp_names, b[0], 4)
        best_conf = dict(list(zip(hp_names, b[0])))
        training_path = os.path.join(base, metric + '_best', 'test',
                                     'training')