Example #1
metric_res = best['adam_prep']['loss']

best_on_valid = metric_res['validation']
print('  loss:', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(zip(hp_names, best_on_valid[0]))
env.build_pupil(
    batch_size=BATCH_SIZE,
    **LSTM_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True,
)

env.build_optimizer(
    **OPTIMIZER_PARAMETERS,
    optimizer_init_parameter=best_conf['optimizer_init_parameter'],
)


stop_specs = 20000

learning_rate = dict(
    type='exponential_decay',
    period=4000,
    decay=.5,
    init=best_conf['learning_rate/init'],
)
training_path = os.path.join(base, 'loss_best', 'test', 'training')
env.train_optimizer(
    allow_growth=True,
    save_path=training_path,
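
The train_optimizer call above is truncated in the source. A minimal sketch of
how it might continue, reusing the values this snippet prepares; the keyword
names stop and learning_rate are assumptions, not confirmed by this code:

# Hypothetical completion of the truncated call above; the kwarg names
# are guesses based on the variables prepared in this snippet.
env.train_optimizer(
    allow_growth=True,
    save_path=training_path,
    stop=stop_specs,              # assumed kwarg for the step budget
    learning_rate=learning_rate,  # assumed kwarg for the decay schedule
)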
Example #2
    num_output_nodes=[],
    vocabulary_size=vocabulary_size,
    embedding_size=150,
    num_unrollings=NUM_UNROLLINGS,
    init_parameter=3.,
    num_gpus=1,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True)

env.build_optimizer(
    regime='train',
    # regime='inference',
    num_optimizer_unrollings=10,
    num_exercises=NUM_EXERCISES,
    res_size=2000,
    permute=False,
    optimizer_for_opt_type='adam',
    additional_metrics=add_metrics,
    clip_norm=1e6,
    optimizer_init_parameter=.01)

train_opt_add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'optimizer_dropout_keep_prob', 'value': .9}
]
opt_inf_add_feed = [
    {'placeholder': 'dropout',
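
The opt_inf_add_feed list is cut off here. Example #9 below contains a complete
version of the same feed, so a likely completion (an assumption based on that
parallel) is:

# Likely completion, mirroring the complete version in Example #9:
opt_inf_add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'optimizer_dropout_keep_prob', 'value': 1.}
]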
Example #3
BATCH_SIZE = 32
env.build_pupil(
    batch_size=BATCH_SIZE,
    num_layers=2,
    num_hidden_nodes=[1000],
    input_shape=[784],
    num_classes=10,
    init_parameter=3.,
    additional_metrics=add_metrics,
    regime='training_with_meta_optimizer',
)

env.build_optimizer(
    regime='inference',
    additional_metrics=add_metrics,
    chi_application='exp',
)

print('building is finished')
add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'learning_rate', 'value': 4.},
    {'placeholder': 'chi_contribution', 'value': .01}
]
valid_add_feed = [
    {'placeholder': 'dropout', 'value': 1.},
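
valid_add_feed is truncated after the dropout entry. Every complete validation
feed on this page sets dropout to 1., and the chi_contribution placeholder fed
during training presumably needs a value at validation time as well, so a
plausible completion (an assumption) is:

# Plausible completion: disable dropout at validation time and keep
# feeding the chi_contribution placeholder used by this optimizer.
valid_add_feed = [
    {'placeholder': 'dropout', 'value': 1.},
    {'placeholder': 'chi_contribution', 'value': .01}
]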
Example #4
metric_res = best['adam_prep']['loss']

best_on_valid = metric_res['validation']
print('  loss:', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(zip(hp_names, best_on_valid[0]))
env.build_pupil(
    batch_size=BATCH_SIZE,
    **MLP_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
)

env.build_optimizer(
    **OPTIMIZER_PARAMETERS,
    clip_norm=best_conf['clip_norm'],
    optimizer_init_parameter=best_conf['optimizer_init_parameter'],
    pupil_learning_rate=best_conf['pupil_learning_rate'],
)

stop_specs = 20000  # FIXME

learning_rate = dict(
    type='exponential_decay',
    period=4000,
    decay=.5,
    init=best_conf['learning_rate/init'],
)
training_path = os.path.join(base, 'loss_best', 'test', 'training')
env.train_optimizer(
    allow_growth=True,
    save_path=training_path,
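
print_hps is called throughout these snippets but never defined in them. A
minimal stand-in consistent with its call sites, where the third argument (4)
is interpreted here as an indent width (it could equally be a float precision),
might look like:

# Hypothetical stand-in for the undefined print_hps helper.
def print_hps(names, values, indent):
    # Print one 'name: value' pair per hyperparameter, indented.
    for name, value in zip(names, values):
        print(' ' * indent + '%s: %s' % (name, value))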
Example #5
    num_nodes=[100],
    num_output_layers=1,
    num_output_nodes=[],
    vocabulary_size=vocabulary_size,
    embedding_size=150,
    num_unrollings=NUM_UNROLLINGS,
    init_parameter=2.,
    num_gpus=1,
    regime='training_with_meta_optimizer',
    going_to_limit_memory=True,
    additional_metrics=add_metrics,
)

env.build_optimizer(
    regime='inference',
    additional_metrics=add_metrics,
    get_omega_and_beta=True,
    matrix_mod='omega',
)


add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'learning_rate', 'value': 2.}
]
valid_add_feed = [
    {'placeholder': 'dropout', 'value': 1.},
]
tf.set_random_seed(1)
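
These snippets assume surrounding setup that is not shown: at minimum the
imports below, plus the project-specific env object and helpers such as
add_metrics and print_hps, which are not reconstructed here. Note that
tf.set_random_seed is TensorFlow 1.x API; in 2.x it is tf.random.set_seed.

import os
import tensorflow as tf  # 1.x API; in 2.x use tf.random.set_seed(1)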
Example #6
best_on_valid = metric_res['validation']
print('  loss:', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(zip(hp_names, best_on_valid[0]))
env.build_pupil(
    batch_size=BATCH_SIZE,
    **LSTM_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True,
)

env.build_optimizer(
    **OPTIMIZER_PARAMETERS,
    pupil_learning_rate=best_conf['pupil_learning_rate'],
    clip_norm=best_conf['clip_norm'],
)

stop_specs = 1

learning_rate = dict(
    type='exponential_decay',
    period=500,
    decay=.5,
    init=best_conf['learning_rate/init'],
)
training_path = os.path.join(base, 'loss_best', 'test', 'training')
env.train_optimizer(
    allow_growth=True,
    save_path=training_path,
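
The learning_rate dict describes an exponential-decay schedule: starting from
init, the rate is multiplied by decay every period steps. A small illustration
of the values such a schedule produces, assuming the standard staircase
interpretation (how this project applies it internally is not shown here):

# rate(t) = init * decay ** (t // period), staircase decay (assumed)
init, period, decay = 1.0, 500, .5
for step in (0, 500, 1000, 1500):
    print(step, init * decay ** (step // period))
# prints: 0 1.0 / 500 0.5 / 1000 0.25 / 1500 0.125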
Example #7
best_on_valid = metric_res['validation']
print('  loss:', best_on_valid[1])
print_hps(hp_names, best_on_valid[0], 4)
best_conf = dict(zip(hp_names, best_on_valid[0]))
env.build_pupil(
    batch_size=BATCH_SIZE,
    **MLP_SIZE,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
)

env.build_optimizer(
    regime='train',
    # regime='inference',
    num_optimizer_unrollings=NUM_OPTIMIZER_UNROLLINGS,
    num_exercises=NUM_EXERCISES,
    optimizer_for_opt_type='adam',
    additional_metrics=add_metrics,
    clip_norm=best_conf['clip_norm'],
    pupil_learning_rate=best_conf['pupil_learning_rate'],
)

stop_specs = NUM_OPTIMIZER_TRAIN_STEPS

learning_rate = dict(
    type='exponential_decay',
    period=500,
    decay=.5,
    init=best_conf['learning_rate/init'],
)
training_path = os.path.join(base, 'loss_best', 'test', 'training')
env.train_optimizer(
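
build_optimizer appears in two regimes across these snippets: 'train'
(Examples #2, #7, #9) takes training-only arguments such as
num_optimizer_unrollings and num_exercises, while 'inference' (Examples #3,
#5, #8) omits them. A minimal sketch of that toggle, using only arguments that
appear on this page:

# Sketch of the two build_optimizer regimes seen in these snippets.
REGIME = 'train'  # or 'inference'
opt_kwargs = dict(regime=REGIME, additional_metrics=add_metrics)
if REGIME == 'train':
    # training-only arguments, as in Examples #2, #7 and #9
    opt_kwargs.update(
        num_optimizer_unrollings=NUM_OPTIMIZER_UNROLLINGS,
        num_exercises=NUM_EXERCISES,
        optimizer_for_opt_type='adam',
    )
env.build_optimizer(**opt_kwargs)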
Example #8
    num_output_layers=1,
    num_output_nodes=[],
    vocabulary_size=vocabulary_size,
    embedding_size=150,
    num_unrollings=NUM_UNROLLINGS,
    init_parameter=3.,
    num_gpus=1,
    regime='training_with_meta_optimizer',
    going_to_limit_memory=True,
    additional_metrics=add_metrics,
)

env.build_optimizer(
    regime='inference',
    additional_metrics=add_metrics,
    selection_application='shuffle',
    # selection_size=2,  # ignored if selection_application is shuffle
    # num_sel=10,
)

add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'learning_rate', 'value': 4.},
    {'placeholder': 'sel_contribution', 'value': 1.},
    {'placeholder': 'selection_size', 'value': 2},
    {'placeholder': 'num_sel', 'value': 64}
]
valid_add_feed = [
    {'placeholder': 'dropout', 'value': 1.},

Example #9

    num_output_nodes=[],
    vocabulary_size=vocabulary_size,
    embedding_size=150,
    num_unrollings=4,
    init_parameter=3.,
    num_gpus=1,
    regime='training_with_meta_optimizer',
    additional_metrics=add_metrics,
    going_to_limit_memory=True
)

env.build_optimizer(
    regime='train',
    # regime='inference',
    num_optimizer_unrollings=10,
    num_exercises=5,
    res_size=2000,
    permute=False,
    optimizer_for_opt_type='adam',
    additional_metrics=add_metrics
)


train_opt_add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'optimizer_dropout_keep_prob', 'value': .9}
]
opt_inf_add_feed = [
    {'placeholder': 'dropout', 'value': .9},
    {'placeholder': 'optimizer_dropout_keep_prob', 'value': 1.}
]
valid_add_feed = [
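
The final valid_add_feed is cut off at the opening bracket. Every complete
validation feed on this page sets dropout to 1., and the opt_inf_add_feed
above sets optimizer_dropout_keep_prob to 1. at evaluation time, so a
plausible completion (an assumption based on those parallels) is:

# Plausible completion, based on the other validation feeds on this page:
valid_add_feed = [
    {'placeholder': 'dropout', 'value': 1.},
    {'placeholder': 'optimizer_dropout_keep_prob', 'value': 1.}
]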