Example #1
0
# --- Model hyperparameters (Example #1) ---
config.p.r = 128                      # presumably the recurrent state size -- TODO confirm
config.regularization = 1.0e-4        # NOTE(review): set on config, not config.p, unlike every other field -- verify intentional
config.p.lr_reduction = 2             # learning-rate reduction factor -- confirm semantics against Trainer
config.p.gru_depth = 1                # number of stacked GRU layers
config.p.dropout = None               # dropout disabled
config.p.augmentation = False         # no data augmentation
config.p.structure_data = True
config.p.attention = True
config.p.full_state_attention = False
config.p.bidirectional = False
config.p.out_layers = 1               # number of output layers

config.p.max_epochs = None            # no epoch cap; run until stopped externally

# Build the trainer from scratch (load=False); checkpoints go to ./weights/gen2000.
train_object = trainer.Trainer(config,
                               load=False,
                               draw_plots=False,
                               save_location='./weights/gen2000',
                               model=model)

train_object.v.optimizer.gradient_clipping = 100.0  # clip the gradient norm to this amount

print()
print()

# Run training. NOTE(review): the original pasted snippet was truncated
# mid-call (no closing parenthesis); it is restored here so the block parses.
# Any trailing keyword arguments lost in the truncation are unknown.
train_object.run_many_epochs(language_model,
                             plot_every=1000,
                             write_every=10,
                             early_stop=None,       # early stopping disabled
                             save_every=30,
                             validate_every=150000,
                             batch_size=10)
Example #2
0
# --- Model hyperparameters (Example #2) ---
config.p.r = 128                      # presumably the recurrent state size -- TODO confirm
config.regularization = 1.0e-4        # NOTE(review): set on config, not config.p, unlike every other field -- verify intentional
config.p.lr_reduction = 10            # learning-rate reduction factor -- confirm semantics against Trainer
config.p.gru_depth = 2                # number of stacked GRU layers
# The following fields are left at their defaults here (set explicitly in Example #1):
#   config.p.dropout, config.p.augmentation, config.p.attention,
#   config.p.full_state_attention
config.p.structure_data = True
config.p.bidirectional = True         # bidirectional GRU, unlike Example #1
config.p.out_layers = 1               # number of output layers

config.p.max_epochs = None            # no epoch cap; run until stopped externally

# Build the trainer from scratch (load=False); checkpoints go to ./weights/payout_5_med.
train_object = trainer.Trainer(config,
                               load=False,
                               draw_plots=False,
                               save_location='./weights/payout_5_med',
                               model=model)

train_object.v.optimizer.gradient_clipping = 100.0  # clip the gradient norm to this amount

print()
print()

# Run training. NOTE(review): the original pasted snippet was truncated
# mid-call (no closing parenthesis); it is restored here so the block parses.
# Any trailing keyword arguments lost in the truncation are unknown.
train_object.run_many_epochs(language_model,
                             plot_every=10000,
                             write_every=10,
                             early_stop=None,       # early stopping disabled
                             save_every=120,
                             validate_every=400000,
                             batch_size=100)