# =========================== Example #1 ===========================
# Compile the symbolic graph into callable functions and drive CIFAR-10
# training with odin's training.MainLoop.
# NOTE(review): X, y_true, cost_train, cost_pred, cost_eval, updates, f,
# and the data splits (X_learn, y_learn, X_test, y_test) are defined
# outside this chunk — assumed valid here.
print("Build training function ...")
# Training function: one optimization step — applies `updates` (the
# optimizer's parameter updates) and returns the training cost.
f_train = K.function([X, y_true], cost_train, updates=updates)
print("Build scoring function ...")
# Scoring function: evaluation only (no updates); returns both costs.
f_score = K.function([X, y_true], [cost_pred, cost_eval])

# ===========================================================================
# Create trainer
# ===========================================================================
print("Create trainer ...")
# shuffle_level=2: presumably full dataset shuffling each epoch — TODO confirm
# against odin's MainLoop documentation.
trainer = training.MainLoop(batch_size=32, seed=12082518, shuffle_level=2)
# Persist the model `f` to the given path (override=True overwrites any
# existing checkpoint file).
trainer.set_save(utils.get_modelpath('cifar10.ai', override=True), f)
# Main task: 25 epochs over the training split; p=1 presumably means the
# task runs with probability 1 every iteration — verify against MainLoop.
trainer.set_task(f_train, [X_learn, y_learn], epoch=25, p=1, name='Train')
# Validation sub-task, run once per epoch (freq=1).
trainer.set_subtask(f_score, [X_test, y_test], freq=1, name='Valid')
trainer.set_callback([
    # One format placeholder per returned cost: Train yields a single cost,
    # Valid yields [cost_pred, cost_eval].
    training.ProgressMonitor(name='Train', format='Results: {:.4f}'),
    training.ProgressMonitor(name='Valid', format='Results: {:.4f},{:.4f}'),
    # early stop based on crossentropy on test (not a right procedure,
    # but only for testing)
    training.EarlyStopGeneralizationLoss(
        name='Valid',
        threshold=5,
        patience=3,
        # Valid emits (cost_pred, cost_eval) pairs; average the second
        # element of each pair as the stopping signal.
        get_value=lambda x: np.mean([j for i, j in x])),
    training.History()
])
trainer.run()

# ===========================================================================
# Evaluation and visualization
# ===========================================================================
# =========================== Example #2 ===========================
# MNIST training loop: compile a prediction function, configure the
# MainLoop with train/valid/test tasks, then inspect training history.
# NOTE(review): f_train, f_test, X, y_pred_score, ds, arg, and ops are
# defined outside this chunk — assumed valid here.
print('Building predicting functions ...')
# Prediction-only function: maps raw input X to class scores.
f_pred = K.function(X, y_pred_score)

# ===========================================================================
# Build trainer
# ===========================================================================
print('Start training ...')
task = training.MainLoop(batch_size=64, seed=12, shuffle_level=2)
# Checkpoint the `ops` model object to the given path each save.
task.set_save(get_modelpath(name='mnist.ai', override=True), ops)
task.set_task(f_train, (ds['X_train'], ds['y_train']),
              epoch=arg['epoch'],
              name='train')
# freq=0.6: presumably the validation sub-task fires after every 60% of
# an epoch — TODO confirm against MainLoop's freq semantics.
task.set_subtask(f_test, (ds['X_test'], ds['y_test']), freq=0.6, name='valid')
# when=-1: presumably run once at the very end of training — verify.
task.set_subtask(f_test, (ds['X_test'], ds['y_test']), when=-1, name='test')
task.set_callback([
    training.ProgressMonitor(name='train', format='Results: {:.4f}-{:.4f}'),
    training.ProgressMonitor(name='valid',
                             format='Results: {:.4f}-{:.4f}',
                             # Extra tracking of output index 2, aggregated
                             # by summation across batches.
                             tracking={2: lambda x: sum(x)}),
    training.ProgressMonitor(name='test', format='Results: {:.4f}-{:.4f}'),
    training.History(),
    # Stop early when generalization loss on 'valid' exceeds the threshold
    # for `patience` consecutive checks.
    training.EarlyStopGeneralizationLoss('valid', threshold=5, patience=3),
    # Roll back to the last checkpoint if NaNs appear in train/valid costs.
    training.NaNDetector(('train', 'valid'), patience=3, rollback=True)
])
task.run()

# ====== plot the training process ====== #
# The History callback is retrievable by name from the finished loop.
task['History'].print_info()
task['History'].print_batch('train')
task['History'].print_batch('valid')
task['History'].print_epoch('test')
# =========================== Example #3 ===========================
    N.Dense(num_units=nb_labels, activation=K.softmax)
],
               debug=True)

# Build cost/update graph from model `f`, compile train/score functions,
# and run the MainLoop. NOTE(review): f, X, y, X_train/y_train,
# X_valid/y_valid, args, nb_labels, and the role helpers (has_roles,
# EmbeddingWeight) come from outside this chunk — assumed valid here.
y_pred = f(X)
# Train all parameters except embedding weights (they are frozen here —
# presumably pretrained embeddings; verify with the surrounding script).
params = [p for p in f.parameters if not has_roles(p, EmbeddingWeight)]
print('Params:', [p.name for p in params])

# NOTE(review): argument order (y_pred, y) assumed to match odin's
# categorical_crossentropy(pred, true) convention — TODO confirm.
cost_train = K.mean(K.categorical_crossentropy(y_pred, y))
# Despite the name, this is accuracy (higher is better), not a loss.
cost_score = K.mean(K.categorical_accuracy(y_pred, y))

opt = K.optimizers.RMSProp()
updates = opt.get_updates(cost_train, params)

print('Build training function ...')
# Training step: computes cost_train and applies the RMSProp updates.
f_train = K.function([X, y], cost_train, updates)
print('Build scoring function ...')
# Evaluation-only function: no parameter updates.
f_score = K.function([X, y], cost_score)

trainer = training.MainLoop(batch_size=128, seed=1208, shuffle_level=2)
trainer.set_task(f_train, (X_train, y_train),
                 epoch=args['epoch'],
                 name='train')
# freq=1.: validate once per epoch.
trainer.set_subtask(f_score, (X_valid, y_valid), freq=1., name='valid')
trainer.set_callback([
    training.ProgressMonitor('train', format='Train:{:.4f}'),
    training.ProgressMonitor('valid', format='Test:{:.4f}'),
    training.History()
])
trainer.run()
# =========================== Example #4 ===========================
# ODIN (out-of-distribution detection) variant: compile score/predict
# functions and run the training loop. NOTE(review): f_train, cost_score,
# X, y, y_odin_score, ds, batch_size, and nb_epoch are defined outside
# this chunk — assumed valid here.
print('Build scoring function ODIN ...')
f_score = K.function([X, y], cost_score)
print('Build predicting function ODIN ...')
# Prediction function returning the ODIN score for input X.
f_pred = K.function(X, y_odin_score)

trainer = training.MainLoop(batch_size=batch_size,
                            seed=12082518,
                            shuffle_level=2)
trainer.set_task(f_train, (ds['X_train'], ds['y_train']),
                 epoch=nb_epoch,
                 name='Train')
# NOTE(review): the 'Valid' sub-task scores on the test split — same
# test-as-validation shortcut as the other examples.
trainer.set_subtask(f_score, (ds['X_test'], ds['y_test']),
                    freq=1.0,
                    name='Valid')
trainer.set_callback([
    # Positional args: (task name, format string) — one placeholder since
    # each task returns a single scalar cost.
    training.ProgressMonitor('Train', 'Result: {:.4f}'),
    training.ProgressMonitor('Valid', 'Result: {:.4f}'),
    training.History()
])
trainer.run()

# ===========================================================================
# Keras
# ===========================================================================
# Equivalent model re-built with Keras for comparison. Construction is
# cut off at the end of this chunk — layers are added below (not shown).
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Convolution1D, MaxPooling1D

model = Sequential()