Example #1
f_train = K.function(inputs, [ce, acc, cm], updates=updates, training=True)
print('Building testing functions ...')
f_score = K.function(inputs, [ce, acc, cm], training=False)
print('Building predicting functions ...')
f_pred_proba = K.function(X, y_proba, training=False)
# Latent spaces
f_z1 = K.function(inputs=X, outputs=z1, training=False)
f_z2 = K.function(inputs=X, outputs=z2, training=False)
f_z3 = K.function(inputs=X, outputs=z3, training=False)
# ===========================================================================
# Training
# ===========================================================================
print('Start training ...')
task = training.MainLoop(batch_size=args.batch,
                         seed=1234,
                         shuffle_level=2,
                         allow_rollback=True,
                         labels=labels)
task.set_checkpoint(MODEL_PATH, f)
task.set_callbacks([
    training.NaNDetector(),
    training.EarlyStopGeneralizationLoss('valid', ce, threshold=5, patience=5)
])
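# The callbacks above follow a recipe repeated throughout these examples:
# NaNDetector aborts (or rolls back) when an output turns NaN, and
# EarlyStopGeneralizationLoss presumably implements Prechelt-style
# generalization-loss early stopping on the validation cross-entropy,
# with `threshold` bounding the tolerated loss increase and `patience`
# the number of violations before stopping.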
task.set_train_task(func=f_train, data=train, epoch=args.epoch, name='train')
task.set_valid_task(func=f_score,
                    data=valid,
                    freq=training.Timer(percentage=0.8),
                    name='valid')
task.run()
# ===========================================================================
# Prediction
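# ===========================================================================
# A hedged sketch of the truncated prediction step (assumed, not part of the
# original snippet); `X_test` here is a hypothetical held-out array:
#   y_proba = f_pred_proba(X_test)                           # posteriors
#   z1, z2, z3 = f_z1(X_test), f_z2(X_test), f_z3(X_test)    # latent codes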
Example #2
# Build trainer
# ===========================================================================
# ====== splitting the data ====== #
idx = np.arange(len(X_train), dtype='int32')
idx_train, idx_valid = train_valid_test_split(idx, train=0.8,
                                              inc_test=False, seed=1234)
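# train_valid_test_split presumably returns two index arrays here (no test
# split since inc_test=False); they are used below to carve an 80/20
# train/validation split.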
X_valid = X_train[idx_valid]
y_valid = y_train[idx_valid]
X_train = X_train[idx_train]
y_train = y_train[idx_train]
print("#Train:", X_train.shape, y_train.shape)
print("#Valid:", X_valid.shape, y_valid.shape)
print("#Test:", X_test.shape, y_test.shape)
# ====== training ====== #
print('Start training ...')
task = training.MainLoop(batch_size=128, seed=1234, shuffle_level=2,
                         allow_rollback=True)
task.set_checkpoint(MODEL_PATH, model)
task.set_callbacks([
    training.NaNDetector(),
    training.EarlyStopGeneralizationLoss('valid', ce, threshold=5, patience=3)
])
task.set_train_task(func=f_train,
                    data=(X_train, y_train),
                    epoch=NB_EPOCH,
                    name='train')
task.set_valid_task(func=f_test,
                    data=(X_valid, y_valid),
                    freq=training.Timer(percentage=0.6),
                    name='valid')
task.set_eval_task(func=f_test,
                   data=(X_test, y_test),
                   name='eval')
task.run()
Example #3
    verbose=True)
K.initialize_all_variables()
# ====== Functions ====== #
print('Building training functions ...')
f_train = K.function(inputs, [ce, acc], updates=updates, training=True)
print('Building testing functions ...')
f_score = K.function(inputs, [ce, acc], training=False)
# Latent spaces
f_z = K.function(inputs=X, outputs=z, training=False)
# ===========================================================================
# Create trainer
# ===========================================================================
if TRAIN_MODEL:
    print('Start training ...')
    task = training.MainLoop(batch_size=args.batch,
                             seed=120825,
                             shuffle_level=2,
                             allow_rollback=True)
    task.set_checkpoint(MODEL_PATH, x_vec)
    task.set_callbacks([
        training.NaNDetector(),
        training.EarlyStopGeneralizationLoss('valid',
                                             ce,
                                             threshold=5,
                                             patience=5)
    ])
    task.set_train_task(func=f_train,
                        data=train,
                        epoch=args.epoch,
                        name='train')
    task.set_valid_task(func=f_score,
                        data=valid,
                        freq=training.Timer(percentage=0.8),
                        name='valid')
    task.run()
Example #4
parameters = f.parameters
print('Parameters:', [p.name for p in parameters])

optz = K.optimizers.RMSProp()
updates = optz.get_updates(cost_train, parameters)

print("Build training function ...")
f_train = K.function([X, y_true], cost_train, updates=updates)
print("Build scoring function ...")
f_score = K.function([X, y_true], [cost_pred, cost_eval])

# ===========================================================================
# Create trainer
# ===========================================================================
print("Create trainer ...")
trainer = training.MainLoop(batch_size=32, seed=12082518, shuffle_level=2)
trainer.set_save(utils.get_modelpath('cifar10.ai', override=True), f)
trainer.set_task(f_train, [X_learn, y_learn], epoch=25, p=1, name='Train')
trainer.set_subtask(f_score, [X_test, y_test], freq=1, name='Valid')
trainer.set_callback([
    training.ProgressMonitor(name='Train', format='Results: {:.4f}'),
    training.ProgressMonitor(name='Valid', format='Results: {:.4f},{:.4f}'),
    # early stopping based on cross-entropy on the test set (not the
    # right procedure; used here only for testing)
    training.EarlyStopGeneralizationLoss(
        name='Valid',
        threshold=5,
        patience=3,
        get_value=lambda x: np.mean([j for i, j in x])),
    training.History()
])
trainer.run()
Example #5
parameters = ops.parameters
optimizer = K.optimizers.SGD(lr=arg['lr'])
updates = optimizer(cost_train, parameters)
print('Building training functions ...')
f_train = K.function([X, y], [cost_train, optimizer.norm], updates=updates)
print('Building testing functions ...')
f_test = K.function([X, y], [cost_test_1, cost_test_2, cost_test_3])
print('Building predicting functions ...')
f_pred = K.function(X, y_pred_score)

# ===========================================================================
# Build trainer
# ===========================================================================
print('Start training ...')
task = training.MainLoop(batch_size=64, seed=12, shuffle_level=2)
task.set_save(get_modelpath(name='mnist.ai', override=True), ops)
task.set_task(f_train, (ds['X_train'], ds['y_train']),
              epoch=arg['epoch'],
              name='train')
task.set_subtask(f_test, (ds['X_test'], ds['y_test']), freq=0.6, name='valid')
task.set_subtask(f_test, (ds['X_test'], ds['y_test']), when=-1, name='test')
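# Scheduling note (inferred from usage, not documented here): freq=0.6
# appears to trigger the 'valid' subtask every 0.6 of a training epoch,
# while when=-1 seems to defer the 'test' subtask to the end of training.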
task.set_callback([
    training.ProgressMonitor(name='train', format='Results: {:.4f}-{:.4f}'),
    training.ProgressMonitor(name='valid',
                             format='Results: {:.4f}-{:.4f}',
                             tracking={2: lambda x: sum(x)}),
    training.ProgressMonitor(name='test', format='Results: {:.4f}-{:.4f}'),
    training.History(),
    training.EarlyStopGeneralizationLoss('valid', threshold=5, patience=3),
    training.NaNDetector(('train', 'valid'), patience=3, rollback=True)
])
task.run()
Example #6
                                         total_stdev=stdev_total,
                                         fontsize=8,
                                         title="Test Sample #%d" %
                                         i if j == 0 else None,
                                         **kws)
            curr_grid_index += 3
    V.plot_save(os.path.join(FIGURE_PATH, 'latent_%d.png' % curr_epoch),
                dpi=200,
                log=True)
    exit()


# ====== training ====== #
runner = T.MainLoop(batch_size=args.batch,
                    seed=1234,
                    shuffle_level=2,
                    allow_rollback=False,
                    verbose=2)
runner.set_callbacks([
    T.NaNDetector(task_name=None, patience=-1, detect_inf=True),
    None if args.no_monitor else T.EpochSummary(
        task_name=('train', 'valid'),
        output_name=(loss, iw_loss, KL_mean, NLLK_mean),
        print_plot=False,
        save_path=os.path.join(FIGURE_PATH, 'summary.png')),
    T.LambdaCallback(fn=plot_epoch, task_name='train')
])
runner.set_train_task(func=f_train,
                      data=[X_train, X_train],
                      epoch=args.epoch,
                      name='train')
Example #7
print('Building training functions ...')
f_train = K.function(inputs, [ce, acc, optimizer.norm],
                     updates=updates,
                     training=True)
print('Building testing functions ...')
f_score = K.function(inputs, [ce, acc],
                     training=False)
# Latent spaces
f_z = K.function(inputs=X, outputs=z,
                 training=False)
# ===========================================================================
# Create trainer
# ===========================================================================
print('Start training ...')
task = training.MainLoop(batch_size=BATCH_SIZE, seed=120825,
                         shuffle_level=2, allow_rollback=False,
                         verbose=4)
task.set_checkpoint(path=MODEL_PATH, obj=x_vec,
                    increasing=True, max_checkpoint=-1)
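# Checkpoint options as understood from this usage (assumptions, not
# verified against the library): increasing=True presumably marks the
# monitored value as better-when-larger, and max_checkpoint=-1 appears
# to keep every checkpoint rather than pruning old ones.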
task.set_callbacks([
    training.NaNDetector(task_name='train', patience=-1),
    training.CheckpointEpoch(task_name='train', epoch_percent=0.5),
    # training.EarlyStopGeneralizationLoss('valid', ce,
    #                                      threshold=5, patience=3)
])
task.set_train_task(func=f_train, data=train,
                    epoch=EPOCH, name='train')
task.set_valid_task(func=f_score, data=valid,
                    freq=training.Timer(percentage=0.5),
                    name='valid')
task.run()
Example #8
                     training=True)
print('Building testing functions ...')
f_test = K.function(inputs=inputs, outputs=[ce, acc, cm], training=False)
print('Building predicting functions ...')
f_pred = K.function(inputs=inputs, outputs=y_prob, training=False)
print("Building other functions ...")
f_e = K.function(inputs=inputs, outputs=E, training=False)
f_z = K.function(inputs=inputs, outputs=Z_infer, training=False)
f_d = K.function(inputs=inputs, outputs=D_infer, training=False)
# ===========================================================================
# Training
# ===========================================================================
print('Start training ...')
task = training.MainLoop(batch_size=BATCH_SIZE,
                         seed=120825,
                         shuffle_level=2,
                         allow_rollback=True,
                         labels=digits)
task.set_checkpoint(MODEL_PATH, [f_encoder, f_decoder])
task.set_callbacks([
    training.NaNDetector(),
    training.EarlyStopGeneralizationLoss('valid', ce, threshold=5, patience=5)
])
task.set_train_task(f_train, train, epoch=25, name='train')
task.set_valid_task(f_test,
                    valid,
                    freq=training.Timer(percentage=0.8),
                    name='valid')
task.run()
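# A hedged follow-up (assumed, not part of the original example): after
# training, the inference functions built above could score new inputs,
# with `X_test` a hypothetical held-out array:
#   y_prob = f_pred(X_test)   # class probabilities
#   Z = f_z(X_test)           # inferred latent codes
#   D = f_d(X_test)           # decoder outputs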

Example #9
    N.Dense(num_units=nb_labels, activation=K.softmax)
],
               debug=True)

y_pred = f(X)
params = [p for p in f.parameters if not has_roles(p, EmbeddingWeight)]
print('Params:', [p.name for p in params])

cost_train = K.mean(K.categorical_crossentropy(y_pred, y))
cost_score = K.mean(K.categorical_accuracy(y_pred, y))

opt = K.optimizers.RMSProp()
updates = opt.get_updates(cost_train, params)

print('Build training function ...')
f_train = K.function([X, y], cost_train, updates)
print('Build scoring function ...')
f_score = K.function([X, y], cost_score)

trainer = training.MainLoop(batch_size=128, seed=1234, shuffle_level=2)
trainer.set_task(f_train, (X_train, y_train),
                 epoch=args['epoch'],
                 name='train')
trainer.set_subtask(f_score, (X_valid, y_valid), freq=1., name='valid')
trainer.set_callback([
    training.ProgressMonitor('train', format='Train:{:.4f}'),
    training.ProgressMonitor('valid', format='Test:{:.4f}'),
    training.History()
])
trainer.run()
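# Hedged usage note (assumed): after training, the scoring function can be
# applied to held-out data, e.g. f_score(X_valid, y_valid), which returns
# the mean categorical accuracy defined above.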
Example #10
# ===========================================================================
updates = optimizer.get_updates(cost_train, parameters)

# ====== create function ====== #
print('Building training functions ...')
f_train = K.function([X, y], [cost_train, optimizer.norm], updates=updates)
print('Building testing functions ...')
f_test = K.function([X, y], [cost_test_1, cost_test_2, cost_test_3])
print('Building predicting functions ...')
f_pred = K.function(X, y_score)

# ===========================================================================
# Build trainer
# ===========================================================================
print('Start training ...')
task = training.MainLoop(batch_size=args['bs'], seed=1208, shuffle_level=2)
task.set_save(get_modelpath(name='digit_audio.ai', override=True), f)
task.set_task(f_train, train_feeder, epoch=args['epoch'], name='train')
task.set_subtask(f_test, valid_feeder, freq=0.6, name='valid')
task.set_subtask(f_test, test_feeder, when=-1, name='test')
task.set_callback([
    training.ProgressMonitor(name='train', format='Results: {:.4f}-{:.4f}'),
    training.ProgressMonitor(name='valid',
                             format='Results: {:.4f}-{:.4f}',
                             tracking={2: lambda x: sum(x)}),
    training.ProgressMonitor(name='test', format='Results: {:.4f}-{:.4f}'),
    training.History(),
    training.EarlyStopGeneralizationLoss('valid', threshold=5, patience=3),
    training.NaNDetector(('train', 'valid'), patience=3, rollback=True)
])
task.run()
Example #11
# ===========================================================================
print('Start training ...')
# ====== some configurations ====== #
model_save_path = '/tmp/EXP_MNIST'
if os.path.exists(model_save_path):
    shutil.rmtree(model_save_path)
os.mkdir(model_save_path)

print("Save path:", model_save_path)
N_EPOCH = 120
BATCH_SIZE = 512

# ====== run the training ====== #
task = training.MainLoop(batch_size=BATCH_SIZE,
                         seed=12,
                         shuffle_level=2,
                         allow_rollback=True,
                         verbose=1)
task.set_checkpoint(os.path.join(model_save_path, 'checkpoint'),
                    ops,
                    max_checkpoint=-1)
task.set_callbacks([
    training.NaNDetector(),
    training.CheckpointEpoch('train', epoch_percent=1.),
    training.EarlyStopGeneralizationLoss('valid',
                                         loss,
                                         threshold=5,
                                         patience=3),
    training.LambdaCallback(fn=lambda t: print(str(t)),
                            task_name='train',
                            signal=training.TaskSignal.EpochEnd),