예제 #1
0
def train_model(args,
                model,
                batchsize,
                precision,
                train,
                test,
                valid,
                patch_size=128,
                model_fname=None):
    """Compile and train *model*, then evaluate it on the test split.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``images``, ``epochs``, ``validbatches``, ``steps``,
        ``earlystop``, ``autosteps`` and ``useeyes``.
    model : Keras model; compiled and trained in place.
    batchsize : int
        Samples per generator batch.
    precision : forwarded to ``generate_data`` so every split uses the
        same data pipeline.
    train, test, valid : sequences of labelled samples for each split.
    patch_size : int
        Side length of the square image patches.
    model_fname : str or None
        When given, the best model (by validation MAE) is checkpointed to
        this path and re-evaluated at the end.

    Returns
    -------
    list
        ``[last_mae, best_mae]`` — test MAE of the final model, and of the
        best checkpoint (``-1`` when no checkpoint was requested).
    """
    image_path = args.images
    epochs = args.epochs
    validbatches = args.validbatches
    steps = args.steps
    early_stop = args.earlystop
    auto_steps = args.autosteps

    model.compile(
        loss=keras.losses.mean_squared_error,
        # clipnorm bounds the gradient norm to keep RMSprop stable
        optimizer=keras.optimizers.RMSprop(lr=0.01, clipnorm=1),
        metrics=['mae'])

    no_nan = keras.callbacks.TerminateOnNaN()
    tboard = keras.callbacks.TensorBoard()
    callbacks = [no_nan, tboard]
    if early_stop:
        callbacks.append(
            keras.callbacks.EarlyStopping(monitor='val_mean_absolute_error',
                                          patience=10))

    if model_fname is not None:
        callbacks.append(
            keras.callbacks.ModelCheckpoint(model_fname,
                                            save_best_only=True,
                                            monitor='val_mean_absolute_error'))

    valid_steps = validbatches
    train_steps = steps
    test_steps = 16
    if auto_steps:
        # Derive step counts so one epoch covers each split exactly once.
        valid_steps = int(len(valid) / batchsize)
        train_steps = int(len(train) / batchsize)
        test_steps = int(len(test) / batchsize)

    print('len(train)=' + str(len(train)) + ', len(valid)=' + str(len(valid)) +
          ', len(test)=' + str(len(test)))
    print('train_steps=' + str(train_steps) + ', valid_steps=' +
          str(valid_steps) + ', test_steps=' + str(test_steps))

    model.fit_generator(generate_data(image_path,
                                      train,
                                      batchsize,
                                      patch_size=patch_size,
                                      precision=precision,
                                      use_eyes=args.useeyes),
                        steps_per_epoch=train_steps,
                        epochs=epochs,
                        callbacks=callbacks,
                        verbose=1,
                        # BUGFIX: pass precision here too, so validation data
                        # is generated by the same pipeline as training data
                        # (previously only the training generator got it).
                        validation_data=generate_data(image_path,
                                                      valid,
                                                      batchsize,
                                                      patch_size=patch_size,
                                                      precision=precision,
                                                      use_eyes=args.useeyes),
                        validation_steps=valid_steps)

    print('Evaluation on latest model:')
    # BUGFIX: precision forwarded for the test split as well (see above).
    score = model.evaluate_generator(generate_data(image_path,
                                                   test,
                                                   batchsize,
                                                   patch_size=patch_size,
                                                   precision=precision,
                                                   use_eyes=args.useeyes),
                                     steps=test_steps)

    last_mae = score[1]
    best_mae = -1  # sentinel: no checkpoint was requested
    if model_fname is not None:
        best_model = keras.models.load_model(model_fname)
        score = best_model.evaluate_generator(generate_data(
            image_path,
            test,
            batchsize,
            patch_size=patch_size,
            precision=precision,
            use_eyes=args.useeyes),
                                              steps=test_steps)
        best_mae = score[1]

    print('Results: last MAE: ' + str(last_mae) + ", best MAE: " +
          str(best_mae))

    return [last_mae, best_mae]
예제 #2
0
import getopt

# Default hyper-parameters; each can be overridden from the command line.
seq_len = 50
num_epoch = 25
batch_size = 32
num_char = 500

try:
    opts, args = getopt.getopt(sys.argv[1:], 's:c:e:b:', ['seq=', 'char=', 'epoch=', 'batch='])
except getopt.GetoptError:
    sys.exit(2)

# Every accepted flag spelling, mapped to the hyper-parameter it overrides.
_flag_to_param = {
    '-s': 'seq_len', '--seq': 'seq_len',
    '-c': 'num_char', '--char': 'num_char',
    '-e': 'num_epoch', '--epoch': 'num_epoch',
    '-b': 'batch_size', '--batch': 'batch_size',
}

# Collect integer overrides; an unrecognised flag aborts, as before.
_overrides = {}
for opt, arg in opts:
    param = _flag_to_param.get(opt)
    if param is None:
        sys.exit(2)
    _overrides[param] = int(arg)

seq_len = _overrides.get('seq_len', seq_len)
num_char = _overrides.get('num_char', num_char)
num_epoch = _overrides.get('num_epoch', num_epoch)
batch_size = _overrides.get('batch_size', batch_size)

x, y = data_utils.generate_data(seq_len, num_char)
num_classes = x.shape[2]
model = model.RNN_model(num_classes=num_classes,
                        num_epoch=num_epoch,
                        batch_size=batch_size,
                        num_char=num_char)
model.train_model(x, y)
예제 #3
0
from data_utils import generate_data
from plot_data import interactive_plot

# Load the train/test split produced by data_utils.
train, test = generate_data()

# Plot the training data and report the returned x-axis bounds.
# NOTE(review): presumably the bounds are chosen interactively by the
# user on the plot — confirm against plot_data.interactive_plot.
xmin, xmax = interactive_plot(train)
print(xmin, xmax)
예제 #4
0
import numpy as np
import matplotlib.pyplot as plt

import stan_utils as stan
from data_utils import (generate_data, test_data)
from mpl_utils import mpl_style

plt.style.use(mpl_style)

seed = 42

data_kwds = dict(N=1000, D=10, J=3, K=5, seed=seed, full_output=True)
data, truths = generate_data(**data_kwds)

K = 2
fig, axes = plt.subplots(K, K, figsize=(10, 10))

# Corner-style layout: each panel scatters one pair of the true latent
# factors against each other; (axes index, x-factor, y-factor).
theta = truths["theta"].T
panels = (
    ((1, 0), 0, 1),
    ((0, 0), 0, 2),
    ((1, 1), 2, 1),
)
for (row, col), jx, jy in panels:
    ax = axes[row, col]
    ax.scatter(theta[jx], theta[jy])
    ax.set_xlabel(r"$J_{{{0}}}$".format(jx))
    ax.set_ylabel(r"$J_{{{0}}}$".format(jy))

# The upper-right panel is unused in this 2x2 corner layout.
axes[0, 1].set_visible(False)

fig.tight_layout()
예제 #5
0
    # NOTE(review): this fragment is the tail of a training function whose
    # header lies outside this chunk; model, image_path, train, valid, test,
    # batch_size, patch_size, train_len, valid_len, epochs, results and
    # lbreak are presumably bound by the enclosing scope — confirm upstream.
    #if exists("student.mdl"):
    #    model.load_weights("student.mdl")

    # Reduce the learning rate when validation MAE stops improving.
    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        monitor='val_mean_absolute_error')
    # Stop after 5 epochs with no validation-MAE improvement.
    early_stop = keras.callbacks.EarlyStopping(
        monitor='val_mean_absolute_error', patience=5)
    # Keep only the checkpoint with the best validation MAE.
    model_checkpoint = keras.callbacks.ModelCheckpoint(
        "best_student.mdl",
        save_best_only=True,
        monitor='val_mean_absolute_error')
    # Abort training immediately if the loss becomes NaN.
    no_nan = keras.callbacks.TerminateOnNaN()

    model.fit_generator(
        generate_data(image_path, train, batch_size, patch_size),
        steps_per_epoch=train_len / batch_size,
        epochs=epochs,
        callbacks=[reduce_lr, early_stop, model_checkpoint, no_nan],
        verbose=1,
        validation_data=generate_data(image_path, valid, batch_size,
                                      patch_size),
        validation_steps=valid_len / batch_size)

    # Persist the final weights (not necessarily the best checkpoint).
    model.save("student.mdl")

    # Evaluate the final model on 16 test batches; score[0] is the loss.
    results.write('Evaluation on latest model:' + lbreak)
    score = model.evaluate_generator(generate_data(image_path, test,
                                                   batch_size, patch_size),
                                     steps=16)
    results.write('\tTest loss:' + str(score[0]) + lbreak)
def _evaluate_and_report(eval_model, label, image_path, test, patch_size):
    """Evaluate *eval_model* on 16 test batches and print loss and MAE.

    Uses the module-level global ``batch_size`` (as the original inline
    code did).
    """
    print('Evaluation on ' + label + ' model:')
    score = eval_model.evaluate_generator(generate_data(
        image_path, test, batch_size, patch_size),
                                          steps=16)
    print('\tTest loss:', score[0])
    # BUGFIX: the model is compiled with metrics=['mae'], so score[1] is
    # the mean absolute error — it was previously mislabelled "Test MSE".
    print('\tTest MAE:', score[1])


def _save_prediction_report(eval_model, image_path, test, patch_size,
                            num_test_samples, tag, description):
    """Predict on *num_test_samples* test patches and persist the results.

    Saves the raw predictions to ``predictions_<tag>.npy`` and a
    per-sample comparison against the labels to
    ``predictions_vs_test_<tag>.csv``.  *description* only affects the
    progress message.
    """
    print("\nCalculating error over " + str(num_test_samples) +
          " test samples... (using " + description + ")")
    predictions = eval_model.predict_generator(generate_data(
        image_path, test, num_test_samples, patch_size),
                                               steps=1)
    np.save("predictions_" + tag + ".npy", predictions)

    # Make it easy to compare predictions to actuals for test data
    test_vs_predict = []
    for i in range(num_test_samples):
        test_vs_predict.append({
            "file": test[i][0],
            "brightness": test[i][1],
            "sharpness": test[i][2],
            "brightness_student": predictions[i][0],
            "sharpness_student": predictions[i][1],
        })

    print("Saving errors on " + str(num_test_samples) + " to csv")
    pd.DataFrame(test_vs_predict).to_csv("predictions_vs_test_" + tag + ".csv")


def train(patch_size, label_path, image_path, train_all_layers):
    """Train (or fine-tune) the student model and report test performance.

    Loads an existing ``best_student.mdl`` checkpoint when present,
    otherwise builds a fresh model; trains with checkpointing, then
    evaluates and dumps prediction reports for both the final and the
    best model.

    Relies on the module-level globals ``batch_size``, ``steps_per_epoch``
    and ``epochs`` (defined elsewhere in this file).

    Parameters
    ----------
    patch_size : int
        Side length of the square input patches.
    label_path : str
        Path consumed by ``load_data_to_labels``.
    image_path : str
        Directory containing the source images.
    train_all_layers : bool
        When True, make every layer trainable before compiling.
    """
    # input image dimensions: TODO get from data or command-line params
    input_shape = (patch_size, patch_size, 3)

    train, test, valid = load_data_to_labels(label_path,
                                             train_fraction=0.7,
                                             test_fraction=0.15)

    train_len = len(train)
    test_len = len(test)
    valid_len = len(valid)
    print('Input data: train_len: ' + str(train_len) + ", test_len: " +
          str(test_len) + ", valid_len: " + str(valid_len))

    if exists("best_student.mdl"):
        print("Found a pre-trained model, so loading that")
        model = load_model("best_student.mdl")
        print("Making all layers trainable")
        model = make_all_layers_trainable(model)
    else:
        print("No pre-trained model found, creating a new one")
        model = create_model(input_shape)

    if train_all_layers:
        model = make_all_layers_trainable(model)

    model.compile(
        loss=keras.losses.mean_squared_error,
        # clipnorm bounds the gradient norm to keep RMSprop stable
        optimizer=keras.optimizers.RMSprop(lr=0.01, clipnorm=1),
        metrics=['mae'])

    # Reduce LR on validation-MAE plateau, stop early after 10 stalled
    # epochs, and keep only the best checkpoint.
    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        monitor='val_mean_absolute_error')
    early_stop = keras.callbacks.EarlyStopping(
        monitor='val_mean_absolute_error', patience=10)
    model_checkpoint = keras.callbacks.ModelCheckpoint(
        "best_student.mdl",
        save_best_only=True,
        monitor='val_mean_absolute_error')
    no_nan = keras.callbacks.TerminateOnNaN()
    tb = keras.callbacks.TensorBoard()
    plotter = Plotter(input_shape[0])

    # TODO New callback to do sample inference after each epoch
    model.fit_generator(generate_data(image_path, train, batch_size,
                                      patch_size),
                        steps_per_epoch=steps_per_epoch,
                        epochs=epochs,
                        callbacks=[
                            reduce_lr, early_stop, model_checkpoint, no_nan,
                            tb, plotter
                        ],
                        verbose=1,
                        validation_data=generate_data(image_path, valid,
                                                      batch_size, patch_size),
                        validation_steps=valid_len / batch_size)

    # Persist the final weights (the best checkpoint is saved separately).
    model.save("transfer_student.mdl")

    _evaluate_and_report(model, 'latest', image_path, test, patch_size)

    best_model = keras.models.load_model('best_student.mdl')
    _evaluate_and_report(best_model, 'best', image_path, test, patch_size)

    # Dump per-sample prediction reports for both models.
    num_test_samples = 512
    _save_prediction_report(model, image_path, test, patch_size,
                            num_test_samples, "latest", "latest model")
    _save_prediction_report(best_model, image_path, test, patch_size,
                            num_test_samples, "best", "best model")