Example #1
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, 1, 4), Input data for train.
        y_train: ndarray(number, ), result data for train.
        (7776, 4, 1)
        (7776,)
        name: String, name of model.
        config: Dict, parameter for train.
    """
    opt = keras.optimizers.Adam(learning_rate=0.001)
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    model.compile(loss="mse", optimizer=opt, metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/model_out/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/model_loss/' + name + '_loss.csv',
              encoding='utf-8',
              index=False)
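
A minimal usage sketch of the train_model API documented above; the config values and the model variable are illustrative assumptions, not part of the original project:

import numpy as np

# Hypothetical call: "batch" and "epochs" are the only config keys train_model reads.
config = {"batch": 256, "epochs": 100}   # assumed values
X_train = np.zeros((7776, 4, 1))         # example shape from the docstring
y_train = np.zeros((7776,))
# train_model(my_lstm, X_train, y_train, 'lstm', config)  # my_lstm: any Keras model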
Example #2
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """
    mlflow.set_tracking_uri("http://127.0.0.1:5000")
    tracking_uri = mlflow.get_tracking_uri()
    print("Current tracking uri: {}".format(tracking_uri))

    tags = {"usuario": "Anonymous"}

    mlflow.set_experiment("traffic_flow-saes")
    with mlflow.start_run() as run:
        mlflow.set_tags(tags)
        mlflow.keras.autolog()

        model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
        #early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
        hist = model.fit(X_train,
                         y_train,
                         batch_size=config["batch"],
                         epochs=config["epochs"],
                         validation_split=0.05)

        model.save('model/' + name + '.h5')
        df = pd.DataFrame.from_dict(hist.history)
        df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
        mlflow.log_param("Run_id", run.info.run_id)
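
Because mlflow.keras.autolog() records parameters and per-epoch metrics inside the active run, they can be read back afterwards. A small sketch reusing the run object from the example above:

# Fetch the run logged above and inspect what autolog stored.
client = mlflow.tracking.MlflowClient()
run_data = client.get_run(run.info.run_id).data
print(run_data.params)   # autologged parameters (e.g. batch_size, epochs)
print(run_data.metrics)  # autologged metrics (e.g. loss, mape)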
Example #3
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
        
    """
    # Define the Keras TensorBoard callback.
    logdir = os.path.join(
        "logs",
        "fit",
        name,
        'lstm_4_4',
        datetime.now().strftime("%Y%m%d-%H%M"),
    )
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

    model.compile(loss="mse", optimizer="adam", metrics=['mape'])
    early = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
    hist = model.fit(
        X_train, y_train,
        batch_size=config["batch"],
        epochs=config["epochs"],
        validation_split=0.05,
        callbacks=[tensorboard_callback, early])
    print(name)
    model.save('model/' + name + '4_layers_4.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
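
The artifacts written above can be inspected after training; a sketch assuming the same name variable and the keras/pandas imports already used in the example:

# Reload the saved model and the training history CSV.
trained = keras.models.load_model('model/' + name + '4_layers_4.h5')
history = pd.read_csv('model/' + name + ' loss.csv')
print(history[['loss', 'val_loss']].tail())  # last few epochs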
Example #4
def train_model(model, X_train, y_train, name, config):
    model.compile(loss='mse', optimizer='rmsprop', metrics=['mape'])
    hist = model.fit(X_train, y_train,
                     batch_size=config['batch'],
                     epochs=config['epochs'],
                     validation_split=0.05)
    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + '_loss.csv', encoding='utf-8', index=False)
Example #5
def _train_test():
    tag_folder = '../data/2015/training/event_tags/'
    data_folder = '../data/2015/training/stanford_parse/'
    data = get_data(tag_folder, data_folder)

    if not combined:
        train_data_context_x, train_data_context_pos_deprel, train_data_lemma_x, train_data_pos_deprel, train_data_children_pos_deprel, train_data_y = _get_data(
            data)
    else:
        train_x1, train_y1 = _get_joint(data)

    tag_folder = '../data/2015/eval/event_tags/'
    data_folder = '../data/2015/eval/stanford_parse/'
    data = get_data(tag_folder, data_folder)

    if not combined:
        test_data_context_x, test_data_context_pos_deprel, test_data_lemma_x, test_data_pos_deprel, test_data_children_pos_deprel, test_data_y = _get_data(
            data)
    else:
        train_x2, train_y2 = _get_joint(data)

    tag_folder = '../data/2016/event_tags/'
    data_folder = '../data/2016/stanford_parse/'
    data = get_data(tag_folder, data_folder)
    train_x3, train_y3 = _get_joint(data)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if not combined:
        model.fit([
            np.array(train_data_context_x + test_data_context_x),
            np.array(train_data_context_pos_deprel +
                     test_data_context_pos_deprel),
            np.array(train_data_lemma_x + test_data_lemma_x),
            np.array(train_data_pos_deprel + test_data_pos_deprel),
            np.array(train_data_children_pos_deprel +
                     test_data_children_pos_deprel),
        ],
                  np.array(train_data_y + test_data_y),
                  batch_size=1500,
                  nb_epoch=15,
                  verbose=1,
                  shuffle=True)
    else:
        model.fit(np.array(train_x1 + train_x2 + train_x3),
                  np.array(train_y1 + train_y2 + train_y3),
                  batch_size=1000,
                  nb_epoch=15,
                  verbose=1,
                  shuffle=True)

    model.save('realis_models/model_6.h5')
    """
Example #6
def train_model(model, X_train, y_train, name, config):
    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)
    temp = 'scaler'
    model.save('model/' + name + temp + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + temp + ' loss.csv',
              encoding='utf-8',
              index=False)
Example #7
def train_model(model, X_train, y_train, name, config):

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #8
def train_allDense_model(model, X_train, y_train, name, config, lag):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"])

    model.save('model/' + name + '-' + str(lag) + '.h5')
Example #9
def run(savefile, lr):
    model = None
    try:
        model = load_model(savefile)
        print('Loaded model from', savefile)
    except OSError:
        print('Saved model not found. Making new model...')
        from model import model

    optimizer = SGD(lr=lr,
                    clipnorm=1.,
                    decay=1e-6,
                    momentum=0.9,
                    nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,  # use the SGD optimizer built above, not a fresh 'adam'
                  metrics=['accuracy'])

    train(model)
    save(model)
Example #10
def train_model(model, X_train, y_train, name, config, lag):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '-' + str(lag) + '.h5')
Example #11
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['rmse'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(X_train,
                     y_train,
                     batch_size=config["batch"],
                     epochs=config["epochs"],
                     validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
Example #12
    def train():

        # compile the model with all of the training parameters (should be done *after* setting layers to non-trainable)
        model.compile(optimizer=training_config['optimizer'],
                      loss=training_config['loss_function'],
                      metrics=training_config['metrics'])


        # create csv logger to store to CSV
        csv_logging = callbacks.CSVLogger(training_filepath, separator=',', append=False)
        model_checkpoint = callbacks.ModelCheckpoint(snapshots_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                                     monitor='val_loss', verbose=0,
                                                     save_best_only=False, save_weights_only=False,
                                                     mode='auto', period=1)

        # train the model on the new data for a few epochs
        print "training model with full model"
        model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=training_config['nb_epoch'],
            validation_data=val_generator,
            nb_val_samples=nb_val_samples,
            callbacks=[csv_logging, model_checkpoint]
        )
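
The comment in Example #12 stresses compiling only after layers are made non-trainable; a minimal sketch of that freezing step, assuming a hypothetical base_model whose pre-trained layers are being fine-tuned:

# Freeze the pre-trained layers first, then compile so the trainable flags take effect.
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer=training_config['optimizer'],
              loss=training_config['loss_function'],
              metrics=training_config['metrics'])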
Example #13
import keras
from keras.callbacks import CSVLogger

import path
from model import model
import batch
import logger

path.run.make()
run = path.run.loadCurrent()

model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

model.fit_generator(batch.trainIterator,
                    validation_data=batch.validationIterator,
                    steps_per_epoch=batch.trainIterator.n // batch.size,
                    epochs=8,
                    callbacks=[
                        keras.callbacks.CSVLogger(run.log,
                                                  separator=',',
                                                  append=False)
                    ])

model.save(run.model)
logger.addModelDiagram(run)
logger.addModelSummary(run)
logger.addAccuracyPlot(run)
Example #14
        'labels':
        np.array([int(label) for label in labels])
    })
train_dataset_info = np.array(train_dataset_info)

# images, labels = next(train_datagen)
# fig, ax = plt.subplots(1,5,figsize=(25,5))
# for i in range(5):
#    ax[i].imshow(images[i])
# print('min: {0}, max: {1}'.format(images.min(), images.max()))
# fig.show()

model = model((299, 299, 3), output_shape=28)

model.compile(loss="categorical_crossentropy",
              optimizer=Adam(1e-3),
              metrics=["acc"])

checkpointer = ModelCheckpoint('./res/model/inceptionv4.model',
                               verbose=2,
                               save_best_only=True)
tensorboard = TensorBoard(log_dir='./res/logs',
                          histogram_freq=0,
                          batch_size=BATCH_SIZE,
                          write_graph=True,
                          write_grads=False,
                          write_images=False,
                          embeddings_freq=0,
                          embeddings_layer_names=None,
                          embeddings_metadata=None,
                          embeddings_data=None)
Example #15
y_train = np.asarray(y_train) / 100
y_test = np.asarray(y_test) / 100

FR = FR_model()
# normalizing the dataset
train_set = normalize(train_set)
test_set = normalize(test_set)

# getting corresponding embeddings
train_set = FR(train_set)
test_set = FR(test_set)

model = model()

model.compile(tf.keras.optimizers.Adam(),
              tf.keras.losses.MeanAbsoluteError(),
              metrics=['accuracy'])

model.fit(train_set,
          y_train,
          batch_size=opt.b_size,
          epochs=opt.epochs,
          validation_data=(test_set, y_test))

optimizer = tf.keras.optimizers.Adam()
batch_size = opt.b_size
"""for i in range(opt.epochs):
    n_batches=int(len(train_set)/opt.b_size)
    loss_t=0
    loss_vt=0
    it=0
Example #16
eval_fp = os.path.join(args.data_dir, "eval.tfrecord")
assert os.path.isfile(eval_fp), f"No validation file found at {eval_fp}"

params = params.Params(json_path)

model = model.StylometerModel(params)

log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
os.makedirs(log_dir)

train = dataset.training_dataset(train_fp, params)
validation = dataset.training_dataset(eval_fp, params)

model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",  # tf.contrib.losses.metric_learning.triplet_semihard_loss
    metrics=["accuracy"],  # keras_metrics.precision(), keras_metrics.recall()
)

# Creating Keras callbacks
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                      histogram_freq=1)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    "training_checkpoints/weights.{epoch:02d}.hdf5",  # -{val_loss:.2f}
    save_freq=5,
    monitor="val_loss",
)
os.makedirs("training_checkpoints/", exist_ok=True)
early_stopping_checkpoint = tf.keras.callbacks.EarlyStopping(patience=5)

history = model.fit(
Example #17
from model import model
from preprocess.generator import train_generator, valid_generator


# Custom loss function
def huber_loss(y_true, y_pred):
    return tf.losses.huber_loss(y_true, y_pred)


# Creating the model
model = model()
model.summary()

# Compile
adam = Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=1e-10, decay=0.0)
model.compile(optimizer=adam, loss=huber_loss)

# Train
epochs = 10
train_gen = train_generator(sample_per_batch=2, batch_number=1690)
val_gen = valid_generator(sample_per_batch=50, batch_number=10)

checkpoints = ModelCheckpoint('weights/weights{epoch:03d}.h5', save_weights_only=True, period=1)
history = model.fit_generator(train_gen, steps_per_epoch=1690, epochs=epochs, verbose=1, shuffle=True,
                              validation_data=val_gen, validation_steps=10,
                              callbacks=[checkpoints], max_queue_size=100)

with open('history.txt', 'a+') as f:
    print(history.history, file=f)

print('All Done!')
Example #18
from model import model, preprocess_input, smodel
from keras.optimizers import SGD
from keras.callbacks import ReduceLROnPlateau
from generator import Generator
import json

if __name__ == '__main__':
    model = smodel()
    opt = SGD(lr=0.01, momentum=0.9)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    print(model.summary())
    model.load_weights('weights/classifier.h5')

    listsss = json.load(open('list_withbndbx.json', 'r'))
    train_gen = Generator(
        listsss[:7211],
        '/home/palm/PycharmProjects/DATA/Tanisorn/imgCarResize/',
        preprocess_function=preprocess_input)
    test_gen = Generator(
        listsss[7211:],
        '/home/palm/PycharmProjects/DATA/Tanisorn/imgCarResize/',
        preprocess_function=preprocess_input)
    reduce_lr_01 = ReduceLROnPlateau(monitor='val_1st_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_02 = ReduceLROnPlateau(monitor='val_2nd_acc',
                                     factor=0.2,
""" Normalization """
x_train = x_train / 255.
x_test = x_test / 255.

""" From numerical label to categorical label """
y_train = to_categorical(y_train, classes)
y_test = to_categorical(y_test, classes)

""" Building the Model """
model = model()
model.summary()
""" End of the Model """

""" Compilation """
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

""" Training """
history = model.fit(x_train, y_train, batch_size=256, epochs=120, verbose=2, shuffle=True,
                    validation_data=(x_test, y_test))

""" Evaluation of the model """
score = model.evaluate(x_test, y_test, verbose=1)
print('Test set loss: {0:.2f} and Accuracy: {1:.2f}%'.format(score[0], score[1] * 100))

""" Saving the weight file """
model.save('MODEL DATA/cifar-10.h5')

""" Saving the history in a text file """
with open('history.txt', 'a+') as f:
    print(history.history, file=f)
Example #20
for training_filename in tqdm.tqdm(randomized_files):

    try:
        audio, _ = librosa.load(training_filename, sr=p.sr, mono=True)
        print(audio)
        audio = audio.reshape(-1, 1)
        training_data = training_data + audio.tolist()
    except Exception:
        # skip files that librosa cannot load
        pass

training_data = np.array(training_data)
print(training_data)
training_audio_length = len(training_data)
np.save('training_data', training_data)
logger.info('preprocessing..')
training_data = preprocess.preprocess(training_data, p.bs, p.fsz, p.fs)
logger.info('**training started**')

model = model.WaveNet(p.isz, p.nl, p.ks, p.dr, p.nf).model()
model.compile(loss='categorical_crossentropy', optimizer='adam')
if p.s:
    model.summary()

model.fit(training_data,
          epochs=p.epochs,
          steps_per_epoch=training_audio_length // 128,
          verbose=1,
          callbacks=[earlystopping_callback, tensorboard_callback])
model.save('src/trained_model/modelWN.h5')
logger.info("model saved in model/n Training finished successfully")
Example #21
# Load data collected from the simulator
csv_data_path = "../data/driving_log.csv"
output_model_file_path = '../results/models/model.h5'

simulator_data = DataLoadHelper(csv_data_path)
train_generator = simulator_data.load_train_data_from_generator()
validation_generator = simulator_data.load_validation_data_from_generator()

#Clear any previous keras sessions
K.clear_session()

#Create an object to the model, compile, fit and save it to .h5 file
model = model(input_size=(160, 320, 3))

model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator,
                                     samples_per_epoch=simulator_data.sample_training_size(),
                                     validation_data=validation_generator,
                                     nb_val_samples=simulator_data.sample_validation_size(),
                                     nb_epoch=3, verbose=1)

model.save(output_model_file_path)

### print the keys contained in the history object
print(history_object.history.keys())

### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
Example #22
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=10,
                         zoom_range=0.05,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.15,
                         horizontal_flip=False,
                         fill_mode="nearest")

# initialize and compile our deep neural network
print("[INFO] Compiling model...")
opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model = model.get_model()
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])

# train the network
print("[INFO] Training network...")
H = model.fit(aug.flow(trainX, trainY, batch_size=BS),
              validation_data=(testX, testY),
              steps_per_epoch=len(trainX) // BS,
              epochs=EPOCHS,
              verbose=1)

# define the list of label names
labelNames = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
labelNames = [l for l in labelNames]

# evaluate the network
Example #23
from keras.optimizers import Adam
from nmt_utils import *
from model import model

m = 10000
Tx = 30
Ty = 10
n_a = 32
n_s = 64
learning_rate = 0.005
batch_size = 100

dataset, human_vocab, machine_vocab, inv_vocab = load_dataset(m)
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
# model.summary()
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, decay=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
outputs = list(Yoh.swapaxes(0, 1))
model.fit([Xoh, s0, c0], outputs, epochs=50, batch_size=batch_size)
model.save_weights('models/model_50.h5')
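
Since the model above takes [Xoh, s0, c0] and emits one softmax per output step, a prediction is decoded by taking the argmax of each of the Ty outputs. A sketch reusing names from the example (inv_vocab is assumed to map indices back to characters, as returned by load_dataset):

# Predict, then decode the Ty per-step softmax outputs for the first sample.
preds = model.predict([Xoh, s0, c0])   # list of Ty arrays, each (m, len(machine_vocab))
indices = [int(np.argmax(step[0])) for step in preds]
print(''.join(inv_vocab[i] for i in indices))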
Example #24
X = np.array(X)
Y = np.array(Y)
X = X.reshape(-1, 2)  # -2 is not a valid reshape dimension; -1 infers it
Y = Y.reshape(-1, 1)

#read the file for testing
filename = 'func_vals_test.csv'
raw_data = open(filename, 'r')
reader = csv.reader(raw_data)

#load dataset
dataset = np.loadtxt(raw_data, delimiter=",")

#split into input and output variables
X_test = dataset[:, 0:2]
Y_test = dataset[:, 2]

X_test = np.array(X_test)
Y_test = np.array(Y_test)
X_test = X_test.reshape(-1, 2)
Y_test = Y_test.reshape(-1, 1)

#use ReLU
model.compile(optimizer=Adam(0.001), loss='mean_squared_error')
model.fit(X, Y, shuffle=True, batch_size=20, validation_split=0.4, epochs=20)

predictions = model.predict(X_test)
score = r2_score(Y_test, predictions)
print(predictions)
print(Y_test)
print("Score: ", score)
Example #25
        index = np.where(similarity == maximum)
        top_k.append(index[0][0])
        similarity[index] = -2
    np.save('{}_similarity.probs'.format(img_path), np.array(top_k))

    # Emoji scoring using emoji sense definitions
    similarity_defns = []
    for i in range(len(sense_embeddings)):
        prod = cosine_product(Final_image_embedding, definition_embeddings[i])
        similarity_defns.append(prod)
    similarity_defns = np.array(similarity_defns)
    top_k = []
    while len(top_k) <= 24:
        maximum = max(similarity_defns)
        index = np.where(similarity_defns == maximum)
        top_k.append(index[0][0])
        similarity_defns[index] = -2
    np.save('{}_similarity_defns.probs'.format(img_path), np.array(top_k))

if __name__ == '__main__':
    args = parser()

    # Test pretrained model
    model = model()
    model.compile()
    model.set_weights(args.weights_path)

    for img in os.listdir(args.img_dir):
        predict(model, img)

Example #26
        ReduceLROnPlateau(monitor='val_loss',
                          factor=0.1,
                          patience=7,
                          min_lr=lr / 100),
        EarlyStopping(
            patience=9,  # patience should be larger than in ReduceLROnPlateau
            min_delta=0.00001)
    ]
    # Set session and compile model
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    # If you are continuing an interrupted training session, uncomment the line below:
    #   model = keras.models.load_model(PATH_TO_PREV_MODEL, compile=False)
    model.compile(loss=loss, optimizer=opt)
    # Get annotations
    y = pd.read_csv(args.path_to_csv).values
    # Get tracings
    f = h5py.File(args.path_to_hdf5, "r")
    x = f[args.dataset_name]

    # Create log
    callbacks += [
        TensorBoard(log_dir='./logs', batch_size=batch_size,
                    write_graph=False),
        CSVLogger('training.log', append=False)
    ]  # Change append to true if continuing training
    # Save the BEST and LAST model
    callbacks += [
        ModelCheckpoint('./backup_model_last.hdf5'),
Example #27
from utils import load_train_test_data

train_inputs, test_inputs, train_output, test_output = load_train_test_data()

print('Will train with {} and test with {} samples'.format(
    len(train_inputs[0]), len(test_inputs[0])))

avg_winners = np.mean(train_output, axis=0)


def custom_loss(y_true, y_pred):
    normalized_error = (y_pred - y_true) / avg_winners
    return tf.reduce_mean(tf.math.square(normalized_error), axis=1)


model.compile(optimizer='adam', loss=[None, custom_loss])
model.fit(train_inputs,
          train_output,
          validation_data=(test_inputs, test_output),
          epochs=1000,
          callbacks=[
              tf.keras.callbacks.EarlyStopping('loss', patience=5),
              tf.keras.callbacks.TensorBoard(log_dir='logs/' +
                                             time.strftime('%Y%m%d%H%M%S'),
                                             histogram_freq=1)
          ])

model.save('results/model.h5', include_optimizer=False)
normal_probs, lucky_probs = model.get_layer('gather_probs_layer').get_probs()
normal_probs = pd.Series(normal_probs, index=np.arange(1, 50))
lucky_probs = pd.Series(lucky_probs, index=np.arange(1, 11))
Example #28
le.fit(y)
y = le.transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

with open("X_test.npy", "wb+") as f:
    np.save(f, X_test)

with open("y_test.npy", "wb+") as f:
    np.save(f, y_test)

logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

early_callback = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               min_delta=1e-5,
                                               patience=5)
checkpoint_callback = keras.callbacks.ModelCheckpoint(
    filepath="best_so_far.h5", save_best_only=True)

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy", "categorical_accuracy", f1])
model.fit(
    X_train,
    y_train,
    validation_split=0.2,
    batch_size=500,
    epochs=1000,
    callbacks=[tensorboard_callback, early_callback, checkpoint_callback])
model.save("model.h5")
Example #29
batch_size = 32
epochs = 3

if __name__ == '__main__':
    # Get data
    (X_1, y_1) = get_data_from_list(glob('images/positive/*'), 1)
    (X_2, y_2) = get_data_from_list(glob('images/negative/**/*.*'), 0)
    X = np.append(X_1, X_2, axis=0)
    y = np.append(y_1, y_2, axis=0)

    # shuffle here: model.fit's validation_split takes the last samples
    # without shuffling them first
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3)

    # Init model structure
    model = model((200, 300, 3), 1)
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_val, y_val),
              callbacks=[WandbCallback(log_batch_frequency=10)])
    model.save('models/base_model_v13.h5')

    # Note:
    # Keras reports train acc/loss as a running average over the epoch's batches,
    # while val acc/loss is computed at epoch end, so the val numbers can look
    # better than the reported train numbers.
Example #30
import numpy as np
import random
from dataloader import CaptchaSequence
import string
characters = string.digits + string.ascii_uppercase
data = CaptchaSequence(characters, batch_size=2, steps=1000)

from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from tensorflow.keras.optimizers import *

train_data = CaptchaSequence(characters, batch_size=128, steps=1000)
valid_data = CaptchaSequence(characters, batch_size=128, steps=100)
callbacks = [
    EarlyStopping(patience=3),
    CSVLogger('cnn.csv'),
    ModelCheckpoint('cnn_best.h5', save_best_only=True)
]

from model import model
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(1e-3, amsgrad=True),
              metrics=['accuracy'])
model.fit_generator(train_data,
                    epochs=100,
                    validation_data=valid_data,
                    workers=4,
                    use_multiprocessing=True,
                    callbacks=callbacks)