Example #1
    def get_callbacks(self, validation_data, plot):
        callbacks = []
        if self.early_stopping and validation_data is not None:
            patience = self.early_stopping_rounds
            callbacks.append(
                EarlyStopping(monitor='val_loss',
                              mode='min',
                              verbose=self.verbose,
                              patience=patience))
            callbacks.append(
                GetBest(monitor='val_%s' % self.metrics[0],
                        verbose=self.verbose))

        if plot:
            from livelossplot.keras import PlotLossesCallback
            callbacks.append(PlotLossesCallback())

        if self.reduce_lr:
            patience = self.reduce_lr_patience or self.epochs // 100
            reduce_lr = ReduceLROnPlateau(factor=self.reduce_lr_factor,
                                          patience=patience,
                                          verbose=self.verbose)
            callbacks.append(reduce_lr)

        return callbacks
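For context, a minimal sketch of how such a callback list is typically consumed; the fit method, self.model, and the argument names below are hypothetical, not part of the original:

    # Usage sketch (hypothetical method on the same class)
    def fit(self, X, y, validation_data=None, plot=False):
        callbacks = self.get_callbacks(validation_data, plot)
        return self.model.fit(X, y,
                              epochs=self.epochs,
                              validation_data=validation_data,
                              callbacks=callbacks,
                              verbose=self.verbose)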
Example #2
def train_model(train_x, train_y, model_name="circle_model", augment=False, lr=1e-4, epochs=10, valid_split=0,
                early_stop=False, model=None):
    """
    Returns a trained model and the training history.

    :param train_x: the training input data
    :param train_y: the training output data (labels)
    :param augment: whether or not to augment the data
    :param model_name: name of the new model (weights will be saved under this name)
    :param lr: learning rate for model
    :param epochs: number of epochs to run for
    :param valid_split: percentage of data that should be used for validation
    :param early_stop: whether or not to stop early if the validation and training curves diverge too much
    :param model: an existing model to train
    """
    if model is None:
        model = circle_model((IMG_WIDTH, IMG_HEIGHT, 1))
        model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy", metrics=["accuracy"])

    # Setup training callbacks
    callbacks = []
    if early_stop:
        callbacks.append(EarlyStopping(monitor='val_loss', verbose=1, patience=50))
    callbacks.append(ModelCheckpoint(MAIN_DIR_PATH + "/" + model_name + ".hdf5", save_weights_only=True))
    callbacks.append(PlotLossesCallback())

    history = model.fit(train_x, train_y, epochs=epochs, validation_split=valid_split, callbacks=callbacks)

    plot_model_history(history)

    return model, history
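A hypothetical call of the function above; the argument values are illustrative only and loading of train_x/train_y is not shown:

# Usage sketch (illustrative; data loading omitted)
model, history = train_model(train_x, train_y,
                             model_name="circle_model",
                             lr=1e-4,
                             epochs=50,
                             valid_split=0.2,
                             early_stop=True)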
Example #3
    def __init__(self, n_in=1, n_out=1, dropnan=True):
        ''' Predict the next n_out time steps from the previous n_in time steps. '''
        self.model = tensorflow.keras.Sequential()

        self.plot_losses = PlotLossesCallback()
        self.history = History()
        self.tbCallBack = tensorflow.keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
        self.save_model_img = 'image/series_lstm_multi_class.png'
        self.save_model_file = 'image/series_lstm_multi_class.h5'  # self.model.save("stock_lstm.h5")
        self.n_hours = 3
Example #4
                                 horizontal_flip=True)

train_generator = train_datagen.flow(np.array(X_train),
                                     Y_train,
                                     batch_size=batch_size)

validation_generator = val_datagen.flow(np.array(X_val),
                                        Y_val,
                                        batch_size=batch_size)

checkpointer = ModelCheckpoint(filepath='model.h5',
                               verbose=1,
                               save_best_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)

plot_losses = PlotLossesCallback()

model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=20,
                    validation_data=validation_generator,
                    validation_steps=nb_validation_samples // batch_size,
                    callbacks=[checkpointer, es, plot_losses])

d = model.history.history

plt.plot(d['acc'])
plt.plot(d['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
Example #5
    def __init__(self, n_in=1, n_out=1, dropnan=True):
        ''' Predict the next n_out time steps from the previous n_in time steps. '''
        self.model = keras.Sequential()
        self.plot_losses = PlotLossesCallback()
        self.save_model_img = 'image/series_lstm.png'
        self.save_model_file = 'image/series_lstm.h5'  # self.model.save("stock_lstm.h5")
Example #6
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation("sigmoid"))

model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])
filepath = "/home/abdullah/cacheFiles/weights.{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
model.fit_generator(
    training_generator,
    steps_per_epoch=len(training_generator.filenames) // BATCH_SIZE,
    epochs=1,
    validation_data=validation_generator,
    validation_steps=len(validation_generator.filenames) // BATCH_SIZE,
    callbacks=[checkpoint,
               PlotLossesCallback(),
               CSVLogger("training.log", append=False, separator=";")])
model_json = model.to_json()
with open("/home/abdullah/cacheFiles/model.json", "w") as json_file:
    json_file.write(model_json)
# serialize the full model to HDF5
model.save('/home/abdullah/cacheFiles/Activity.h5')
print("Saved model to disk")

json_file = open('/home/abdullah/cacheFiles/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("/home/abdullah/cacheFiles/Activity.h5")
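Worth noting: model_from_json restores only the architecture, so the reloaded model must be compiled before it can be evaluated. A minimal sketch, reusing validation_generator and BATCH_SIZE from above:

# Sketch: compile the reloaded model, then evaluate it
loaded_model.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=["accuracy"])
score = loaded_model.evaluate_generator(
    validation_generator,
    steps=len(validation_generator.filenames) // BATCH_SIZE)
print("Reloaded model [loss, accuracy]:", score)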
Example #7
model.add(keras.layers.Dense(4, activation=tf.nn.relu))
model.add(keras.layers.Dropout(0.4))

model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train,
          y_train,
          epochs=100,
          batch_size=32,
          validation_data=(X_test, y_test),
          callbacks=[PlotLossesCallback()],
          verbose=0,
          shuffle=True)  # validation_split removed: it is ignored when validation_data is given

y_pred = model.predict_classes(X_test)

score = model.evaluate(X_test, y_test, verbose=1)

print(score)

from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score, auc

# Confusion matrix
print(confusion_matrix(y_test, y_pred))
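The other metrics imported above can be reported the same way; a small sketch completing the evaluation, assuming binary labels in y_test and y_pred:

# Sketch: report the remaining imported sklearn metrics
print("Precision: %.3f" % precision_score(y_test, y_pred))
print("Recall:    %.3f" % recall_score(y_test, y_pred))
print("F1:        %.3f" % f1_score(y_test, y_pred))
print("Kappa:     %.3f" % cohen_kappa_score(y_test, y_pred))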
Example #8
x = Dropout(0.5)(x)

out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)

model = Model(input_text, out)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

model.summary()

X_tr, X_val = X_tr[:1213*batch_size], X_tr[-135*batch_size:]
y_tr, y_val = y_tr[:1213*batch_size], y_tr[-135*batch_size:]
y_tr = y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1)
y_val = y_val.reshape(y_val.shape[0], y_val.shape[1], 1)

history = model.fit(np.array(X_tr), y_tr, validation_data=(np.array(X_val), y_val),
                    batch_size=batch_size, epochs=20, verbose=1, callbacks=[PlotLossesCallback()])

!pip install seqeval

from seqeval.metrics import precision_score, recall_score, f1_score, classification_report
X_te = X_te[:149*batch_size]
test_pred = model.predict(np.array(X_te), verbose=1)

idx2tag = {i: w for w, i in tags2index.items()}

def pred2label(pred):
    out = []
    for pred_i in pred:
        out_i = []
        for p in pred_i:
            p_i = np.argmax(p)
            # assumes a "PAD" tag in idx2tag that should read back as "O"
            out_i.append(idx2tag[p_i].replace("PAD", "O"))
        out.append(out_i)
    return out
Example #9
test_generator = test_data_generator.flow_from_directory(
    test_data_dir,
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=1,
    class_mode="binary",
    shuffle=False)

# Training
model.fit_generator(
    training_generator,
    steps_per_epoch=len(training_generator.filenames) // BATCH_SIZE,
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=len(validation_generator.filenames) // BATCH_SIZE,
    callbacks=[
        PlotLossesCallback(),
        CSVLogger(TRAINING_LOGS_FILE, append=False, separator=";")
    ],
    verbose=1)
model.save_weights(MODEL_FILE)

probabilities = model.predict_generator(test_generator, TEST_SIZE)
for index, probability in enumerate(probabilities):
    image_path = test_data_dir + "/" + test_generator.filenames[index]
    img = mpimg.imread(image_path)
    plt.imshow(img)
    if probability > 0.5:
        plt.title("%.2f" % (probability[0] * 100) + "% dog")
    else:
        plt.title("%.2f" % ((1 - probability[0]) * 100) + "% cat")
    plt.show()
Example #10
    def __init__(self):
        self.model = keras.Sequential()
        self.plot_losses = PlotLossesCallback()
        self.save_model_img = 'image/fully_connected_network_model.png'
        self.save_model_file = 'model/fully_connected_network_model.h5'
Example #11
metrics = ['MAPE', 'MAE']

# Custom loss as described in the paper
if use_customloss:
    loss = pore_utils.custom_loss    # imports custom loss
    metrics = [keras.losses.MAPE_c]  # custom MAPE
    # concatenate the porosity mask so tf has access to it during training
    y_train = np.concatenate((np.expand_dims(binary_mask, 4), y_train), axis=4)
else:
    loss = keras.losses.mean_absolute_error

optimizer     = keras.optimizers.Adam() # the default LR does the job
plot_losses   = PlotLossesCallback(
                    fig_path='savedModels/%s/metrics.png' % model_name)
nan_terminate = keras.callbacks.TerminateOnNaN()
early_stop    = keras.callbacks.EarlyStopping(monitor ='val_loss', min_delta=0, 
                                              patience=patience_training, 
                                              verbose=2, mode='auto', baseline=None,
                                              restore_best_weights=False)


# TF internals
#config        = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = mem_fraction 
#config.gpu_options.per_process_gpu_memory_fraction = 1 
#config.gpu_options.allow_growth = True
#set_session( tf.Session(config=config) )
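For reference, a sketch of the TF2 equivalent of the commented-out TF1 session config above (memory growth only; not part of the original):

# TF2 equivalent of the TF1 session config (sketch)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)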

Example #12
    def __init__(self):
        self.model = keras.Sequential()
        self.plot_losses = PlotLossesCallback()
        self.save_model_img = 'image/convolutional_neural_network_model_mnist.png'
        self.save_model_file = 'model/convolutional_neural_network_model_mnist.h5'
Example #13
def train_model(training_generator,
                validation_generator,
                model_name="xception_model",
                augment=False,
                lr=1e-4,
                epochs=10,
                early_stop=False,
                fine_tune_from=-1,
                model=None):
    """
    Returns a trained model and the training history.

    :param training_generator: the training data generator
    :param validation_generator: the validation data generator
    :param augment: whether or not to augment the data
    :param model_name: name of the new model (weights will be saved under this name)
    :param lr: learning rate for model
    :param epochs: number of epochs to run for
    :param early_stop: whether or not to stop early if the validation and training curves diverge too much
    :param fine_tune_from: which layer to start fine tuning from (all layers before this layer will be frozen)
    :param model: an existing model to train
    """

    if model is None:
        model = get_model(15)

    # Fine Tuning
    if fine_tune_from >= 0:
        print(len(model.layers))

        for layer in model.layers:
            layer.trainable = False

        for layer in model.layers[fine_tune_from:]:
            layer.trainable = True

    model.compile(optimizer="nadam",
                  loss=keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])

    # Setup training callbacks
    callbacks = []
    if early_stop:
        callbacks.append(
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          verbose=1,
                                          patience=50))
    callbacks.append(
        keras.callbacks.ModelCheckpoint(MAIN_DIR_PATH + "/" + model_name +
                                        ".h5",
                                        save_best_only=True))
    callbacks.append(PlotLossesCallback())

    model.summary()

    train_step_size = training_generator.n // training_generator.batch_size
    history = model.fit_generator(training_generator,
                                  validation_data=validation_generator,
                                  epochs=epochs,
                                  steps_per_epoch=train_step_size,
                                  callbacks=callbacks)

    return model, history
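A hypothetical call of the function above, assuming directory-based generators named train_gen and val_gen (these names and values are illustrative):

# Usage sketch (train_gen/val_gen are hypothetical ImageDataGenerator flows)
model, history = train_model(train_gen,
                             val_gen,
                             model_name="xception_model",
                             epochs=30,
                             early_stop=True,
                             fine_tune_from=100)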
Example #14
model = Model(inputs=[sentence_indices], outputs=X)

model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

X_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)
X_val_indices = sentences_to_indices(X_val, word_to_index, maxLen)
X_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)
print(X_train_indices.shape)

earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto')

history=model.fit(X_train_indices, y=Y_oh_train, batch_size=512, epochs=300, 
          verbose=1, validation_data=(X_val_indices, Y_oh_val), callbacks=[earlystop,PlotLossesCallback()])

type(history.history['acc'])

# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')