filters=32,
                             n_class=1)

    callbacks = [
        EarlyStopping(monitor='val_dice_coef',
                      patience=10,
                      verbose=1,
                      min_delta=1e-4,
                      mode='max'),
        ReduceLROnPlateau(monitor='val_dice_coef',
                          factor=0.2,
                          patience=5,
                          verbose=1,
                          min_delta=1e-4,
                          mode='max'),
        ModelCheckpoint(monitor='val_dice_coef',
                        filepath='model_weights.hdf5',
                        save_best_only=True,
                        mode='max')
    ]

    model.fit_generator(
        generator=train_generator(ids_train),
        steps_per_epoch=np.ceil(float(len(ids_train)) / float(BATCH_SIZE)),
        epochs=8,
        verbose=2,
        callbacks=callbacks,
        validation_data=valid_generator(ids_valid),
        validation_steps=np.ceil(float(len(ids_valid)) / float(BATCH_SIZE)))
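The val_dice_coef quantity monitored above implies a custom Dice metric registered when the model was compiled, which is not shown in this excerpt. A minimal sketch, assuming a standard soft-Dice formulation and the Keras backend API, could be:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened masks; `smooth` avoids division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

# Registered as a metric so Keras logs dice_coef and val_dice_coef each epoch, e.g.:
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[dice_coef])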
#!==========================================================================================================
Example #2
            wordToVecMap[word.strip()] = loadedVectors[index]
            
    return wordToVecMap

datasetDirectory = "aclImdb/train/"
sequenceLength = 300
vecSpaceSize = 8

reviews, ratings = readDataset(datasetDirectory, sequenceLength, vecSpaceSize)
#Get embedded matrix representing vocabulary
wordsToIndex, tokenizer = generateWordMapping(reviews)
#Generate model and output summary
model = loadModel(len(wordsToIndex) + 1, sequenceLength, vecSpaceSize)
model.summary()
#Define weights checkpoint
filepath = "data/weights-{epoch:d}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, mode='min')

#Train the model
X = tokenizer.texts_to_sequences(reviews)
X = pad_sequences(X, maxlen=sequenceLength, padding='post')

#print(encodedReviews.shape)
model.fit(X,
          ratings,
          epochs=10,
          batch_size=32,
          shuffle=True,
          callbacks=[checkpoint, EvaluationCallback()])
print(model.metrics_names)
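EvaluationCallback is referenced above but not defined in this excerpt. A minimal sketch of such a custom Keras callback, assuming it merely evaluates the model on an optional held-out set after every epoch (X_val and y_val are hypothetical placeholders), could be:

from keras.callbacks import Callback

class EvaluationCallback(Callback):
    def __init__(self, X_val=None, y_val=None):
        super(EvaluationCallback, self).__init__()
        self.X_val = X_val
        self.y_val = y_val

    def on_epoch_end(self, epoch, logs=None):
        # Evaluate on the held-out data after each epoch, if any was provided.
        if self.X_val is None or self.y_val is None:
            return
        scores = self.model.evaluate(self.X_val, self.y_val, verbose=0)
        if not isinstance(scores, list):
            scores = [scores]
        print("epoch {}: {}".format(epoch + 1, dict(zip(self.model.metrics_names, scores))))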
Example #3
    if num_of_samples % batch_size:
        one_extra = 1
    steps_per_epoch = num_of_samples // batch_size + one_extra
    print('STEPS PER EPOCH = {}'.format(steps_per_epoch))

    if 'Both' in run_with:
        model_full = build_network(X.shape,
                                   ending_images.shape,
                                   spikes_images_type='Both')
        print(model_full.summary())

        full_checkpoint_file = (join(
            data_folder, 'both_latest_model_SSTiter_{}.h5'.format(i)))
        full_checkpoint = ModelCheckpoint(full_checkpoint_file,
                                          monitor='val_loss',
                                          verbose=1,
                                          save_best_only=True,
                                          mode='min')
        full_callbacks_list = [full_checkpoint]
        model_history = model_full.fit_generator(
            gen,
            steps_per_epoch=steps_per_epoch,
            validation_data=([X_test,
                              starting_images_test], ending_images_test),
            epochs=300,
            callbacks=full_callbacks_list)
        model_full.save(
            join(data_folder, 'both_final_model_SSTiter_{}.h5'.format(i)))

    if 'Spikes' in run_with:
        model_spikes = build_network(X.shape,
Example #4
# In[ ]:

### Run training

# 7: Run training
epochs = 20

history = model.fit_generator(
    generator=train_generator,
    steps_per_epoch=ceil(n_train_samples / batch_size),
    epochs=epochs,
    callbacks=[
        ModelCheckpoint(
            './ssd300_weights_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            save_weights_only=True,
            mode='auto',
            period=1),
        LearningRateScheduler(lr_schedule),
        EarlyStopping(monitor='val_loss', min_delta=0.001, patience=2)
    ],
    validation_data=val_generator,
    validation_steps=ceil(n_val_samples / batch_size))

model_name = 'ssd300_0'
model.save('./{}.h5'.format(model_name))
model.save_weights('./{}_weights.h5'.format(model_name))

print()
print("Model saved as {}.h5".format(model_name))
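The lr_schedule function handed to LearningRateScheduler above is defined elsewhere in the script. A hedged sketch of a typical step-decay schedule for this kind of SSD training run (the breakpoints and rates below are illustrative, not taken from the original) looks like:

def lr_schedule(epoch):
    # Illustrative step decay: start at 1e-3 and drop by a factor of 10 at fixed epochs.
    if epoch < 10:
        return 1e-3
    elif epoch < 15:
        return 1e-4
    else:
        return 1e-5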
model.add(Activation('relu'))
model.add(Dropout(0.45))

model.add(Dense(48))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(8))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer="Adam",
              metrics=['accuracy'])

check = ModelCheckpoint(filepath="best_model_check_ctf-adam-6-2-2_verify.hdf5",
                        monitor='val_loss',
                        verbose=1,
                        save_best_only=True)

model.fit(x_train,
          y_train,
          batch_size=8,
          epochs=68,
          callbacks=[earlyStopping, check],
          validation_data=(x_test, y_test))

### STEP 6

############# Finding probability threshold that gives maximum Fscore for individual classes
# Model Prediction on validation data
out = model.predict_proba(x_test)
out = np.array(out)
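The comment above describes a per-class probability-threshold search that maximizes the F-score on the validation data. A minimal sketch of that search with scikit-learn, assuming out holds per-class probabilities and y_test is the matching binary label matrix, could be:

import numpy as np
from sklearn.metrics import f1_score

def best_thresholds(y_true, y_prob, candidates=np.arange(0.05, 0.95, 0.05)):
    # For each class, keep the probability cut-off with the highest F1 score.
    thresholds = []
    for c in range(y_prob.shape[1]):
        scores = [f1_score(y_true[:, c], (y_prob[:, c] >= t).astype(int)) for t in candidates]
        thresholds.append(candidates[int(np.argmax(scores))])
    return np.array(thresholds)

# e.g. per_class_thresholds = best_thresholds(y_test, out)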
model = Model(inputs, outputs)
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

es = EarlyStopping(monitor='val_loss',
                   mode='min',
                   verbose=1,
                   patience=10,
                   min_delta=0.0001)
mc = ModelCheckpoint('best_model/best_model.h5',
                     monitor='val_acc',
                     verbose=1,
                     save_best_only=True,
                     mode='max')

history = model.fit(x_train,
                    y_train,
                    epochs=100,
                    callbacks=[es, mc],
                    batch_size=32,
                    validation_data=(x_val, y_val))

model.save('model.h5')
print('model saved')

#pyplot.plot(history.history['loss'], label='train')
#pyplot.plot(history.history['val_loss'], label='test')
Example #7
Autoencoder = Model(inputs=input_layer, outputs=decoder)

# Building epochs and batch_size

nb_epoch = 100
batch_size = 256

# Compile the Model
Autoencoder.compile(optimizer='adam',
                    loss='mean_squared_error',
                    metrics=['accuracy'])

# Fit the model

checkpointer = ModelCheckpoint(filepath="model.h5",
                               verbose=0,
                               save_best_only=True)

tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)

# Implies X_train is both the input and output, which is required for reconstruction

history = Autoencoder.fit(X_train,
                          X_train,
                          epochs=nb_epoch,
                          batch_size=batch_size,
                          shuffle=True,
                          validation_data=(X_test, X_test),
Example #8
                                  freeze_body=2,
                                  weights_path=weights_path)
    else:
        model = create_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path=weights_path)  # make sure you know what you freeze

    log_dir_time = os.path.join(log_dir, "{}".format(int(time())))
    logging = TensorBoard(log_dir=log_dir_time)
    checkpoint = ModelCheckpoint(
        os.path.join(log_dir, "checkpoint.h5"),
        monitor="val_loss",
        save_weights_only=True,
        save_best_only=True,
        period=5,
    )
    reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor="val_loss",
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = FLAGS.val_split
    with open(FLAGS.annotation_file) as f:
        lines = f.readlines()
Example #9
        if use_lr_scheduler:
            indices = [(x, y) for x, y in self.lr_scheduler_plan.items()
                       if logs.get('binary_accuracy') >= x]
            if indices != []:
                K.set_value(model.optimizer.lr, indices[0][1])  # lr = y, value
                print(f'\nChanged learning rate to {indices[0][1]}')
                del self.lr_scheduler_plan[indices[0][0]]  # acc = x, key


#endregion

#region Fixing callbacks
# RUNNING IN ONEDRIVE OR DROPBOX SEEMS TO INDUCE PERMISSION DENIED SINCE IT WRITES SO FAST
checkpointer = ModelCheckpoint(
    filepath=os.getcwd() + '/' +
    'storage/checkpoint.h5',  # I need full path, or permission error
    verbose=0,
    save_best_only=True)
history_callback = history_recorder()
learning_rate_callback = learning_rate_scheduler()
learning_rate_callback.lr_scheduler_plan = lr_scheduler_plan
history_callback.config_plot_realtime = config_plot_realtime
#endregion

#region Loading model
# Try to load model
if load_saved_model or train_loaded_model:
    try:
        logger.info('Loading model from file...')
        model = load_model('results/Signal_peptide_model.h5'
                           )  # Looks like you can load model and keep training
Example #10
    dense1 = Dropout(0.5)(dense1)
    out = Dense(1, kernel_initializer='glorot_uniform',
                activation='sigmoid')(dense1)
    outs.append(out)

out = average(outs)

model = Model(outputs=out, inputs=inp)

model.compile(loss="binary_crossentropy",
              optimizer='Adam',
              metrics=["accuracy"])
model.summary()

checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.hdf5',
                               verbose=1,
                               save_best_only=True)

model.fit_generator(datagen.flow(X_train, y_train),
                    steps_per_epoch=len(X_train) / 12,
                    validation_data=(X_valid, y_valid),
                    epochs=70,
                    callbacks=[checkpointer],
                    verbose=1)

model.load_weights('saved_models/weights.best.hdf5')

X_test = X_test.astype('float32') / 30
labels = model.predict(X_test)

submission = pd.DataFrame({
Example #11
# Load model
weights_file = "weights/DenseNet-40-12-CIFAR10.h5"
if os.path.exists(weights_file):
    #model.load_weights(weights_file, by_name=True)
    print("Model loaded.")

out_dir = "weights/"

lr_reducer = ReduceLROnPlateau(monitor='val_acc',
                               factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=1e-5)
model_checkpoint = ModelCheckpoint(weights_file,
                                   monitor="val_acc",
                                   save_best_only=True,
                                   save_weights_only=True,
                                   verbose=1)

callbacks = [lr_reducer, model_checkpoint]
try:
    if augment == 'true':
        print("Training with data augmentation...")
        model.fit_generator(generator.flow(trainX,
                                           Y_train,
                                           batch_size=batch_size),
                            steps_per_epoch=len(trainX) // batch_size,
                            epochs=nb_epoch,
                            callbacks=callbacks,
                            validation_data=(testX, Y_test),
                            validation_steps=testX.shape[0] // batch_size,
# set up generators
print("setting up generators...")
test_size = int(0.3 * len(parsed_sentences))
s_train, s_test = parsed_sentences[0:-test_size], parsed_sentences[-test_size:]
train_gen = generate_sentence_batch(s_train, word2id, 
                                    SEQUENCE_LEN, BATCH_SIZE)
test_gen = generate_sentence_batch(s_test, word2id, 
                                   SEQUENCE_LEN, BATCH_SIZE)

# train autoencoder
print("training autoencoder...")
num_train_samples = len(s_train) // BATCH_SIZE
num_test_samples = len(s_test) // BATCH_SIZE
checkpoint = ModelCheckpoint(filepath=os.path.join(
    DATA_DIR, "sent-thoughts-autoencoder.h5"),
    save_best_only=True)
history = autoencoder.fit_generator(train_gen, 
                                    samples_per_epoch=num_train_samples,
                                    nb_epoch=NUM_EPOCHS,
                                    validation_data=test_gen, 
                                    nb_val_samples=num_test_samples,
                                    callbacks=[checkpoint])

# saving history for charting (headless)
fchart = open(os.path.join(DATA_DIR, "sent-thoughts-loss.csv"), "wb")
trg_losses = history.history["loss"]
val_losses = history.history["val_loss"]
fchart.write("#loss\tval_loss\n")
for trg_loss, val_loss in zip(trg_losses, val_losses):
    fchart.write("{:.5f}\t{:.5f}\n".format(trg_loss, val_loss))
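generate_sentence_batch is defined outside this excerpt. A hedged sketch of what such a generator for a sentence autoencoder commonly does, assuming tokenized sentences, a word2id lookup with an "UNK" entry, and identical input/target batches for reconstruction, could be:

import numpy as np
from keras.preprocessing.sequence import pad_sequences

def generate_sentence_batch(sentences, word2id, seq_len, batch_size):
    # Endless generator: map tokens to ids, pad to seq_len, and yield (X, X)
    # pairs, since the autoencoder reconstructs its own input.
    while True:
        np.random.shuffle(sentences)
        for i in range(0, len(sentences) - batch_size + 1, batch_size):
            batch = sentences[i:i + batch_size]
            encoded = [[word2id.get(w, word2id.get("UNK", 0)) for w in s] for s in batch]
            X = pad_sequences(encoded, maxlen=seq_len)
            yield X, X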
Example #13
def _main():
    annotation_path = 'annotations.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/tiny_yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting was 6
    print('Tiny Version: ' + str(is_tiny_version))
    if is_tiny_version:
        model = create_tiny_model(input_shape,
                                  anchors,
                                  num_classes,
                                  freeze_body=2,
                                  weights_path='model_data/yolo_weights.h5')
    else:
        model = create_model(input_shape,
                             anchors,
                             num_classes,
                             freeze_body=2,
                             weights_path='model_data/yolo_weights.h5'
                             )  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=5,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 32  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
Example #14
model.add(Dropout(0.15))
model.add(Dense(150, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(17, activation='softmax'))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

es = EarlyStopping(monitor='val_loss', verbose=1, patience=15)

mc = ModelCheckpoint('chess-{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.model',
                     verbose=1,
                     period=1,
                     save_best_only=True,
                     save_weights_only=False,
                     monitor='val_loss')

start = time.time()

print(model.summary())

history = model.fit(train_x,
                    train_y,
                    epochs=500,
                    verbose=2,
                    validation_data=(test_x, test_y),
                    callbacks=[es, mc])

end = time.time()
Example #15
    # validation_data_dir = os.path.join('assets', 'Validation_Data')
    roi_validation_data_dir = os.path.join('assets', 'Validation_Data', 'ROIs')
    weights_path = os.path.join('assets', 'CNN_2_Best_Weights.hdf5')

    dataset_path = os.path.join(dataset_dir, 'ROI_Full_Dataset.npy')
    validation_data_path = os.path.join(roi_validation_data_dir, 'Valid_ROI_00076.npy')

    X_train, y_train = get_trainings_set(dataset_path)
    X_test, y_test = get_test_set(validation_data_path)

    epochs = 200
    batch_size = 1024

    # for i in range(3):
    # checkpoint
    checkpoint = ModelCheckpoint(weights_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    model = compile_cnn_model()

    # training
    history_callback = model.fit(X_train, y_train,
                                 validation_data=(X_test, y_test),
                                 batch_size=batch_size,
                                 nb_epoch=epochs,
                                 show_accuracy=True,
                                 callbacks=callbacks_list)
    print("--- Training Completed %s seconds ---" % (time.time() - start_time))



    '''Evaluation'''
    for file in os.listdir(roi_validation_data_dir):

        valid_data_X_test, valid_data_y_test = get_test_set(os.path.join(roi_validation_data_dir, file))

        scores = model.evaluate(valid_data_X_test, valid_data_y_test, verbose=0)
else:
    fooling = "no-fooling"
## Implement callbacks
#### CSVLogger
date = str(datetime.datetime.now().year) + "-" + str(
    datetime.datetime.now().month) + "-" + str(datetime.datetime.now().day)
csv_name = date + "_{}-{}_{}_ratio-{}_{}".format(
    args.src_lang, args.tgt_lang, args.optimizer, args.ratio, fooling)
csv_path = os.path.join("./training_logs/csv/", csv_name)
logger = CSVLogger(filename=csv_path)
#### Model Checkpoint
cp_path = "./models/monitoring/" + date + "_{}-{}_{}_ratio-{}_{}_cp".format(
    args.src_lang, args.tgt_lang, args.optimizer, args.ratio, fooling)
checkpoint = ModelCheckpoint(filepath=cp_path,
                             monitor="val_loss",
                             save_best_only=True,
                             save_weights_only=False,
                             verbose=1)

callbacks_list = [logger, checkpoint]

if args.earlystopping:
    callbacks_list.append(
        EarlyStopping(patience=args.earlystopping_patience,
                      monitor="val_loss"))

if not args.callbacks:
    callbacks_list = []

## Fit
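The fit call itself is cut off after the "## Fit" heading. A hedged sketch of how the assembled callbacks_list would typically be used, with hypothetical names (x_train, y_train, x_val, y_val, args.batch_size, and args.epochs are not shown in the excerpt), could be:

history = model.fit(x_train,
                    y_train,
                    batch_size=args.batch_size,
                    epochs=args.epochs,
                    validation_data=(x_val, y_val),
                    callbacks=callbacks_list,
                    verbose=1)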
Example #17
n_patterns = len(dataX)
print("Total Patterns: ", n_patterns)

# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))

# normalize
X = X / float(n_vocab)

# one hot encode the output variable
y = np_utils.to_categorical(dataY)

# define the LSTM model
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

# define the checkpoint
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
callbacks_list = [checkpoint]

# fit the model
model.fit(X, y, nb_epoch=20, batch_size=128, callbacks=callbacks_list)
Example #18
    train_set, test_set = sklearn.model_selection.train_test_split(
        dataset, test_size=0.2, random_state=0)
    test_set, validation_set = sklearn.model_selection.train_test_split(
        test_set, test_size=0.5, random_state=0)

    X_train, Y_train, D_train = arrange_data(train_set, "Training")
    X_val, Y_val, D_val = arrange_data(validation_set, "Validation")
    X_test, Y_test, D_test = arrange_data(test_set, "Test")

    es = EarlyStopping(monitor='val_loss',
                       mode='min',
                       verbose=1,
                       patience=int(settings['epochs'] / 20))
    mc = ModelCheckpoint(model_name,
                         monitor='val_loss',
                         mode='min',
                         verbose=1)

    if os.path.exists(model_name) and settings['load_latest']:
        model = keras.models.load_model(model_name)

        X = X_train[0]
        X = X.reshape([-1, OUT_HEIGHT, OUT_WIDTH, 1])
        Y = Y_train[0]
        Yp = model.predict([X])
        print("Test Network")
        print(Y)
        print(Yp)
        print(Y - Yp)
        if not settings['train_existing']:
            exit()
Example #19
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

model.summary()

#Train

start_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')

history = model.fit_generator(train_generator,
                              epochs=70,
                              validation_data=val_generator,
                              callbacks=[
                                  ModelCheckpoint('models/%s.h5' %
                                                  (start_time),
                                                  monitor='val_acc',
                                                  save_best_only=True,
                                                  mode='max',
                                                  verbose=1),
                                  ReduceLROnPlateau(monitor='val_acc',
                                                    factor=0.2,
                                                    patience=10,
                                                    verbose=1,
                                                    mode='auto',
                                                    min_lr=1e-05)
                              ])

##history = model.fit(x, y, validation_split=0.25, epochs=50, batch_size=16, verbose=1)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
Example #20
model.add(Dropout(0.5))

model.add(Dense(10, init="he_normal"))
model.add(ELU())
model.add(Dropout(0.5))

model.add(Dense(1, init="he_normal"))

# Set up hyperparameters
adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=adam, loss="mse")

# Create checkpoint at which model weights are to be saved
checkpoint = ModelCheckpoint("model.h5",
                             monitor='val_loss',
                             verbose=0,
                             save_best_only=True,
                             save_weights_only=False,
                             mode='auto')

# Train the model
model.fit_generator(train_gen,
                    samples_per_epoch=samples_per_epoch * batch_size,
                    nb_epoch=1,
                    callbacks=[checkpoint],
                    validation_data=validation_gen,
                    nb_val_samples=validation_data.shape[0])

# Save the model architecture
with open("model.json", "w") as file:
    file.write(model.to_json())
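Because the architecture is stored as JSON and the weights separately, the model can be rebuilt later without the original model-building code. A minimal reload sketch (keras.models.load_model("model.h5") also works here, since the checkpoint saved the full model) could be:

from keras.models import model_from_json

# Rebuild the architecture from JSON, then restore the trained weights.
with open("model.json", "r") as f:
    restored = model_from_json(f.read())
restored.load_weights("model.h5")
restored.compile(optimizer="adam", loss="mse")  # recompile before further training or evaluation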
Example #21
from keras import optimizers

adam = optimizers.Adam(lr=0.000001,
                       beta_1=0.9,
                       beta_2=0.999,
                       epsilon=1e-08,
                       decay=0.99)

model.compile(loss='mse', optimizer=adam, metrics=['accuracy'])  # use the Adam instance defined above

# checkpoint (save the best model based validate loss)
# filepath = "./tmp/weights-improvement-{epoch:02d}-{val_loss:.2e}.hdf5"
filepath = "./tmp/weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             period=10)
callbacks_list = [checkpoint]

# fit the model
history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=2,
                    validation_split=vsplit,
                    callbacks=callbacks_list)

# score = model.evaluate(x_test, y_test,
#                        batch_size=batch_size, verbose=1)
Example #22
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))

model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

keras_callbacks = [
      EarlyStopping(monitor='val_loss', patience=10, mode='min', min_delta=0.0001),
      ModelCheckpoint('model_checkpoint.h5', monitor='val_loss', save_best_only=True, mode='min')
]

model.summary()

History = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    epochs=epochs, callbacks=keras_callbacks, verbose=1)

model.save('model/my_model.h5')
model.save('model.h5')

plt.plot(History.history['acc'])
Example #23
    num_val_images = utils.get_num_files(VAL_DIR)

    def lr_decay(epoch):
        if epoch % 20 == 0 and epoch != 0:
            lr = K.get_value(model.optimizer.lr)
            K.set_value(model.optimizer.lr, lr / 2)
            print("LR changed to {}".format(lr / 2))
        return K.get_value(model.optimizer.lr)

    learning_rate_schedule = LearningRateScheduler(lr_decay)

    # setup checkpoints
    csv_logger = CSVLogger(os.path.join(OUT_DIR, "log.csv"), append=True, separator=";")

    latest_filepath = os.path.join(OUT_DIR, args.model + "_model_latest.h5")
    latest_checkpoint = ModelCheckpoint(latest_filepath, monitor="accuracy", verbose=1)

    best_filepath = os.path.join(OUT_DIR, args.model + "_model_best.h5")
    best_checkpoint = ModelCheckpoint(
        best_filepath,
        monitor="val_accuracy",
        verbose=1,
        save_best_only=True,
        mode="max",
    )

    change_transfer_strategy = FixedThenFinetune(args.from_epoch + args.num_epochs)

    callbacks_list = [
        csv_logger,
        change_transfer_strategy,
Example #24
def siso_regression_tut(gpu_id: int, dataset: str, frac: float,
                        validation_split: float, preprocessor: str,
                        batch_size: int, epochs: int, optimizer: str,
                        dropout: float, corruption_level: float,
                        dae_hidden_layers: list, sdae_hidden_layers: list,
                        cache: bool, regression_hidden_layers: list,
                        verbose: int):
    """Multi-floor indoor localization based on three-dimensional regression of
    location coordinates using a single-input and single-output (SISO) deep
    neural network (DNN) model and TUT datasets.

    Keyword arguments:

    """

    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()  # based on current time or OS-specific randomness source
    rn.seed()  #  "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(cache=cache,
                  frac=frac,
                  preprocessor=preprocessor,
                  classification_mode='hierarchical',
                  grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(cache=cache,
                   frac=frac,
                   preprocessor=preprocessor,
                   classification_mode='hierarchical',
                   grid_size=0,
                   testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(cache=cache,
                   frac=frac,
                   preprocessor=preprocessor,
                   classification_mode='hierarchical',
                   grid_size=0)
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = tut.floor_height
    training_df = tut.training_df
    training_data = tut.training_data
    testing_df = tut.testing_df
    testing_data = tut.testing_data

    ### build and train a SIMO model
    print(
        "Building and training a SISO model for three-dimensional regression ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_3d_scaled
    coord_scaler = training_data.coord_3d_scaler  # for inverse transform
    labels = training_data.labels
    input = Input(shape=(rss.shape[1], ), name='input')  # common input

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(dataset=dataset,
                                 input_data=rss,
                                 preprocessor=preprocessor,
                                 hidden_layers=dae_hidden_layers,
                                 cache=cache,
                                 model_fname=None,
                                 optimizer=optimizer,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 validation_split=validation_split)
        x = model(input)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(dataset=dataset,
                     input_data=rss,
                     preprocessor=preprocessor,
                     hidden_layers=sdae_hidden_layers,
                     cache=cache,
                     model_fname=None,
                     optimizer=optimizer,
                     corruption_level=corruption_level,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=validation_split)
        x = model(input)
    else:
        x = input

    # regression hidden layers
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(dropout)(x)
    if regression_hidden_layers != '':
        for units in regression_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)

    # coordinates regression output
    x = Dense(coord.shape[1], kernel_initializer='normal')(x)
    x = BatchNormalization()(x)
    coordinates_output = Activation('linear', name='coordinates_output')(
        x)  # 'linear' activation

    model = Model(inputs=input, outputs=coordinates_output)
    model.compile(optimizer=optimizer,
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 verbose=0)
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=0)

    print("- Training a coordinates regressor ...", end='')
    startTime = timer()
    history = model.fit(x={'input': rss},
                        y={'coordinates_output': coord},
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose,
                        callbacks=[checkpoint, early_stop],
                        validation_split=validation_split,
                        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    flrs = labels.floor
    coord = testing_data.coord_3d  # original coordinates

    # calculate the classification accuracies and localization errors
    coords_scaled_pred = model.predict(rss, batch_size=batch_size)
    coord_est = coord_scaler.inverse_transform(
        coords_scaled_pred)  # inverse-scaling
    tmp = np.maximum(np.minimum(coord_est[:, 2], 4 * tut.floor_height),
                     0)  # clamping to [0, 4*tut.floor_height]
    flrs_pred = np.floor(
        tmp / tut.floor_height + 0.5
    )  # floor number (0..4); N.B. round() behavior in Python 3 has been changed, so we cannot use it.
    flr_results = (np.equal(np.argmax(flrs, axis=1), flrs_pred)).astype(int)
    flr_acc = flr_results.mean()

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    mean_error_2d = dist_2d.mean()
    median_error_2d = np.median(dist_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(np.argmax(flrs, axis=1) - flrs_pred)
    z_diff_squared = (flr_height**2) * np.square(flr_diff)
    dist_3d = np.sqrt(
        np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    median_error_3d = np.median(dist_3d)

    LocalizationResults = namedtuple('LocalizationResults', [
        'flr_acc', 'mean_error_2d', 'median_error_2d', 'mean_error_3d',
        'median_error_3d', 'elapsedTime'
    ])
    return LocalizationResults(flr_acc=flr_acc,
                               mean_error_2d=mean_error_2d,
                               median_error_2d=median_error_2d,
                               mean_error_3d=mean_error_3d,
                               median_error_3d=median_error_3d,
                               elapsedTime=elapsedTime)
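A hedged example of invoking this function (the argument values are illustrative and assume the surrounding module's imports, session_conf, and data files are in place):

results = siso_regression_tut(gpu_id=0,
                              dataset='tut',
                              frac=1.0,
                              validation_split=0.2,
                              preprocessor='standard_scaler',
                              batch_size=64,
                              epochs=100,
                              optimizer='adam',
                              dropout=0.2,
                              corruption_level=0.1,
                              dae_hidden_layers='',
                              sdae_hidden_layers=[128, 128],
                              cache=True,
                              regression_hidden_layers=[256, 128],
                              verbose=1)
print("Floor accuracy: {:.3f}, mean 3D error: {:.3f} m".format(
    results.flr_acc, results.mean_error_3d))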
Example #25
    def train(self, epochs, learning_rate, batch_size, wmap, vbal, model_name,
              new_ex):

        if ".hdf5" in model_name:
            model_name = model_name.split(".hdf5")[0]
        else:
            pass

        print("Loading data")

        if wmap == False:
            imgs_train, imgs_mask_train = self.load_data(wmap=wmap, vbal=vbal)
        else:
            imgs_train, imgs_mask_train, img_weights = self.load_data(
                wmap=wmap, vbal=vbal)

        print("Loading data done")

        model = self.get_mitosegnet(wmap, learning_rate)
        print("Got MitoSegNet")

        print(self.path + os.sep + model_name)

        if os.path.isfile(self.path + os.sep + model_name + ".hdf5"):

            model.load_weights(self.path + os.sep + model_name + ".hdf5")
            print("Loading weights")

        else:
            print(
                "No previously optimized weights were loaded. Proceeding without"
            )

        # Set network weights saving mode.
        # save previously established network weights (saving model after every epoch)

        print('Fitting model...')

        if new_ex == "New":

            first_ep = 0
            model_name = model_name + "_" + str(self.img_rows) + "_"

        elif new_ex == "Finetuned_New":

            first_ep = 0

        else:

            if os.path.isfile(self.path + os.sep + model_name +
                              'training_log.csv'):

                prev_csv = True

                prev_csv_file = pd.read_csv(self.path + os.sep + model_name +
                                            'training_log.csv')
                first_ep = len(prev_csv_file)

                if prev_csv_file.shape[1] > 7:
                    prev_csv_file = prev_csv_file.drop(
                        prev_csv_file.columns[[0]], axis=1)

            else:

                prev_csv = False

        csv_logger = CSVLogger(self.path + os.sep + model_name +
                               'training_log.csv')

        tensorboard = TensorBoard(log_dir=self.path + os.sep +
                                  "logs/{}".format(time()))

        # Set callback functions to early stop training and save the best model so far
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=20),
            ModelCheckpoint(filepath=self.path + os.sep + model_name + ".hdf5",
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True), csv_logger, tensorboard
        ]

        if wmap == True:
            x = [imgs_train, img_weights]
        else:
            x = imgs_train

        print(
            "\nCopy the line below into the terminal, press enter and click on the link to evaluate the training "
            "performance:\n\ntensorboard --logdir=" + self.path + os.sep +
            "logs/\n")

        ### ECG see if this solves
        # https://github.com/tensorflow/tensorflow/issues/34944
        tf.config.experimental_run_functions_eagerly(True)
        ###

        model.fit(x=x,
                  y=imgs_mask_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=1,
                  validation_split=0.2,
                  shuffle=True,
                  callbacks=callbacks)

        csv_file = pd.read_csv(self.path + os.sep + model_name +
                               'training_log.csv')

        if new_ex == "New" or new_ex == "Finetuned_New":

            csv_file["epoch"] = list(range(1, len(csv_file) + 1))
            last_ep = len(csv_file)

        if new_ex == "Existing" and prev_csv == True:

            frames = [prev_csv_file, csv_file]
            merged = pd.concat(frames, names=[])

            merged["epoch"] = list(range(1, len(merged) + 1))

            last_ep = len(merged)

            merged.to_csv(self.path + os.sep + model_name + 'training_log.csv')

        if new_ex == "New":

            info_file = open(
                self.path + os.sep + model_name + str(first_ep) + "-" +
                str(last_ep) + "_train_info.txt", "w")
            info_file.write("Learning rate: " + str(learning_rate) +
                            "\nBatch size: " + str(batch_size) +
                            "\nClass balance weight factor: " + str(vbal))
            info_file.close()

        K.clear_session()
Example #26
 def train(self, 
           train_files,
           val_files,
           test_file,
           batch_size=16,
           epochs=3,
           steps_per_epoch=1000,
           test_steps=8,
           save=True,
           k=8):
     """ Training function for SliceNet model
     Args:
         train_files: List of training hdf5 files as strings. Each hdf5 file
                      contains examples that will be batched to the network
         val_files: List of validation hdf5 files as strings. Each hdf5 file
                      contains examples that will be batched to the network
         test_file: Single HDF5 file containing test examples
         batch_size: Number of training examples per batch
         epochs: Number of epochs to train over data
         steps_per_epoch: Number of batches per epoch
         test_steps: Number of batches to test after each epoch
         save: Boolean indicating that model weights will be 
               saved periodically during training
         k: K-value for Pk score when evaluating test set.
            see 'postprocess.py' for more details on Pk score
     Return:
         history: Keras training history object
         pk: Pk metric object containing pk scores throughout training
     """
     # Define batch generator for training and validation
     trainGen = batchGen(train_files, batch_size, self.maxlen, classification=self.classification)
     valGen = batchGen(val_files, batch_size, self.maxlen, classification=self.classification)
     
     self.model.summary()
     
     print('Starting Training')
     with tf.Session() as sess:
         # Integrate keras session with tensorflow
         K.set_session(sess)
         initOp = [tf.global_variables_initializer(),
                   tf.initializers.tables_initializer()]
         sess.run(initOp)
         
         if self.pretrain:
             self.model.load_weights(self.weights_path)
             
         # Define model callbacks
         save_weights = ModelCheckpoint('./models/weights_epoch{epoch:03d}.h5', 
                                      save_weights_only=True, period=2)
         pk = pkHistory(test_file=test_file, num_samples=test_steps, k=k)
         
         # Train network
         history = self.model.fit_generator(trainGen,
                                       steps_per_epoch=steps_per_epoch,
                                       epochs=epochs,
                                       verbose=1,
                                       validation_data=valGen,
                                       validation_steps=1,
                                       callbacks=[save_weights, pk])
                     
         if save:
             # Serialize weights to HDF5
             self.model.save_weights('./models/weights_final.h5')
             print("Saved weights to disk")
             
     return history, pk
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=True)
generator.fit(train_X)

earlystopper = EarlyStopping(monitor='val_accuracy', patience=25, verbose=1)
checkpointer = ModelCheckpoint('./best_model.h5',
                               monitor='val_accuracy',
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=True)
model = Sequential()
model.add(Conv2D(32, kernel_size=3, input_shape=(img_width, img_height,3), activation='relu', padding='same'))
model.add(MaxPool2D(2))
model.add(Dropout(0.15))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(Dropout(0.15))
model.add(Flatten())
model.add(Dense(train_y.columns.size, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

steps = np.round(train_X.shape[0] / 256, 0)
training = model.fit_generator(generator.flow(train_X,train_y, batch_size=256)
                        ,epochs=200
model.add(Activation('relu'))
model.add(Dropout(0.50))
model.add(Dense(num_classes))
model.add(Activation('sigmoid'))

model.load_weights('./saved_models/keras_nus_trained_weight_skenario2_10DO.h5')
opt = keras.optimizers.Adam(lr=1e-4)

model.compile(loss='binary_crossentropy',
              optimizer=opt,
              metrics=['top_k_categorical_accuracy'])
print(model.summary())
checkpoint = ModelCheckpoint("./saved_models/skenario22_2.h5",
                             monitor='val_top_k_categorical_accuracy',  # full metric name as logged by Keras
                             verbose=1,
                             save_best_only=True,
                             save_weights_only=False,
                             mode='auto',
                             period=1)
early = EarlyStopping(monitor='val_top_k_categorical_accuracy',
                      min_delta=0,
                      patience=10,
                      verbose=1,
                      mode='auto')
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_split=0.3,
    def make(self, cf, valid_gen):
        cb = []

        # Jaccard callback
        if cf.dataset.class_mode == 'segmentation':
            print('   Jaccard metric')
            cb += [Jacc_new(cf.dataset.n_classes)]

        # Save image results
        if cf.save_results_enabled:
            print('   Save image result')
            cb += [Save_results(n_classes=cf.dataset.n_classes,
                                void_label=cf.dataset.void_class,
                                save_path=cf.savepath,
                                generator=valid_gen,
                                epoch_length=int(
                                    math.ceil(cf.save_results_nsamples / float(cf.save_results_batch_size))),
                                color_map=cf.dataset.color_map,
                                classes=cf.dataset.classes,
                                tag='valid')]

        # Early stopping
        if cf.earlyStopping_enabled:
            print('   Early stopping')
            cb += [EarlyStopping(monitor=cf.earlyStopping_monitor,
                                 mode=cf.earlyStopping_mode,
                                 patience=cf.earlyStopping_patience,
                                 verbose=cf.earlyStopping_verbose)]

        # Define model saving callbacks
        if cf.checkpoint_enabled:
            print('   Model Checkpoint')
            cb += [ModelCheckpoint(filepath=os.path.join(cf.savepath, "weights.hdf5"),
                                   verbose=cf.checkpoint_verbose,
                                   monitor=cf.checkpoint_monitor,
                                   mode=cf.checkpoint_mode,
                                   save_best_only=cf.checkpoint_save_best_only,
                                   save_weights_only=cf.checkpoint_save_weights_only)]

        # Plot the loss after every epoch.
        if cf.plotHist_enabled:
            print('   Plot per epoch')
            cb += [History_plot(cf.dataset.n_classes, cf.savepath,
                                cf.train_metrics, cf.valid_metrics,
                                cf.best_metric, cf.best_type, cf.plotHist_verbose)]

        # Decay learning rate at specific epochs
        if cf.lrDecayScheduler_enabled:
            print('   Learning rate decay scheduler (Deprecated)')
            cb += [LRDecayScheduler(cf.lrDecayScheduler_epochs, cf.lrDecayScheduler_rate)]

        # Save the log
        cb += [CSVLogger(os.path.join(cf.savepath, 'logFile.csv'),
                         separator=',', append=False)]

        # Learning rate scheduler
        if cf.LRScheduler_enabled:
            print('   Learning rate scheduler by batch')
            scheduler = Scheduler(cf.LRScheduler_type, cf.learning_rate,
                                  cf.LRScheduler_M, cf.LRScheduler_decay,
                                  cf.LRScheduler_S, cf.LRScheduler_power)

            if cf.LRScheduler_batch_epoch == 'batch':
                cb += [LearningRateSchedulerBatch(scheduler.scheduler_function)]
            elif cf.LRScheduler_batch_epoch == 'epoch':
                cb += [LearningRateScheduler(scheduler.scheduler_function)]
            else:
                raise ValueError('Unknown scheduler mode: ' + cf.LRScheduler_batch_epoch)

        # TensorBoard callback
        if cf.TensorBoard_enabled:
            print('   Tensorboard')
            if cf.TensorBoard_logs_folder is None:
                log_dir = os.path.join(cf.usr_path, 'TensorBoardLogs')
            else:
                log_dir = cf.TensorBoard_logs_folder
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
            cb += [TensorBoard(log_dir=log_dir,
                               histogram_freq=cf.TensorBoard_histogram_freq,
                               write_graph=cf.TensorBoard_write_graph,
                               write_images=cf.TensorBoard_write_images)]

        # ElapsedTime Callback
        print('   Elapsed Time')
        cb += [ElapsedTime()]

        # Output the list of callbacks
        return cb
                                             class_mode='categorical',
                                             shuffle=False)

validation_generator = datagen.flow_from_directory(val_data_dir,
                                                   target_size=(img_width, img_height),
                                                   batch_size=batch_size,
                                                   class_mode='categorical')

tbCallBack = TensorBoard(log_dir=log_dir + 'tboriginal',
                         histogram_freq=10,
                         write_graph=True,
                         profile_batch=0)

save_callback = ModelCheckpoint(filepath=weights_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                monitor='val_acc',
                                verbose=1,
                                save_best_only=True,
                                mode='max')

earlyCallBack = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=4,
                              verbose=1,
                              mode='min',
                              baseline=None,
                              restore_best_weights=True)

history = new_model.fit_generator(train_generator,
                                  steps_per_epoch=(int(400 // batch_size) + 1),
                                  epochs=number_of_epoch,
                                  validation_data=validation_generator,