Example 1

# checkpoint every epoch and reduce the learning rate when validation loss plateaus
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(model_folder+'/model_{epoch:02d}.h5', save_weights_only=True, save_best_only=False, mode='min'),
    tf.keras.callbacks.ReduceLROnPlateau(),
]

# train model
history = model.fit_generator(
    train_dataloader, 
    steps_per_epoch=len(train_dataloader), 
    epochs=EPOCHS, 
    callbacks=callbacks, 
    validation_data=valid_dataloader, 
    validation_steps=len(valid_dataloader),
)
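
fit_generator is deprecated in TensorFlow 2.x: model.fit accepts Python generators and keras.utils.Sequence objects directly. A sketch of the equivalent call, assuming train_dataloader and valid_dataloader are Sequence objects (in that case the explicit step counts are optional, since a Sequence reports its own length):

# equivalent training call with the TF 2.x API
history = model.fit(
    train_dataloader,
    epochs=EPOCHS,
    callbacks=callbacks,
    validation_data=valid_dataloader,
)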

# save the training information
plot_history(model_folder+'/train_history.png',history)
record_dir = model_folder+'/train_dir'
generate_folder(record_dir)
save_history(record_dir, history)
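
generate_folder, plot_history and save_history are helpers that the snippet does not define. A minimal sketch of what they might look like, assuming plot_history only needs to draw the loss curves and save_history only needs to persist history.history; the real signatures may differ (Example 2 below calls plot_history with two extra arguments):

import os
import json
import matplotlib
matplotlib.use('Agg')  # render figures without a display
import matplotlib.pyplot as plt

def generate_folder(path):
    # create the directory (and any parents) if it does not exist yet
    os.makedirs(path, exist_ok=True)

def plot_history(file_name, history):
    # draw training/validation loss curves and save them as an image
    plt.figure()
    plt.plot(history.history['loss'], label='train loss')
    if 'val_loss' in history.history:
        plt.plot(history.history['val_loss'], label='val loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig(file_name)
    plt.close()

def save_history(record_dir, history):
    # dump every logged metric curve to a JSON file inside record_dir
    with open(os.path.join(record_dir, 'history.json'), 'w') as f:
        json.dump({k: [float(v) for v in vals] for k, vals in history.history.items()}, f, indent=2)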

# evaluate model
test_dataset = Dataset(
    x_test_dir, 
    y_test_dir, 
    classes=CLASSES, 
    augmentation=get_validation_augmentation(test_dim),
    preprocessing=get_preprocessing(preprocess_input),
)

test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)
# rebuild the network at the test input size for evaluation
model = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape=(test_dim, test_dim, 3), classes=n_classes, activation=activation)
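
Dataset and Dataloder (spelled that way in the snippet) are not tf.keras classes; the names appear to follow the data-loading pattern of the segmentation_models example notebooks, where Dataloder is a keras.utils.Sequence that batches the (image, mask) pairs produced by a Dataset. A minimal sketch of such a batching wrapper, under that assumption:

import numpy as np
import tensorflow as tf

class Dataloder(tf.keras.utils.Sequence):
    # batches (image, mask) pairs from a Dataset for fit/evaluate

    def __init__(self, dataset, batch_size=1, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(dataset))
        self.on_epoch_end()

    def __len__(self):
        # number of complete batches per epoch
        return len(self.indexes) // self.batch_size

    def __getitem__(self, i):
        # gather one batch and stack images and masks separately
        start = i * self.batch_size
        samples = [self.dataset[j] for j in self.indexes[start:start + self.batch_size]]
        images, masks = zip(*samples)
        return np.stack(images), np.stack(masks)

    def on_epoch_end(self):
        # reshuffle the sample order between epochs when requested
        if self.shuffle:
            np.random.shuffle(self.indexes)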
Example 2

# save a checkpoint (into model_dir, where the validation code below looks
# for .h5 files) only when the monitored validation mce improves
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(model_dir + '/best_model-{epoch:03d}.h5',
                                       monitor='val_mce',
                                       save_weights_only=True,
                                       save_best_only=True,
                                       mode='min'),
]
model.compile(optim, loss, metrics)

history = model.fit_generator(
    train_dataloader,
    steps_per_epoch=len(train_dataloader),
    epochs=epochs,
    callbacks=callbacks,
    validation_data=valid_dataloader,
    validation_steps=len(valid_dataloader),
)

# plot the training curves only for runs longer than t_epochs epochs
t_epochs = 10
if epochs > t_epochs:
    plot_history(model_dir + '/train.png', history, deeply, t_epochs)
# validate the performance: with save_best_only=True, the most recently written
# checkpoint is the best one, and the zero-padded epoch number keeps the
# lexicographic sort in epoch order
model_names = sorted(
    [mn for mn in os.listdir(model_dir) if mn.endswith('.h5')])
best_model_name = model_names[-1]
model.load_weights(model_dir + '/' + best_model_name)
scores = model.evaluate_generator(valid_dataloader)
print(model.metrics_names)
print(scores)
file_name = model_dir + '/val_mce.txt'
save_metrics(file_name, model.metrics_names, scores)
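
save_metrics is also not defined in the snippet. A minimal sketch, assuming it simply writes one "name: value" line per evaluated metric to the given text file:

def save_metrics(file_name, metric_names, scores):
    # write one "name: value" line per evaluated metric
    with open(file_name, 'w') as f:
        for name, value in zip(metric_names, scores):
            f.write('{}: {:.6f}\n'.format(name, float(value)))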