Example 1
def train_segmentation_model_on_patch_paths(patch_paths, weights_path, config):
    '''
    Input: patch_paths, weights_path, config
    Output: trained segmentation model (saved to disk), training history
    '''
    # get train-val split
    train_patch_paths, val_patch_paths = get_train_val_scene_dirs(
        patch_paths, config)
    print('num. training images: ', len(train_patch_paths))
    print('num. validation images: ', len(val_patch_paths))

    # save train-val-split
    train_split_filepath = weights_path.split(
        '_weights.h5')[0] + '_train-val-split.json'
    with open(train_split_filepath, 'w') as f:
        train_split = {
            'train_scene_dirs': train_patch_paths,
            'val_scene_dirs': val_patch_paths,
        }
        json.dump(train_split, f, indent=4)

    # get datagen
    train_datagen_labels = config['training_params']['label_smoothing']
    train_datagen = datagen.SegmentationDataGenerator(
        train_patch_paths, config, labels=train_datagen_labels)
    val_datagen = datagen.SegmentationDataGenerator(val_patch_paths,
                                                    config,
                                                    labels='onehot')

    # get compiled model
    print('getting compiled densenet model...')
    label_encoder = land_cover_utils.get_label_encoder(config)
    loss = 'categorical_crossentropy'
    batch_size = config['fc_densenet_params']['batch_size']
    model = models.get_compiled_fc_densenet(config, label_encoder, loss=loss)

    # fit keras model
    print("Training keras model...")
    callbacks = models.get_callbacks(weights_path, config)
    history = model.fit_generator(
        train_datagen,
        epochs=config['training_params']['max_epochs'],
        validation_data=val_datagen,
        callbacks=callbacks,
        max_queue_size=batch_size,
        use_multiprocessing=config['training_params']['use_multiprocessing'],
        workers=config['training_params']['workers'])
    history = land_cover_utils.make_history_json_serializable(history.history)

    # save model history
    history_filepath = weights_path.split('_weights.h5')[0] + '_history.json'
    with open(history_filepath, 'w') as f:
        json.dump(history, f, indent=4)
    print("Model history saved to: ", history_filepath)
    return model, history
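Every example on this page routes its callbacks through a project-specific get_callbacks helper whose body is not shown. Below is only a minimal sketch matching the call in Example 1 (weights_path, config), assuming it bundles a ModelCheckpoint and an EarlyStopping callback monitored on validation loss; the 'early_stopping_patience' config key is a hypothetical name.

from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

def get_callbacks(weights_path, config):
    # Keep the best weights on disk and stop once val_loss stops improving.
    checkpoint = ModelCheckpoint(weights_path,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 save_weights_only=True)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=config['training_params']['early_stopping_patience'])  # hypothetical key
    return [checkpoint, early_stopping]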
Example 2
def training_stage(train_gen,
                   val_gen,
                   train_steps,
                   val_steps,
                   model_path,
                   tensorboard_dir,
                   args,
                   last_activation,
                   metrics,
                   loss,
                   X_train,
                   Y_train,
                   X_val,
                   Y_val,
                   weights_path=None):
    print("Creating model")
    sys.stdout.flush()
    reset_tensorflow()
    model = get_model(args.model_name,
                      dropout=args.dropout,
                      last_activation=last_activation,
                      activation=args.activation,
                      channels=3 if args.use_depth else None)
    optimizer = args.optimizer
    if optimizer == 'sgd':
        optimizer = SGD(momentum=args.momentum, decay=args.weight_decay)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    if weights_path is not None:
        print("Loading weights")
        model.load_weights(weights_path)
    callbacks = get_callbacks(model_path,
                              args,
                              tensorboard_dir=tensorboard_dir)
    print("Fitting model")
    model.fit_generator(generator=train_gen,
                        steps_per_epoch=train_steps,
                        epochs=args.epochs,
                        verbose=2,
                        callbacks=callbacks,
                        validation_data=val_gen,
                        validation_steps=val_steps,
                        shuffle=args.shuffle)
    model.load_weights(model_path)
    print("Evaluating")
    res = model.evaluate(X_train, Y_train, verbose=0)
    print("Train loss:", res[0])
    print("Train mean IoU:", res[1])
    res = model.evaluate(X_val, Y_val, verbose=0)
    print("Test loss:", res[0])
    print("Test mean IoU:", res[1])
    return model
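training_stage calls a reset_tensorflow() helper before building each model; its implementation is not part of this page. A minimal sketch, assuming it only clears the Keras session so consecutive stages start from a fresh graph:

import gc
from tensorflow.keras import backend as K

def reset_tensorflow():
    # Drop the current graph/session state between training stages
    # (hypothetical implementation).
    K.clear_session()
    gc.collect()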
Example 3
def train_model_keras():
    """
      Train model using keras API
  """
    datagen = ImageGenerator(batch_size=64)
    generator = datagen.generator()
    model = models.build_graph(input_shape=datagen.shape,
                               output_dim=datagen.num_classes)
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(lr=0.01),
                  metrics=['accuracy', models.precison, models.recall])
    callbacks = models.get_callbacks()
    model.fit_generator(generator, steps_per_epoch=1000, callbacks=callbacks)
    results = models.validate(model)
    print(results)
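models.precison and models.recall in the metrics list are custom functions, not built-in Keras metrics. A plausible sketch of such batch-wise metrics (the bodies below are assumptions; the names, including the 'precison' spelling, mirror the attributes referenced above):

from tensorflow.keras import backend as K

def precison(y_true, y_pred):
    # true positives / predicted positives, computed per batch
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall(y_true, y_pred):
    # true positives / actual positives, computed per batch
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())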
Example 4
def train_segmentation_model_on_scene_dirs(scene_dirs, weights_path, config,
                                           competition_mode=False,
                                           predict_logits=False):
    '''
    Input: scene_dirs, weights_path, config
    Output: trained segmentation model (saved to disk), training history
    '''
    # get train, val scene dirs
    if competition_mode:
        print("Getting competition train/val split from holdout .csv file...")
        train_scene_dirs, val_scene_dirs = get_competition_train_val_scene_dirs(
            scene_dirs, config)
    else:
        print("Performing random train/val split...")
        train_scene_dirs, val_scene_dirs = get_train_val_scene_dirs(
            scene_dirs, config)
    print("train_scene_dirs: ", train_scene_dirs)
    print("val_scene_dirs: ", val_scene_dirs)
    print('num. training scenes: ', len(train_scene_dirs))
    print('num. validation scenes: ', len(val_scene_dirs))

    # save train-val-split
    train_split_filepath = weights_path.split(
        '_weights.h5')[0] + '_train-val-split.json'
    with open(train_split_filepath, 'w') as f:
        train_split = {
            'train_scene_dirs': train_scene_dirs,
            'val_scene_dirs': val_scene_dirs,
        }
        json.dump(train_split, f, indent=4)

    # get patch paths
    train_patch_paths = land_cover_utils.get_segmentation_patch_paths_for_scene_dirs(
        train_scene_dirs)
    val_patch_paths = land_cover_utils.get_segmentation_patch_paths_for_scene_dirs(
        val_scene_dirs)

    # set up data generators with label smoothing
    if config['training_params']['label_smoothing'] == 'kmeans':
        train_datagen_labels = 'kmeans'
        print('training with kmeans label smoothing...')
    else:
        train_datagen_labels = 'naive'
        label_smoothing_factor = config['training_params'][
            'label_smoothing_factor']
        print(
            f'training with naive label smoothing, factor={label_smoothing_factor}...'
        )
    train_datagen = datagen.SegmentationDataGenerator(
        train_patch_paths, config, labels=train_datagen_labels)
    val_datagen = datagen.SegmentationDataGenerator(val_patch_paths,
                                                    config,
                                                    labels='onehot')

    # get custom loss function
    label_encoder = land_cover_utils.get_label_encoder(config)
    if config['training_params']['class_weight'] == 'balanced':
        print('training with balanced loss...')
        class_weights = train_datagen.get_class_weights_balanced()
    else:
        print('training with unbalanced loss...')
        class_weights = None
    loss = models.get_custom_loss(label_encoder,
                                  class_weights,
                                  config,
                                  from_logits=predict_logits)

    # get compiled keras model
    if 'unet' in weights_path.lower():
        print('getting compiled unet model...')
        batch_size = config['unet_params']['batch_size']
        model = models.get_compiled_unet(config,
                                         label_encoder,
                                         loss=loss,
                                         predict_logits=predict_logits)
    else:
        print('getting compiled densenet model...')
        batch_size = config['fc_densenet_params']['batch_size']
        model = models.get_compiled_fc_densenet(config,
                                                label_encoder,
                                                loss=loss)

    # fit keras model
    print("Training keras model...")
    callbacks = models.get_callbacks(weights_path, config)
    history = model.fit_generator(
        train_datagen,
        epochs=config['training_params']['max_epochs'],
        validation_data=val_datagen,
        callbacks=callbacks,
        max_queue_size=batch_size,
        use_multiprocessing=config['training_params']['use_multiprocessing'],
        workers=config['training_params']['workers'])
    history = land_cover_utils.make_history_json_serializable(history.history)

    # save model history
    history_filepath = weights_path.split('_weights.h5')[0] + '_history.json'
    with open(history_filepath, 'w') as f:
        json.dump(history, f, indent=4)
    print("Model history saved to: ", history_filepath)
    return model, history
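models.get_custom_loss is another project-specific helper. As a rough sketch under stated assumptions (a per-class weight vector aligned with a scikit-learn-style label_encoder.classes_, categorical cross-entropy as the base loss), it might look like this:

from tensorflow.keras import backend as K

def get_custom_loss(label_encoder, class_weights, config, from_logits=False):
    # Hypothetical sketch: categorical cross-entropy, optionally re-weighted
    # per class; class_weights is assumed to map class index -> weight.
    weight_vector = None
    if class_weights is not None:
        weight_vector = K.constant(
            [class_weights[i] for i in range(len(label_encoder.classes_))])

    def loss(y_true, y_pred):
        ce = K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
        if weight_vector is not None:
            # weight each pixel by the weight of its true class
            ce = ce * K.sum(y_true * weight_vector, axis=-1)
        return K.mean(ce)

    return loss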
Example 5
from keras.applications.resnet50 import ResNet50 as ResNet, preprocess_input
from keras_preprocessing.image import ImageDataGenerator
import models
import numpy as np
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
from sklearn.metrics import classification_report, confusion_matrix
from data import get_data_generators
from utils import *
from keras.models import load_model

batch_size = 8
image_size = 256
epochs = [5, 15]

callbacks = models.get_callbacks()

class_weights = {0: 0.5, 1: 1.0}

train_generator, val_generator, test_generator = get_data_generators(
    image_size, batch_size, 'GRY', 'abnormality', 'vgg')

# model = models.create_vgg_model(image_size=256, dropout=0.5)

#
# print("Stage 1 - Transfer Learning:")
#
# model.compile(loss='binary_crossentropy', optimizer=Adam(0.001), metrics=['accuracy'])
#
# history = model.fit(train_generator,
#                     steps_per_epoch=train_generator.samples // batch_size,
#                     epochs=epochs[0],
Example 6
def gen_flow_for_two_inputs(X1, X2, y):
    genX1 = gen.flow(X1, y, batch_size=batch_size, seed=666)
    genX2 = gen.flow(X1, X2, batch_size=batch_size, seed=666)
    while True:
        X1i = next(genX1)
        X2i = next(genX2)
        # Both generators use the same seed, so X1i[0] and X2i[0] contain the
        # same augmented images; genX2 carries X2 in place of the labels.
        # Assert arrays are equal - this was for peace of mind, but slows down training
        # np.testing.assert_array_equal(X1i[0], X2i[0])
        yield [X1i[0], X2i[1]], X1i[1]


gen_flow = gen_flow_for_two_inputs(X_train, X_angle_train, y_train)

file_path = "model_weights.hdf5"
callbacks = models.get_callbacks(filepath=file_path, patience=5)

model.fit_generator(gen_flow,
                    validation_data=([X_valid, X_angle_valid], y_valid),
                    steps_per_epoch=len(X_train) // 32,
                    epochs=25,
                    callbacks=callbacks)

model.load_weights(filepath=file_path)

print("Train evaluate:")
print(
    model.evaluate([X_train, X_angle_train],
                   y_train,
                   verbose=1,
                   batch_size=200))
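gen_flow_for_two_inputs assumes an ImageDataGenerator named gen defined elsewhere in the original script. One plausible definition is sketched below (the augmentation settings are assumptions); because both flow() calls share the same seed, the random transforms applied to the two streams stay aligned.

from keras.preprocessing.image import ImageDataGenerator

# Shared augmentation generator for the two synchronized flows.
gen = ImageDataGenerator(horizontal_flip=True,
                         vertical_flip=True,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.1,
                         rotation_range=10)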
Example 7
import pandas as pd
import os
from sklearn.metrics import classification_report, confusion_matrix
from utils import *
from data import get_data_generators
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
import models

image_size = 128
batch_size = 16
epoch = 30

train_generator, val_generator, test_generator = get_data_generators(image_size=image_size, batch_size=batch_size,
                                                                     data='RGB', task='abnormality', model=None)


callbacks = models.get_callbacks(folder="checkpoints/custom/")

class_weights = {0: 1.0, 1: 1.0}


model = models.create_custom_model3(size=image_size)


model.compile(loss='binary_crossentropy', optimizer=Adam(0.001), metrics=['accuracy'])


history = model.fit(train_generator,
                    steps_per_epoch=train_generator.samples // batch_size,
                    epochs=epoch,
                    callbacks=callbacks)
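get_data_generators comes from the project's data module and is not reproduced here. A rough sketch of what it might look like, assuming flow_from_directory generators over a hypothetical data/<task>/{train,val,test} layout with binary labels:

from keras.preprocessing.image import ImageDataGenerator

def get_data_generators(image_size, batch_size, data='RGB', task='abnormality', model=None):
    # Hypothetical sketch: three directory-based generators for the given task.
    color_mode = 'grayscale' if data == 'GRY' else 'rgb'
    datagen = ImageDataGenerator(rescale=1. / 255)
    flow_args = dict(target_size=(image_size, image_size),
                     color_mode=color_mode,
                     batch_size=batch_size,
                     class_mode='binary')
    train_gen = datagen.flow_from_directory('data/%s/train' % task, **flow_args)
    val_gen = datagen.flow_from_directory('data/%s/val' % task, **flow_args)
    test_gen = datagen.flow_from_directory('data/%s/test' % task, shuffle=False, **flow_args)
    return train_gen, val_gen, test_gen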
Example 8
                                                train_ratio=0.8)

X_train, Y_train, X_test, Y_test = data

model = models.generate_conv_net_base(input_shape=X_train.shape[1:],
                                      nb_classes=nb_classes)
temp_fname = tempfile.mkstemp(suffix='.hdf5', dir='/data/tmp')[1]

### Training
model.fit(X_train,
          Y_train,
          batch_size=32,
          nb_epoch=1,
          shuffle=True,
          class_weight='auto',
          callbacks=models.get_callbacks(temp_fname))

### Testing
eval_result = model.evaluate(X_test, Y_test)

print(model.metrics_names)
print(eval_result)
pred_result = model.predict(X_test)
# print(pred_result)

## Making a model
# model.load_weights(temp_fname)
os.remove(temp_fname)

prediction_signature = tf.saved_model.signature_def_utils.predict_signature_def(
    {"image": model.input}, {"prediction": model.output})