Example #1

# Imports needed by the functions below (assuming standalone Keras; use
# tensorflow.keras.callbacks instead if the project targets tf.keras).
# The batch `generator` helper is the project's own and is sketched next.
import numpy as np
from os import remove
from os.path import basename
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
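The `generator` referenced by all three training functions is not shown on this page. Below is a minimal sketch of one way such a batch generator could look, assuming the features sit in a single HDF5 dataset named `feature_all` and that `labels` and `sample_weights` are aligned positionally with `indices`; the dataset name, the reshape, and the ignored `multi_inputs`/`channel` arguments are assumptions, not the original implementation.

import h5py

def generator(path_feature_data, indices, number_of_batches, file_size,
              input_shape, labels=None, sample_weights=None,
              multi_inputs=False, channel=1):
    """Yield (X, y) or (X, y, w) batches forever, as fit_generator expects."""
    # Assumption: one HDF5 dataset 'feature_all' with one flattened row per sample.
    with h5py.File(path_feature_data, 'r') as f:
        features = f['feature_all'][()]
    while True:  # Keras generators must loop indefinitely
        for ii in range(number_of_batches):
            sl = slice(ii * file_size, (ii + 1) * file_size)
            batch_indices = indices[sl]
            # reshape flat feature rows to the network's input shape
            X = features[batch_indices].reshape(
                (len(batch_indices),) + tuple(input_shape))
            y = labels[sl]
            if sample_weights is not None:
                yield X, y, sample_weights[sl]
            else:
                yield X, y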
def model_train_validation(model_0, batch_size, patience, input_shape,
                           path_feature_data, indices_train, Y_train,
                           indices_validation, Y_validation, file_path_model,
                           filename_log, channel):
    """
    train the model with validation early stopping
    :param model_0:
    :param batch_size:
    :param patience:
    :param input_shape:
    :param path_feature_data:
    :param indices_train:
    :param Y_train:
    :param indices_validation:
    :param Y_validation:
    :param file_path_model:
    :param filename_log:
    :return:
    """

    callbacks = [
        ModelCheckpoint(file_path_model,
                        monitor='val_loss',
                        verbose=0,
                        save_best_only=True),
        EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
        CSVLogger(filename=filename_log, separator=';')
    ]

    print("start training with validation...")

    steps_per_epoch_train = int(np.ceil(len(indices_train) / batch_size))
    steps_per_epoch_val = int(np.ceil(len(indices_validation) / batch_size))

    generator_train = generator(path_feature_data=path_feature_data,
                                indices=indices_train,
                                number_of_batches=steps_per_epoch_train,
                                file_size=batch_size,
                                input_shape=input_shape,
                                labels=Y_train,
                                multi_inputs=False,
                                channel=channel)
    generator_val = generator(path_feature_data=path_feature_data,
                              indices=indices_validation,
                              number_of_batches=steps_per_epoch_val,
                              file_size=batch_size,
                              input_shape=input_shape,
                              labels=Y_validation,
                              multi_inputs=False,
                              channel=channel)

    model_0.fit_generator(generator=generator_train,
                          steps_per_epoch=steps_per_epoch_train,
                          epochs=500,
                          validation_data=generator_val,
                          validation_steps=steps_per_epoch_val,
                          callbacks=callbacks,
                          verbose=2)


def model_train_schluter(model_0, batch_size, input_shape, path_feature_data,
                         indices_all, Y_train_validation, sample_weights,
                         class_weights, file_path_model, filename_log,
                         channel):
    """Train on the combined train + validation set for a fixed number of
    epochs (no early stopping) and save the model to file_path_model."""

    # mmtm = MomentumScheduler(momentumIncrease)
    # lrSchedule = LearningRateScheduler(lrDecrease)
    # callbacks = [mmtm, lrSchedule, CSVLogger(filename=filename_log, separator=';')]
    callbacks = [CSVLogger(filename=filename_log, separator=';')]
    print("start training...")

    # train on the combined train and validation set for a fixed number of epochs
    epochs_final = 100

    steps_per_epoch_train_val = int(np.ceil(len(indices_all) / batch_size))

    generator_train_val = generator(
        path_feature_data=path_feature_data,
        indices=indices_all,
        number_of_batches=steps_per_epoch_train_val,
        file_size=batch_size,
        input_shape=input_shape,
        labels=Y_train_validation,
        sample_weights=sample_weights,
        multi_inputs=False,
        channel=channel)

    model_0.fit_generator(
        generator=generator_train_val,
        steps_per_epoch=steps_per_epoch_train_val,
        epochs=epochs_final,
        callbacks=callbacks,
        # class_weight=class_weights,
        verbose=2)

    model_0.save(file_path_model)
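The commented-out `MomentumScheduler` and `LearningRateScheduler` lines in model_train_schluter suggest the original recipe also decayed the learning rate and ramped up the momentum over the epochs. Neither `lrDecrease`, `momentumIncrease`, nor the custom `MomentumScheduler` callback appear on this page; the sketch below shows one plausible shape for them, assuming an SGD optimizer and purely illustrative linear schedules over 100 epochs.

from keras import backend as K
from keras.callbacks import Callback, LearningRateScheduler

def lrDecrease(epoch):
    """Illustrative linear decay of the learning rate (values are assumptions)."""
    initial_lr, final_lr, total_epochs = 0.1, 0.001, 100
    fraction = min(epoch / float(total_epochs), 1.0)
    return initial_lr + fraction * (final_lr - initial_lr)

def momentumIncrease(epoch):
    """Illustrative linear ramp of SGD momentum (values are assumptions)."""
    initial, final, total_epochs = 0.45, 0.9, 100
    fraction = min(epoch / float(total_epochs), 1.0)
    return initial + fraction * (final - initial)

class MomentumScheduler(Callback):
    """Set the optimizer's momentum at the start of each epoch.

    Assumes an SGD optimizer whose `momentum` is stored as a backend
    variable, as in standalone Keras 2."""

    def __init__(self, schedule):
        super(MomentumScheduler, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch, logs=None):
        K.set_value(self.model.optimizer.momentum, self.schedule(epoch))

# matching the commented-out usage in model_train_schluter:
# mmtm = MomentumScheduler(momentumIncrease)
# lrSchedule = LearningRateScheduler(lrDecrease)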


def model_train(model_0, batch_size, patience, input_shape, path_feature_data,
                indices_train, Y_train, sample_weights_train,
                indices_validation, Y_validation, sample_weights_validation,
                indices_all, Y_train_validation, sample_weights, class_weights,
                file_path_model, filename_log):
    """Train the model with early stopping on the validation set, then reload
    the initial weights and retrain on the whole train + validation set for
    the number of epochs the first phase ran."""

    # snapshot the initial (untrained) weights in the working directory so the
    # second training phase below can restart from the same starting point
    model_0.save_weights(basename(file_path_model))

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
        CSVLogger(filename=filename_log, separator=';')
    ]
    print("start training...")

    steps_per_epoch_train = int(np.ceil(len(indices_train) / batch_size))
    steps_per_epoch_val = int(np.ceil(len(indices_validation) / batch_size))

    generator_train = generator(path_feature_data=path_feature_data,
                                indices=indices_train,
                                number_of_batches=steps_per_epoch_train,
                                file_size=batch_size,
                                input_shape=input_shape,
                                labels=Y_train,
                                sample_weights=sample_weights_train,
                                multi_inputs=False)
    generator_val = generator(path_feature_data=path_feature_data,
                              indices=indices_validation,
                              number_of_batches=steps_per_epoch_val,
                              file_size=batch_size,
                              input_shape=input_shape,
                              labels=Y_validation,
                              sample_weights=sample_weights_validation,
                              multi_inputs=False)

    history = model_0.fit_generator(generator=generator_train,
                                    steps_per_epoch=steps_per_epoch_train,
                                    epochs=500,
                                    validation_data=generator_val,
                                    validation_steps=steps_per_epoch_val,
                                    class_weight=class_weights,
                                    callbacks=callbacks,
                                    verbose=2)

    # restore the untrained weights saved above before the second training phase
    model_0.load_weights(basename(file_path_model))

    # retrain on the full train + validation set for as many epochs as the
    # early-stopped run lasted
    epochs_final = len(history.history['val_loss'])
    # epochs_final = 100

    steps_per_epoch_train_val = int(np.ceil(len(indices_all) / batch_size))

    generator_train_val = generator(
        path_feature_data=path_feature_data,
        indices=indices_all,
        number_of_batches=steps_per_epoch_train_val,
        file_size=batch_size,
        input_shape=input_shape,
        labels=Y_train_validation,
        sample_weights=sample_weights,
        multi_inputs=False)

    model_0.fit_generator(generator=generator_train_val,
                          steps_per_epoch=steps_per_epoch_train_val,
                          epochs=epochs_final,
                          class_weight=class_weights,
                          verbose=2)

    model_0.save(file_path_model)
    # clean up the temporary initial-weights file created at the start
    remove(basename(file_path_model))
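Finally, a hypothetical end-to-end call of model_train on toy data, useful for checking the plumbing. Every path, array, and the tiny model here are placeholders, and the data layout matches the `generator` sketch near the top of this page (one flat HDF5 row per sample), not necessarily the original project's files.

import h5py
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten

# toy dataset written in the layout the generator sketch above assumes
n_samples, input_shape = 512, (80, 15)
features = np.random.rand(n_samples, 80 * 15).astype('float32')
with h5py.File('/tmp/feature_all.h5', 'w') as f:
    f.create_dataset('feature_all', data=features)

labels = np.random.randint(0, 2, size=n_samples).astype('float32')
weights = np.ones(n_samples, dtype='float32')
indices = np.arange(n_samples)
split = int(0.8 * n_samples)

# placeholder model; the real architecture is defined elsewhere in the project
model = Sequential([Flatten(input_shape=input_shape),
                    Dense(32, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy')

model_train(model_0=model, batch_size=64, patience=3, input_shape=input_shape,
            path_feature_data='/tmp/feature_all.h5',
            indices_train=indices[:split], Y_train=labels[:split],
            sample_weights_train=weights[:split],
            indices_validation=indices[split:], Y_validation=labels[split:],
            sample_weights_validation=weights[split:],
            indices_all=indices, Y_train_validation=labels,
            sample_weights=weights, class_weights=None,
            file_path_model='/tmp/model.h5', filename_log='/tmp/training.log')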