Code Example #1
def text_classification_query(self,
                              instruction,
                              drop=None,
                              preprocess=True,
                              label_column=None,
                              test_size=0.2,
                              random_state=49,
                              learning_rate=1e-2,
                              epochs=20,
                              monitor="val_loss",
                              batch_size=32,
                              max_text_length=200,
                              max_features=20000,
                              generate_plots=True,
                              save_model=False,
                              save_path=os.getcwd()):
    """
    function to apply text_classification algorithm for sentiment analysis
    :param many params: used to hyperparametrize the function.
    :return a dictionary object with all of the information for the algorithm.
    """

    if test_size < 0:
        raise Exception("Test size must be a float between 0 and 1")

    if test_size >= 1:
        raise Exception(
            "Test size must be a float between 0 and 1 (a test size greater than or equal to 1 results in no training "
            "data)")

    if epochs < 1:
        raise Exception(
            "Epoch number is less than 1 (model will not be trained)")

    if batch_size < 1:
        raise Exception("Batch size must be equal to or greater than 1")

    if max_text_length < 1:
        raise Exception("Max text length must be equal to or greater than 1")

    if save_model:
        if not os.path.exists(save_path):
            raise Exception("Save path does not exist")

    if test_size == 0:
        testing = False
    else:
        testing = True

    data = DataReader(self.dataset)
    data = data.data_generator()

    if preprocess:
        data.fillna(0, inplace=True)

    if drop is not None:
        data.drop(drop, axis=1, inplace=True)

    if label_column is None:
        label = "label"
    else:
        label = label_column

    X, Y, target = get_target_values(data, instruction, label)
    Y = np.array(Y)
    classes = np.unique(Y)

    logger("->", "Target Column Found: {}".format(target))

    vocab = {}
    if preprocess:
        logger("Preprocessing data")
        X = lemmatize_text(text_clean_up(X.array))
        vocab = X
        X = encode_text(X, X)

    X = np.array(X)

    model = get_keras_text_class(max_features, len(classes), learning_rate)
    logger("Building Keras LSTM model dynamically")

    X_train, X_test, y_train, y_test = train_test_split(
        X, Y, test_size=test_size, random_state=random_state)

    X_train = sequence.pad_sequences(X_train, maxlen=max_text_length)
    X_test = sequence.pad_sequences(X_test, maxlen=max_text_length)

    y_vals = np.unique(np.append(y_train, y_test))
    label_mappings = {}
    for i in range(len(y_vals)):
        label_mappings[y_vals[i]] = i
    map_func = np.vectorize(lambda x: label_mappings[x])
    y_train = map_func(y_train)
    y_test = map_func(y_test)

    logger("Training initial model")

    # early stopping callback
    es = EarlyStopping(monitor=monitor, mode='auto', verbose=0, patience=5)

    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        batch_size=batch_size,
                        epochs=epochs,
                        callbacks=[es],
                        verbose=0)

    logger(
        "->", "Final training loss: {}".format(
            history.history["loss"][len(history.history["loss"]) - 1]))
    if testing:
        logger(
            "->", "Final validation loss: {}".format(
                history.history["val_loss"][len(history.history["val_loss"]) -
                                            1]))
        logger(
            "->", "Final validation accuracy: {}".format(
                history.history["val_accuracy"][
                    len(history.history["val_accuracy"]) - 1]))
        losses = {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        }
        accuracy = {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        }
    else:
        logger("->",
               "Final validation loss: {}".format("0, No validation done"))
        losses = {'training_loss': history.history['loss']}
        accuracy = {'training_accuracy': history.history['accuracy']}

    plots = {}
    if generate_plots:
        # generates appropriate classification plots by feeding all
        # information
        logger("Generating plots")
        plots = generate_classification_plots(history, X, Y, model, X_test,
                                              y_test)

    if save_model:
        save(model, save_model, save_path=save_path)

    logger(
        "Storing information in client object under key 'text_classification'")
    # storing values in the model dictionary

    self.models["text_classification"] = {
        "model": model,
        "classes": classes,
        "plots": plots,
        "target": Y,
        "vocabulary": vocab,
        "interpreter": label_mappings,
        "max_text_length": max_text_length,
        'test_data': {
            'X': X_test,
            'y': y_test
        },
        'losses': losses,
        'accuracy': accuracy
    }
    clearLog()
    return self.models["text_classification"]
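
A minimal usage sketch for this query (hedged: the client entry point, dataset path, and instruction below are illustrative assumptions, not part of the example above):

# Hypothetical usage; assumes the library exposes a client class that stores
# the dataset path and exposes text_classification_query as a method.
from libra import client

sentiment_client = client("reviews.csv")            # CSV with a text column and a "label" column (assumed)
result = sentiment_client.text_classification_query(
    "predict sentiment",                            # instruction used to locate the target column
    epochs=5,
    batch_size=64,
    max_text_length=150)
print(result["classes"])                            # unique labels found in the target column
print(result["losses"]["training_loss"][-1])        # final training loss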
Code Example #2
File: supplementaries.py, Project: wjjmjh/libra
def tune_helper(model_to_tune=None,
                dataset=None,
                models=None,
                max_layers=10,
                min_layers=2,
                min_dense=32,
                max_dense=512,
                executions_per_trial=3,
                max_trials=1,
                activation='relu',
                loss='categorical_crossentropy',
                metrics='accuracy',
                seed=42,
                objective='val_accuracy',
                generate_plots=True,
                directory='my_dir',
                epochs=10,
                step=32,
                patience=1,
                verbose=0,
                test_size=0.2):
    '''
    Helper function that calls the appropriate tuning routine for the requested model
    :param model_to_tune: key of the model in the models dictionary whose hyperparameters should be tuned
    :return: the updated models dictionary
    '''
    print("")
    logger("Getting target model for tuning...")

    # checks to see which requested model is in the self.models

    # processing for regression feed forward NN
    if model_to_tune == 'regression_ANN':
        logger("Reading in data")
        logger("Tuning model hyperparameters...")
        dataReader = DataReader(dataset)
        data = dataReader.data_generator()
        target = models['regression_ANN']['target']
        target_column = data[models['regression_ANN']['target']]
        data = models['regression_ANN']['preprocesser'].transform(
            data.drop(target, axis=1))
        returned_model, returned_pms, history, X_test, y_test = tuneReg(
            data.values,
            target_column.values,
            max_layers=max_layers,
            min_layers=min_layers,
            min_dense=min_dense,
            max_dense=max_dense,
            executions_per_trial=executions_per_trial,
            max_trials=max_trials,
            epochs=epochs,
            activation=activation,
            step=step,
            directory=directory,
            verbose=verbose,
            test_size=test_size)
        plots = {}
        logger("->",
               'Best Hyperparameters Found: {}'.format(returned_pms.values))
        if generate_plots:
            logger("Generating updated plots")
            init_plots, plot_names = generate_regression_plots(
                history, data, target_column)
            for x in range(len(plot_names)):
                plots[str(plot_names[x])] = init_plots[x]

        models['regression_ANN'] = {
            'id': models['regression_ANN']['id'],
            'model': returned_model,
            'target': target,
            "plots": plots,
            'preprocesser': models['regression_ANN']['preprocesser'],
            'interpreter': models['regression_ANN']['interpreter'],
            'test_data': {
                'X': X_test,
                'y': y_test
            },
            'hyperparameters': returned_pms.values,
            'losses': {
                'training_loss': history.history['loss'],
                'val_loss': history.history['val_loss']
            }
        }
        logger("Re-stored model under 'regression_ANN' key")

        # processing for classification feed forward NN
    elif model_to_tune == "classification_ANN":
        logger("Reading in data")
        logger("Tuning model hyperparameters...")
        dataReader = DataReader(dataset)
        data = dataReader.data_generator()
        target = models['classification_ANN']['target']
        target_column = data[models['classification_ANN']['target']]
        data = models['classification_ANN']['preprocesser'].transform(
            data.drop(target, axis=1))
        returned_model, returned_pms, history, X_test, y_test = tuneClass(
            data,
            target_column,
            models['classification_ANN']['num_classes'],
            max_layers=max_layers,
            min_layers=min_layers,
            min_dense=min_dense,
            max_dense=max_dense,
            executions_per_trial=executions_per_trial,
            max_trials=max_trials,
            activation=activation,
            loss=loss,
            directory=directory,
            metrics=metrics,
            epochs=epochs,
            step=step,
            verbose=verbose,
            test_size=test_size)
        plots = {}
        logger("->",
               'Best Hyperparameters Found: {}'.format(returned_pms.values))
        if generate_plots:
            logger("Generating updated plots")
            plots = generate_classification_plots(history, data, target_column,
                                                  returned_model, X_test,
                                                  y_test)

        logger("Re-stored model under 'classification_ANN' key")
        models['classification_ANN'] = {
            'id': models['classification_ANN']['id'],
            'model': returned_model,
            'hyperparameters': returned_pms.values,
            'plots': plots,
            'preprocesser': models['classification_ANN']['preprocesser'],
            'interpreter': models['classification_ANN']['interpreter'],
            'test_data': {
                'X': X_test,
                'y': y_test
            },
            'target': target,
            'losses': {
                'training_loss': history.history['loss'],
                'val_loss': history.history['val_loss']
            },
            'accuracy': {
                'training_accuracy': history.history['accuracy'],
                'validation_accuracy': history.history['val_accuracy']
            }
        }

    elif model_to_tune == "convolutional_NN":
        logger("Tuning model hyperparameters...")
        X_train, X_test, height, width, num_classes = get_image_data(models)
        logger('Located image data')
        model, returned_pms, history = tuneCNN(
            X_train,
            X_test,
            height,
            width,
            num_classes,
            executions_per_trial=executions_per_trial,
            max_trials=max_trials,
            seed=seed,
            objective=objective,
            directory=directory,
            patience=patience,
            epochs=epochs,
            verbose=verbose,
            test_size=test_size)
        logger("->", "Optimal image size identified: {}".format(
            (height, width, 3)))
        logger('Packaging HyperModel')
        logger("->",
               'Best Hyperparameters Found: {}'.format(returned_pms.values))
        logger("Re-stored model under 'convolutional_NN' key")

        models['convolutional_NN'] = {
            'id': models['convolutional_NN']['id'],
            'data_type': models['convolutional_NN']['data_type'],
            'data_path': models['convolutional_NN']['data_path'],
            'data': {
                'train': X_train,
                'test': X_test
            },
            'shape': models['convolutional_NN']['shape'],
            'model': model,
            'num_classes': models['convolutional_NN']['num_classes'],
            'data_sizes': models['convolutional_NN']['data_sizes'],
            'losses': {
                'training_loss': history.history['loss'],
                'val_loss': history.history['val_loss']
            },
            'accuracy': {
                'training_accuracy': history.history['accuracy'],
                'validation_accuracy': history.history['val_accuracy']
            }
        }
    clearLog()
    return models
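
A hedged sketch of how tune_helper might be driven after an initial model has been trained and stored (the existing_models dictionary and dataset path below are assumptions):

# Hypothetical usage; `existing_models` stands in for a previously populated
# models dictionary (e.g. one containing a 'regression_ANN' entry).
tuned = tune_helper(
    model_to_tune="regression_ANN",     # key must already exist in existing_models
    dataset="housing.csv",              # same dataset used to train the initial model (assumed path)
    models=existing_models,
    max_layers=6,
    max_trials=3,
    epochs=5)
print(tuned["regression_ANN"]["hyperparameters"])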
Code Example #3
File: feedforward_nn.py, Project: tesseract-42/libra
def classification_ann(instruction,
                       callback=False,
                       dataset=None,
                       text=[],
                       ca_threshold=None,
                       preprocess=True,
                       callback_mode='min',
                       drop=None,
                       random_state=49,
                       test_size=0.2,
                       epochs=50,
                       generate_plots=True,
                       maximizer="val_accuracy",
                       save_model=False,
                       save_path=os.getcwd(),
                       add_layer={}):
    '''
    Body of the classification routine called from the neural network query
    when the target data is categorical.
    :param many parameters: used for preprocessing, tuning, plot generation, and parameterizing the neural network trained.
    :return: dictionary that holds all the information for the finished model.
    '''

    if dataset is None:
        dataReader = DataReader(get_file())
    else:
        dataReader = DataReader(dataset)
    logger("Reading in dataset")
    data = dataReader.data_generator()

    if drop is not None:
        data.drop(drop, axis=1, inplace=True)

    data, y, remove, full_pipeline = initial_preprocessor(
        data,
        instruction,
        preprocess,
        ca_threshold,
        text,
        test_size=test_size,
        random_state=random_state)
    logger("->", "Target column found: {}".format(remove))

    # Needed to make a custom label encoder due to train test split changes
    # Can still be inverse transformed, just a bit of extra work
    y = pd.concat([y['train'], y['test']], axis=0)

    num_classes = len(np.unique(y))

    if num_classes < 2:
        raise Exception("Number of classes must be greater than or equal to 2")

    X_train = data['train']
    X_test = data['test']

    if num_classes >= 2:
        # ANN needs target one hot encoded for classification
        one_hotencoder = OneHotEncoder()
        y = pd.DataFrame(one_hotencoder.fit_transform(
            np.reshape(y.values, (-1, 1))).toarray(),
                         columns=one_hotencoder.get_feature_names())

    y_train = y.iloc[:len(X_train)]
    y_test = y.iloc[len(X_train):]

    models = []
    losses = []
    accuracies = []
    model_data = []

    logger("Establishing callback function")

    # early stopping callback
    es = EarlyStopping(monitor=maximizer, mode='max', verbose=0, patience=5)

    callback_value = None
    if callback is not False:
        callback_value = [es]

    i = 0
    model = get_keras_model_class(data, i, num_classes, add_layer)
    logger("Training initial model")

    history = model.fit(X_train,
                        y_train,
                        callbacks=callback_value,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=0)

    model_data.append(model)
    models.append(history)
    col_name = [[
        "Initial number of layers ", "| Training Accuracy ", "| Test Accuracy "
    ]]
    col_width = max(len(word) for row in col_name for word in row) + 2
    for row in col_name:
        print((" " * 2 * counter) + "| " +
              ("".join(word.ljust(col_width) for word in row)) + " |")
    values = []
    values.append(str(len(model.layers)))
    values.append("| " + str(history.history['accuracy'][
        len(history.history['val_accuracy']) - 1]))
    values.append("| " + str(history.history['val_accuracy'][
        len(history.history['val_accuracy']) - 1]))
    datax = []
    datax.append(values)
    for row in datax:
        print((" " * 2 * counter) + "| " +
              ("".join(word.ljust(col_width) for word in row)) + " |")
    # print((" " * 2 * counter)+ tabulate(datax, headers=col_name, tablefmt='orgtbl'))
    losses.append(history.history[maximizer][len(history.history[maximizer]) -
                                             1])
    accuracies.append(
        history.history['val_accuracy'][len(history.history['val_accuracy']) -
                                        1])
    # keeps building and fitting models until the validation accuracy stops
    # improving

    logger("Testing number of layers")
    col_name = [[
        "Current number of layers", "| Training Accuracy", "| Test Accuracy"
    ]]
    col_width = max(len(word) for row in col_name for word in row) + 2

    for row in col_name:
        print((" " * 2 * counter) + "| " +
              ("".join(word.ljust(col_width) for word in row)) + " |")
    datax = []
    # while all(x < y for x, y in zip(accuracies, accuracies[1:])):
    while (len(accuracies) <= 2 or
           accuracies[len(accuracies) - 1] > accuracies[len(accuracies) - 2]):
        model = get_keras_model_class(data, i, num_classes, add_layer)
        history = model.fit(X_train,
                            y_train,
                            callbacks=callback_value,
                            epochs=epochs,
                            validation_data=(X_test, y_test),
                            verbose=0)

        values = []
        datax = []
        values.append(str(len(model.layers)))
        values.append("| " + str(history.history['accuracy'][
            len(history.history['accuracy']) - 1]))
        values.append("| " + str(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1]))
        datax.append(values)
        for row in datax:
            print((" " * 2 * counter) + "| " +
                  ("".join(word.ljust(col_width) for word in row)) + " |")
        del values, datax
        losses.append(
            history.history[maximizer][len(history.history[maximizer]) - 1])
        accuracies.append(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1])
        models.append(history)
        model_data.append(model)

        i += 1
    # print((" " * 2 * counter)+ tabulate(datax, headers=col_name, tablefmt='orgtbl'))
    # del values, datax

    final_model = model_data[accuracies.index(max(accuracies))]
    final_hist = models[accuracies.index(max(accuracies))]

    print("")
    logger('->',
           "Best number of layers found: " + str(len(final_model.layers)))
    logger(
        '->', "Training Accuracy: " + str(final_hist.history['accuracy'][
            len(final_hist.history['accuracy']) - 1]))
    logger(
        '->', "Test Accuracy: " + str(final_hist.history['val_accuracy'][
            len(final_hist.history['val_accuracy']) - 1]))

    # generates appropriate classification plots by feeding all information
    plots = {}
    if generate_plots:
        plots = generate_classification_plots(models[len(models) - 1])

    if save_model:
        save(final_model, save_model, save_path)

    print("")
    logger("Stored model under 'classification_ANN' key")
    clearLog()

    K.clear_session()

    # stores the values and plots into the object dictionary
    return {
        'id': generate_id(),
        "model": final_model,
        'num_classes': num_classes,
        "plots": plots,
        "target": remove,
        "preprocessor": full_pipeline,
        "interpreter": one_hotencoder,
        'test_data': {
            'X': X_test,
            'y': y_test
        },
        'losses': {
            'training_loss': final_hist.history['loss'],
            'val_loss': final_hist.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': final_hist.history['accuracy'],
            'validation_accuracy': final_hist.history['val_accuracy']
        }
    }
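
The function can also be exercised directly; a short sketch under the assumption that a titanic.csv file with a categorical Survived column is available (file and column names are illustrative):

# Hypothetical direct call; dataset path and instruction are illustrative.
info = classification_ann(
    "predict Survived",          # instruction used to locate the target column
    dataset="titanic.csv",
    epochs=10,
    callback=True,               # enables the EarlyStopping callback defined above
    generate_plots=False)
print(info["num_classes"], len(info["model"].layers))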
Code Example #4
File: feedforward_nn.py, Project: tesseract-42/libra
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  fine_tune=False,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None,
                  save_as_tfjs=None,
                  save_as_tflite=None,
                  generate_plots=True):
    '''
    Body of the convolutional routine called from the neural network query
    when the data is presented as images.
    :param many parameters: used for preprocessing, tuning, plot generation, and parameterizing the convolutional neural network trained.
    :return: dictionary that holds all the information for the finished model.
    '''

    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    LR = 0.001
    plots = {}
    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224."
            )

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false."
            )

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height,
                                                width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise ValueError("Training ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction,
                                            image_column, training_ratio,
                                            height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise ValueError("Training ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio,
                                                  height, width)

    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural network dynamically")

    # Convolutional Neural Network

    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:

        arch_lower = pretrained.get('arch').lower()

        # If user specifies value of pretrained['weights'] as 'imagenet', weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False,
                                      weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenet":
                base_model = MobileNet(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenetv2":
                base_model = MobileNetV2(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet121":
                base_model = DenseNet121(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet169":
                base_model = DenseNet169(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet201":
                base_model = DenseNet201(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True,
                                 weights=None,
                                 classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenet":
                model = MobileNet(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenetv2":
                model = MobileNetV2(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet121":
                model = DenseNet121(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet169":
                model = DenseNet169(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet201":
                model = DenseNet201(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

    else:
        model = Sequential()
        # model.add(
        #     Conv2D(
        #         64,
        #         kernel_size=3,
        #         activation="relu",
        #         input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(
        #     optimizer="adam",
        #     loss=loss_func,
        #     metrics=['accuracy'])
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation="relu",
                   input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    if pretrained and 'weights' in pretrained and pretrained.get(
            'weights') == 'imagenet':
        for layer in base_model.layers:
            layer.trainable = False

    opt = Adam(learning_rate=LR)

    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)

        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))
    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise ValueError("Number of epochs has to be greater than 0.")

    print("\n")
    logger('Training image model')

    # model.summary()

    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    if fine_tune:

        logger(
            '->', 'Training accuracy: {}'.format(
                history.history['accuracy'][len(history.history['accuracy']) -
                                            1]))
        logger(
            '->',
            'Validation accuracy: {}'.format(history.history['val_accuracy'][
                len(history.history['val_accuracy']) - 1]))

        for layer in base_model.layers:
            layer.trainable = True

        opt = Adam(learning_rate=LR / 10)

        model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

        print("\n\n")
        logger('Training fine tuned model')

        fine_tuning_epoch = epochs + 10
        history_fine = model.fit_generator(
            X_train,
            steps_per_epoch=X_train.n // X_train.batch_size,
            validation_data=X_test,
            validation_steps=X_test.n // X_test.batch_size,
            epochs=fine_tuning_epoch,
            initial_epoch=history.epoch[-1],
            verbose=verbose)
        #frozen model acc and loss history
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        #fine tuned model acc and loss history
        acc += history_fine.history['accuracy']
        val_acc += history_fine.history['val_accuracy']

        loss += history_fine.history['loss']
        val_loss += history_fine.history['val_loss']

        if generate_plots:
            plots = generate_fine_tuned_classification_plots(
                acc, val_acc, loss, val_loss, epochs)

    models = []
    losses = []
    accuracies = []
    model_data = []

    model_data.append(model)
    models.append(history)

    losses.append(
        history.history["val_loss"][len(history.history["val_loss"]) - 1])
    accuracies.append(
        history.history['val_accuracy'][len(history.history['val_accuracy']) -
                                        1])

    # final_model = model_data[accuracies.index(max(accuracies))]
    # final_hist = models[accuracies.index(max(accuracies))]

    if generate_plots and not fine_tune:
        plots = generate_classification_plots(models[len(models) - 1])

    print("\n")
    logger(
        '->', 'Final training accuracy: {}'.format(
            history.history['accuracy'][len(history.history['accuracy']) - 1]))
    logger(
        '->',
        'Final validation accuracy: {}'.format(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1]))
    # storing values in the model dictionary

    number_of_examples = len(X_test.filenames)
    number_of_generator_calls = math.ceil(number_of_examples /
                                          (1.0 * X_test.batch_size))

    test_labels = []

    for i in range(0, int(number_of_generator_calls)):
        test_labels.extend(np.array(X_test[i][1]))

    predIdx = model.predict(X_test)

    if output_layer_activation == "sigmoid":
        real = [int(x) for x in test_labels]
        ans = []
        for i in range(len(predIdx)):
            ans.append(int(round(predIdx[i][0])))

    elif output_layer_activation == "softmax":
        real = []
        for ans in test_labels:
            real.append(ans.argmax())
        ans = []
        for r in predIdx:
            ans.append(r.argmax())

    else:
        print("NOT THE CASE")

    logger("Stored model under 'convolutional_NN' key")

    if save_as_tfjs:
        tfjs.converters.save_keras_model(model, "tfjsmodel")
        logger("Saved tfjs model under 'tfjsmodel' directory")

    if save_as_tflite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        open("model.tflite", "wb").write(tflite_model)
        logger("Saved tflite model as 'model.tflite' ")

    clearLog()

    K.clear_session()

    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {
            'train': X_train,
            'test': X_test
        },
        'shape': input_shape,
        'res': {
            'real': real,
            'ans': ans
        },
        'model': model,
        'plots': plots,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
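
A hedged sketch of calling convolutional on a class-per-folder image directory with an ImageNet-pretrained backbone (directory layout and names are assumptions):

# Hypothetical usage; directory layout and architecture choice are assumptions.
cnn_info = convolutional(
    data_path="./flowers",                      # folder with one sub-folder of images per class (assumed)
    read_mode="classwise",
    pretrained={"arch": "mobilenetv2", "weights": "imagenet"},
    fine_tune=True,                             # unfreezes the base model for a second training pass
    epochs=3,
    generate_plots=False)
print(cnn_info["num_classes"], cnn_info["shape"])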
Code Example #5
File: feedforward_nn.py, Project: Samyak-Doshi/libra
def classification_ann(instruction,
                       dataset=None,
                       text=None,
                       ca_threshold=None,
                       preprocess=True,
                       callback_mode='min',
                       drop=None,
                       random_state=49,
                       test_size=0.2,
                       epochs=50,
                       generate_plots=True,
                       maximizer="val_loss",
                       save_model=True,
                       save_path=os.getcwd()):

    global currLog
    logger("Reading in dataset...")

    dataReader = DataReader(dataset)
    data = dataReader.data_generator()

    if drop is not None:
        data.drop(drop, axis=1, inplace=True)

    data, y, remove, full_pipeline = initial_preprocesser(
        data, instruction, preprocess, ca_threshold, text)
    logger("->", "Target Column Found: {}".format(remove))

    # Needed to make a custom label encoder due to train test split changes
    # Can still be inverse transformed, just a bit of extra work
    y = pd.concat([y['train'], y['test']], axis=0)

    num_classes = len(np.unique(y))

    X_train = data['train']
    X_test = data['test']

    # ANN needs target one hot encoded for classification
    one_hot_encoder = OneHotEncoder()

    y = pd.DataFrame(one_hot_encoder.fit_transform(
        np.reshape(y.values, (-1, 1))).toarray(),
                     columns=one_hot_encoder.get_feature_names())

    y_train = y.iloc[:len(X_train)]
    y_test = y.iloc[len(X_train):]

    models = []
    losses = []
    accuracies = []
    model_data = []

    logger("Establishing callback function...")

    # early stopping callback
    es = EarlyStopping(monitor=maximizer, mode='min', verbose=0, patience=5)

    i = 0
    model = get_keras_model_class(data, i, num_classes)
    logger("Training initial model...")
    history = model.fit(X_train,
                        y_train,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        callbacks=[es],
                        verbose=0)

    model_data.append(model)
    models.append(history)
    col_name = [[
        "Initial number of layers ", "| Training Loss ", "| Test Loss "
    ]]
    col_width = max(len(word) for row in col_name for word in row) + 2
    for row in col_name:
        print((" " * 2 * counter) + "| " +
              ("".join(word.ljust(col_width) for word in row)) + " |")
    values = []
    values.append(str(len(model.layers)))
    values.append(
        "| " +
        str(history.history['loss'][len(history.history['loss']) - 1]))
    values.append(
        "| " +
        str(history.history['val_loss'][len(history.history['val_loss']) - 1]))
    datax = []
    datax.append(values)
    for row in datax:
        print((" " * 2 * counter) + "| " +
              ("".join(word.ljust(col_width) for word in row)) + " |")
    #print((" " * 2 * counter)+ tabulate(datax, headers=col_name, tablefmt='orgtbl'))
    losses.append(history.history[maximizer][len(history.history[maximizer]) -
                                             1])
    # keeps running model and fit functions until the validation loss stops
    # decreasing

    logger("Testing number of layers...")
    col_name = [["Current number of layers", "| Training Loss", "| Test Loss"]]
    col_width = max(len(word) for row in col_name for word in row) + 2

    for row in col_name:
        print((" " * 2 * counter) + "| " +
              ("".join(word.ljust(col_width) for word in row)) + " |")
    datax = []
    while (all(x > y for x, y in zip(losses, losses[1:]))):
        model = get_keras_model_class(data, i, num_classes)
        history = model.fit(X_train,
                            y_train,
                            epochs=epochs,
                            validation_data=(X_test, y_test),
                            callbacks=[es],
                            verbose=0)

        values = []
        datax = []
        values.append(str(len(model.layers)))
        values.append(
            "| " +
            str(history.history['loss'][len(history.history['loss']) - 1]))
        values.append("| " + str(history.history['val_loss'][
            len(history.history['val_loss']) - 1]))
        datax.append(values)
        for row in datax:
            print((" " * 2 * counter) + "| " +
                  ("".join(word.ljust(col_width) for word in row)) + " |")
        losses.append(
            history.history[maximizer][len(history.history[maximizer]) - 1])
        accuracies.append(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1])
        models.append(history)      # track each candidate so the best model can be selected below
        model_data.append(model)
        i += 1
    #print((" " * 2 * counter)+ tabulate(datax, headers=col_name, tablefmt='orgtbl'))
    #del values, datax
    final_model = model_data[losses.index(min(losses))]
    final_hist = models[losses.index(min(losses))]
    print("")
    logger('->',
           "Best number of layers found: " + str(len(final_model.layers)))
    logger(
        '->', "Training Accuracy: " + str(final_hist.history['accuracy'][
            len(final_hist.history['accuracy']) - 1]))
    logger(
        '->', "Test Accuracy: " + str(final_hist.history['val_accuracy'][
            len(final_hist.history['val_accuracy']) - 1]))

    # generates appropriate classification plots by feeding all information
    plots = generate_classification_plots(models[len(models) - 1], data, y,
                                          model, X_test, y_test)

    if save_model:
        save(final_model, save_model)

    print("")
    logger("Stored model under 'classification_ANN' key")

    # stores the values and plots into the object dictionary
    return {
        'id': generate_id(),
        "model": final_model,
        'num_classes': num_classes,
        "plots": plots,
        "target": remove,
        "preprocesser": full_pipeline,
        "interpreter": one_hot_encoder,
        'losses': {
            'training_loss': final_hist.history['loss'],
            'val_loss': final_hist.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': final_hist.history['accuracy'],
            'validation_accuracy': final_hist.history['val_accuracy']
        }
    }
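
Because this older variant stores the fitted OneHotEncoder under the 'interpreter' key, here is a short sketch of mapping raw predictions back to the original labels (result and X_new are assumed to come from an earlier call and preprocessing step):

# Hypothetical post-processing; `result` is this function's return value and
# `X_new` is a preprocessed feature matrix (both assumptions).
import numpy as np

probs = result["model"].predict(X_new)
one_hot = np.eye(probs.shape[1])[probs.argmax(axis=1)]          # hard one-hot predictions
labels = result["interpreter"].inverse_transform(one_hot)       # back to original class values
print(labels[:5])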