Example no. 1
0
def data(root='../'):
    """
    Data providing function.

    Kept separate from create_hyperparam_model() so that hyperopt does not
    reload the data for every evaluation run.

    :param root: path prefix to the project root used when loading the
        labels and the document vectors. Defaults to '../', which is the
        value the old implementation hard-coded.
    :return: tuple (x_train, y_train, x_val, y_val, x_test, y_test).
    """
    # BUG FIX: `root` used to be ignored and '../' was hard-coded in both
    # loader calls below. The default now matches that old hard-coded
    # value, so no-argument callers see identical behaviour, while an
    # explicit `root=` is finally honoured.
    labels, class_names = get_merged_labels_three(root=root)

    x = get_doc_vec_ticketing_message(root=root)

    # One-hot encode the integer labels.
    n_values = len(class_names)
    y = np.eye(n_values)[labels]

    # 80% train / 10% validation / 10% test split.
    data_set = DataSet.from_np_array(x,
                                     y,
                                     class_names=class_names,
                                     p_train=0.8,
                                     p_val=0.1)

    return (data_set.x_train, data_set.y_train,
            data_set.x_val, data_set.y_val,
            data_set.x_test, data_set.y_test)
Example no. 2
0
    n = words.shape[0]
    if n > max:
        max = n

# Pad every variable-length sequence in `x` with trailing zeros up to the
# longest length so they stack into one dense (samples, max, features) array.
# NOTE(review): `max` shadows the builtin here — it is the maximum sequence
# length computed by the loop above this block.
padded = np.zeros((x.shape[0], max, x[0].shape[1]))
for row, words in enumerate(x):
    padded[row, :words.shape[0], :] = words

x = padded

# One-hot encode the integer class labels.
y = keras.utils.to_categorical(y)

# Wrap the padded features and one-hot labels in the project's DataSet.
data_set = DataSet.from_np_array(x, y, class_names=class_names)

# Location where the trained RNN model is persisted between runs.
path = "../classification/RNN/saved_model/rnn.model"

with Logger("rnn", root='../') as l:
    # Record the data-set summary before training starts.
    l.log_and_print(data_set)
    l.log("")

    # Resume from a previously saved model when one exists on disk,
    # otherwise start training from scratch.
    classifier = (RnnClassifier.load(path, data_set, logger=l)
                  if os.path.isfile(path)
                  else RnnClassifier(data_set, logger=l))

    classifier.fit(path, epochs=20)
    classifier.validate()
Example no. 3
0
from classification.data_set import DataSet
from sklearn import preprocessing
from classification.ticketing_data import *
from classification.util.logger import Logger

# Load the three-way merged labels and the doc2vec features of the
# ticketing messages, both resolved relative to the project root.
labels, class_names = get_merged_labels_three(root='../')
x = get_doc_vec_ticketing_message(root='../')

# One-hot encode the integer labels into a (samples, classes) matrix.
n_values = len(class_names)
y = np.eye(n_values)[labels]

# 80% train / 10% validation / 10% test split.
data_set = DataSet.from_np_array(
    x, y, class_names=class_names, p_train=0.8, p_val=0.1)

with Logger("multilayer_perceptron", root='../') as l:
    # Record the data-set summary before the search starts.
    l.log_and_print(data_set)
    l.log("")

    # The earlier manual MultilayerPerceptron training was superseded by
    # hyperparameter search; run the tuner and keep the resulting model.
    model = hyperparameter_tuning.fit_hyper(root='../')