# Imports assumed by this handler (exact module paths may differ in the original
# project); info_label, the NeuralNetework wrapper (spelling as in the source),
# and the globals below are defined elsewhere in the same module.
from tkinter import simpledialog

import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical  # or tensorflow.keras.utils.to_categorical


def start():
    global train_dataset_len, train_dim, class_number, X, y, NN
    test_size = simpledialog.askfloat(
        "Set test size",
        "Input the test size(0 ~ 0.9), which means the size of samples used to test model's accuracy: ",
        initialvalue=0.2,
        minvalue=0,
        maxvalue=0.9)
    test_samples = int(train_dataset_len * test_size)
    train_samples = train_dataset_len - test_samples
    info_label["text"] = "Train Data Size:        %6d\nTest Data Size:         %6d" % (
        train_samples, test_samples)
    epoch = simpledialog.askinteger(
        "Set Epoch",
        "Input the Epochs(0~INF), which mean the iteration of training: ",
        initialvalue=8,
        minvalue=1)

    max_value = np.max(X)
    print(max_value)
    with open("max_value.txt", 'w') as f:
        f.write(str(max_value))
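    # The constant is saved so the same scaling can be reapplied at prediction
    # time; a hypothetical reload (not part of the original module) would be:
    #     with open("max_value.txt") as f:
    #         max_value = float(f.read())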

    X = X / max_value
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_size)
    NN = NeuralNetework()
    NN.create_model(train_dim, class_number)
    # One-hot encode the integer class labels.
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    NN.train(X_train, y_train, X_test, y_test, epochs=epoch, batch_size=1)
    print(
        "\nFinished Model Training! Press 'Save Model' Button to save this model.\n"
    )
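
The NeuralNetework wrapper used by start() is defined elsewhere in the project. A
minimal Keras-based sketch of the interface the handler relies on is shown below;
the layer sizes, optimizer, and loss are placeholders, not the original
architecture.

from keras.models import Sequential
from keras.layers import Dense


class NeuralNetework:
    """Hypothetical sketch of the model wrapper assumed by start()."""

    def create_model(self, input_dim, class_number):
        # Simple fully connected classifier; the real architecture may differ.
        self.model = Sequential([
            Dense(64, activation="relu", input_dim=input_dim),
            Dense(class_number, activation="softmax"),
        ])
        self.model.compile(optimizer="adam",
                           loss="categorical_crossentropy",
                           metrics=["accuracy"])

    def train(self, X_train, y_train, X_test, y_test, epochs=8, batch_size=1):
        # The held-out split made in start() is used as validation data.
        self.model.fit(X_train, y_train,
                       validation_data=(X_test, y_test),
                       epochs=epochs,
                       batch_size=batch_size)

    def save(self, path="model.h5"):
        # Matches the 'Save Model' button mentioned in start()'s final message.
        self.model.save(path)
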
Example #2
from Datasets import Datasets as dataset
from NN import NN
import os, glob, numpy as np

if __name__ == "__main__":
    data = dataset()
    data.read_data()
    training, label = data.get_titanic_training_data()
    testing, label_test = data.get_titanic_test_data()

    print(training.shape)
    print(testing.shape)

    ai = NN()

    # Training
    ai.create_model()
    ai.train_model(training, label, 500)
    ai.model.summary()

    # Testing
    # ai.load_model()
    ai.test_model(testing, label_test)
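
The NN and Datasets classes come from the project's own modules. A plausible,
hypothetical sketch of the NN interface this script drives (create_model,
train_model, test_model, and the commented-out load_model) is given below; the
input width, layer sizes, and file path are assumptions, not the project's
actual definitions.

from keras.models import Sequential, load_model as keras_load_model
from keras.layers import Dense


class NN:
    """Hypothetical sketch of the wrapper used by the Titanic script above."""

    def create_model(self, input_dim=8):
        # Binary survived/not-survived classifier; the real feature count may differ.
        self.model = Sequential([
            Dense(32, activation="relu", input_dim=input_dim),
            Dense(1, activation="sigmoid"),
        ])
        self.model.compile(optimizer="adam",
                           loss="binary_crossentropy",
                           metrics=["accuracy"])

    def train_model(self, X, y, epochs):
        self.model.fit(X, y, epochs=epochs, verbose=0)

    def load_model(self, path="titanic_model.h5"):
        # Restores a previously saved model instead of retraining.
        self.model = keras_load_model(path)

    def test_model(self, X_test, y_test):
        loss, acc = self.model.evaluate(X_test, y_test, verbose=0)
        print("Test accuracy: %.3f" % acc)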