Example #1
def main(argv):
    np.random.seed(1337)
    np.seterr(all='ignore')
    warnings.simplefilter(action='ignore', category=FutureWarning)

    print()
    print('Classification Experiment: Spambase')
    print()

    # Spambase data: 57 features per message with a 0/1 spam label in the last column.
    data = np.loadtxt('./data/csv/spambase.csv', delimiter=',')
    X = preprocessing.scale(data[:, :57])
    Y = data[:, 57].astype(int)

    # One-hot encode the labels and hold out 25% of the rows for testing.
    ohe = one_hot.OneHotEncoder(Y)

    X, X_t, Y, Y_t = model_selection.train_test_split(X,
                                                      ohe.encode(Y),
                                                      train_size=0.75)

    # Feed-forward network: 57 inputs -> 100 -> 100 -> 50 -> 10 -> 2 outputs.
    nn = Network()

    nn.add(InputLayer(57, learning=0.25, regular=0.001, momentum=0.0125))
    nn.add(
        HiddenLayer(100,
                    learning=0.25,
                    regular=0.001,
                    momentum=0,
                    function=LeakyReLU()))
    nn.add(
        HiddenLayer(100,
                    learning=0.25,
                    regular=0.001,
                    momentum=0,
                    function=LeakyReLU()))
    nn.add(HiddenLayer(50, learning=0.25, regular=0.001, momentum=0.0125))
    nn.add(HiddenLayer(10, learning=0.25, regular=0.001, momentum=0.0125))
    nn.add(OutputLayer(2))

    # Train on mini-batches, then predict on the held-out set and decode the
    # one-hot outputs back to class labels for scoring.
    nn.fit(X, Y, batch=250, epochs=1000)

    P = nn.predict(X_t)

    P = ohe.decode(P)
    Y_t = ohe.decode(Y_t)

    print()
    print()
    print()
    print('                   Result: {:.2f}% Correct'.format(
        100 * (Y_t == P).sum() / float(len(Y_t))))
    print()
    print('    Classification Report:')
    print()
    print(metrics.classification_report(Y_t, P))
    print()
    print('         Confusion Matrix:')
    print()
    print(metrics.confusion_matrix(Y_t, P))
    print()
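These examples are excerpted as bare main functions, so the imports and the entry point are not shown. A minimal preamble of the kind they assume is sketched below; the NumPy, pandas and scikit-learn imports follow from the names used above, while the import paths of the project-local pieces (Network, InputLayer, HiddenLayer, OutputLayer, LeakyReLU, one_hot, imbalanced) are not visible in the snippets and are left as placeholders.

import sys
import warnings

import numpy as np
import pandas as pd
from sklearn import metrics, model_selection, preprocessing

# Project-local pieces; their real import paths are not shown in these
# snippets, so the lines below are placeholders to adjust to the repository.
# from ??? import Network, InputLayer, HiddenLayer, OutputLayer, LeakyReLU
# import one_hot
# import imbalanced


if __name__ == '__main__':
    main(sys.argv[1:])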
Example #2
def main(argv):
    np.random.seed(1337)
    np.seterr(all='ignore')
    warnings.simplefilter(action='ignore', category=FutureWarning)

    print()
    print('Classification Experiment: Digits')
    print()

    # Digits data ships as separate train and test CSVs: 64 pixel features
    # plus the digit label (0-9) in the last column.
    train = np.loadtxt('./data/csv/digits_train.csv', delimiter=',')
    X = preprocessing.scale(train[:, :64])
    Y = train[:, 64].astype(int)

    test = np.loadtxt('./data/csv/digits_test.csv', delimiter=',')
    X_t = preprocessing.scale(test[:, :64])
    Y_t = test[:, 64].astype(int)

    # Fit the encoder on the union of train and test labels so every digit class is known.
    ohe = one_hot.OneHotEncoder(np.concatenate((Y, Y_t), axis=0))

    nn = Network()

    nn.add(InputLayer(64, learning=0.25, regular=0.001, momentum=0.0125))
    nn.add(
        HiddenLayer(100,
                    learning=0.25,
                    regular=0.001,
                    momentum=0,
                    function=LeakyReLU()))
    nn.add(
        HiddenLayer(100,
                    learning=0.25,
                    regular=0.001,
                    momentum=0,
                    function=LeakyReLU()))
    nn.add(HiddenLayer(75, learning=0.25, regular=0.001, momentum=0.0125))
    nn.add(HiddenLayer(25, learning=0.25, regular=0.001, momentum=0.0125))
    nn.add(OutputLayer(10))

    nn.fit(X, ohe.encode(Y), batch=250, epochs=500)

    P = nn.predict(X_t)

    P = ohe.decode(P)

    print()
    print()
    print()
    print('                   Result: {:.2f}% Correct'.format(
        100 * (Y_t == P).sum() / float(len(Y_t))))
    print()
    print('    Classification Report:')
    print()
    print(metrics.classification_report(Y_t, P))
    print()
    print('         Confusion Matrix:')
    print()
    print(metrics.confusion_matrix(Y_t, P))
    print()
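Every example uses a project-local one_hot.OneHotEncoder rather than the scikit-learn class of the same name. Its implementation is not part of these snippets; judging only from how it is called, a minimal stand-in could look like the sketch below, where encode turns integer labels into one-hot rows and decode maps network outputs back to labels by argmax.

import numpy as np


class OneHotEncoder:
    # Illustrative stand-in for one_hot.OneHotEncoder; not the project's code.

    def __init__(self, labels):
        # Remember the distinct classes present in the label vector.
        self.classes = np.unique(labels)

    def encode(self, labels):
        # One row per sample, one column per class, 1.0 at the sample's class.
        return (labels[:, None] == self.classes[None, :]).astype(float)

    def decode(self, outputs):
        # Take the highest-scoring column and map it back to the original label.
        return self.classes[np.argmax(outputs, axis=1)]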
Example #3
def main(argv):
    np.random.seed(2704)
    np.seterr(all='ignore')
    warnings.simplefilter(action='ignore', category=FutureWarning)

    print()
    print('Classification Experiment: Red Wine')
    print()

    # Balance the quality classes by oversampling, then split off the target column.
    data = imbalanced.oversample(
        pd.read_csv('./data/csv/wine_red.csv', sep=';'), 'quality')
    X = data.drop(['quality'], axis=1).values
    Y = data.quality.values

    # Standardize the features; note that the scaler is fit on the full dataset
    # before the 50/50 train/test split.
    sclr = preprocessing.StandardScaler().fit(X)

    ohe = one_hot.OneHotEncoder(Y)

    X, X_t, Y, Y_t = model_selection.train_test_split(X,
                                                      ohe.encode(Y),
                                                      train_size=0.5)

    X = sclr.transform(X)
    X_t = sclr.transform(X_t)

    nn = Network()

    nn.add(InputLayer(11, learning=0.25, regular=0.005, momentum=0.01))
    nn.add(
        HiddenLayer(100,
                    learning=0.25,
                    regular=0.005,
                    momentum=0,
                    function=LeakyReLU()))
    nn.add(
        HiddenLayer(100,
                    learning=0.25,
                    regular=0.005,
                    momentum=0,
                    function=LeakyReLU()))
    nn.add(HiddenLayer(50, learning=0.25, regular=0.005, momentum=0.01))
    nn.add(HiddenLayer(25, learning=0.25, regular=0.005, momentum=0.01))
    nn.add(OutputLayer(6))

    nn.fit(X, Y, batch=500, epochs=1000)

    P = nn.predict(X_t)

    P = ohe.decode(P)
    Y_t = ohe.decode(Y_t)

    print()
    print()
    print()
    print('                   Result: {:.2f}% Correct'.format(
        100 * (Y_t == P).sum() / float(len(Y_t))))
    print()
    print('    Classification Report:')
    print()
    print(metrics.classification_report(Y_t, P))
    print()
    print('         Confusion Matrix:')
    print()
    print(metrics.confusion_matrix(Y_t, P))
    print()
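Example #3 additionally relies on a project-local imbalanced.oversample(frame, column) call before splitting. Its implementation is likewise not shown; by name and usage it presumably evens out the class counts, and a rough pandas equivalent, assuming plain random oversampling with replacement up to the size of the largest class, might look like this:

import pandas as pd


def oversample(frame, target):
    # Assumed behaviour: grow every minority class by sampling its rows with
    # replacement until each value of the target column matches the largest
    # class count; the majority class is passed through unchanged.
    counts = frame[target].value_counts()
    largest = counts.max()
    parts = []
    for value, count in counts.items():
        group = frame[frame[target] == value]
        if count < largest:
            group = group.sample(n=largest, replace=True, random_state=0)
        parts.append(group)
    return pd.concat(parts, ignore_index=True)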