# Example 1
def NN_fit(train_set, val_set, nn=5, epochs=10, width=10, layers=2):
    """Train several candidate networks and keep the best on validation MSE.

    Args:
        train_set: dataset with ``.X`` / ``.Y`` arrays used for training.
        val_set: dataset with ``.X`` / ``.Y`` arrays used for model selection.
        nn: number of independent training restarts to try.
        epochs: epochs per restart, forwarded to ``NeuralNetwork.train``.
        width: hidden-layer width, forwarded to ``NeuralNetwork.train``.
        layers: hidden-layer count, forwarded to ``NeuralNetwork.train``.

    Returns:
        The ``NeuralNetwork`` instance with the lowest validation MSE, or
        ``None`` if ``nn`` is 0.
    """
    from NN import NeuralNetwork

    # float('inf') instead of the old hard-coded 100: a restart whose MSE
    # exceeded 100 was silently discarded and the function returned None.
    best_error = float("inf")
    best_model = None
    # Use a throwaway index so the `nn` parameter is not shadowed inside
    # the loop (the original rebound `nn` to each NeuralNetwork instance).
    for _ in range(nn):
        model = NeuralNetwork()
        model.train(
            train_set,
            val_set,
            epochs=epochs,
            width=width,
            layers=layers,
            batch_size=20,
            learning_rate=0.001,
        )
        predicted_y, _ = model.predict(train_set.X)

        logger.info(
            f"NN train MSE {mean_squared_error(train_set.Y, predicted_y)}")
        predicted_y, _ = model.predict(val_set.X)
        error = mean_squared_error(val_set.Y, predicted_y)
        logger.info(f"NN dev MSE {error}")

        # Keep the restart that generalizes best to the validation set.
        if error < best_error:
            best_error = error
            best_model = model

    return best_model
# Example 2
def main(filename='data/iris-virginica.txt'):
    """Train a small feed-forward network on an iris data file and print
    its training accuracy.

    Args:
        filename: path, relative to the module-level ``filepath``, of a
            data file whose last column is the class label and whose
            remaining columns are numeric features.
    """
    # Load data: features are every column but the last; labels are the last.
    data = read_data('%s/%s' % (filepath, filename))

    X, y = data[:, :-1].astype(float), data[:, -1]

    # Number of distinct classes drives the output-layer size.
    class_vec = list(set(y))
    K = len(class_vec)

    # One-hot encode the labels. DataFrame.as_matrix() was deprecated in
    # pandas 0.23 and removed in 1.0; to_numpy() is the supported replacement.
    Y = pd.get_dummies(y).astype(int).to_numpy()

    # n samples, d features.
    n = X.shape[0]
    d = X.shape[1]

    # Layer sizes: input layer, one hidden layer of 5 units, output layer.
    print(n, d, K)
    layers = [d, 5, K]

    model = NeuralNetwork(layers=layers,
                          num_epochs=1000,
                          learning_rate=0.10,
                          alpha=0.9,
                          activation_func='sigmoid',
                          epsilon=0.001,
                          print_details=True)
    model.fit(X, Y)

    # Accuracy is measured on the training set itself.
    Y_hat = model.predict(X)
    accuracy = compute_acc(Y_hat, Y)
    print('Model training accuracy:\t%.2f' % (accuracy))
# Example 3
def main(filename='data/iris-virginica.txt'):
    """Train a small feed-forward network on an iris data file and print
    its training accuracy.

    Args:
        filename: path, relative to the module-level ``filepath``, of a
            data file whose last column is the class label and whose
            remaining columns are numeric features.
    """
    # Load data: features are every column but the last; labels are the last.
    data = read_data('%s/%s' % (filepath, filename))

    X, y = data[:, :-1].astype(float), data[:, -1]

    # Number of distinct classes drives the output-layer size.
    class_vec = list(set(y))
    K = len(class_vec)

    # One-hot encode the labels. DataFrame.as_matrix() was deprecated in
    # pandas 0.23 and removed in 1.0; to_numpy() is the supported replacement.
    Y = pd.get_dummies(y).astype(int).to_numpy()

    # n samples, d features.
    n = X.shape[0]
    d = X.shape[1]

    # Layer sizes: input layer, one hidden layer of 5 units, output layer.
    print(n, d, K)
    layers = [d, 5, K]

    model = NeuralNetwork(layers=layers, num_epochs=1000, learning_rate=0.10, alpha=0.9,
                          activation_func='sigmoid', epsilon=0.001, print_details=True)
    model.fit(X, Y)

    # Accuracy is measured on the training set itself.
    Y_hat = model.predict(X)
    accuracy = compute_acc(Y_hat, Y)
    print('Model training accuracy:\t%.2f' % (accuracy))
# Example 4
def my_solution2(X_train, X_test, y_train, y_test, hyperparams=None):
    """Train the custom NeuralNetwork, print a classification report, and
    return the fitted model with decoded predictions.

    Args:
        X_train, X_test: feature matrices.
        y_train, y_test: one-hot encoded label matrices.
        hyperparams: keyword arguments forwarded to ``NeuralNetwork``;
            sensible defaults are used when ``None``.

    Returns:
        Tuple ``(nn, y_pred, y_test)``: the fitted network, predicted class
        indices, and true class indices.
    """
    if hyperparams is None:  # identity check, not `== None` (PEP 8)
        hyperparams = {
            'hidden_layer_sizes': (3, ),
            'learning_rate': 0.1,
            'epoch': 3852,
            'momentum': 0.04,
            'tol': 1e-10,
            'reg_coef': 0
        }

    # BUG FIX: hasattr() inspects object attributes, not dict keys, so it was
    # always False and a caller-supplied batch_size was silently overwritten.
    if 'batch_size' not in hyperparams:
        # Default to full-batch training.
        hyperparams['batch_size'] = X_train.shape[0]

    # train with self
    nn = NeuralNetwork(**hyperparams)
    nn.fit(X_train, y_train)
    y_pred = nn.predict(X_test)

    # Decode one-hot rows into class indices for the report.
    y_pred = y_pred.argmax(axis=1)
    y_test = y_test.argmax(axis=1)

    # evaluate
    print('My implements2:')
    print(classification_report(y_test, y_pred))

    return nn, y_pred, y_test
# Example 5
def recognize(src):
    """Recognize every digit image cut from `src` and return them as one string.

    Args:
        src: source image passed to ``GetCutZip`` for segmentation.

    Returns:
        A string of recognized digits, one character per segmented image.
    """
    nn = NeuralNetwork([784, 250, 10], 'logistic')
    digits = []
    for img in GetCutZip(src):
        # Flatten to a 784-vector, score all 10 classes, take the argmax.
        scores = nn.predict(img.flatten())
        digits.append(str(np.argmax(scores)))
    return ''.join(digits)
# Example 6
from NN import NeuralNetwork
from Normalizer import Normalizer
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

# Load the breast-cancer dataset: feature matrix, binary target, feature count.
cancer = load_breast_cancer()
X = cancer['data']
y = cancer['target']
length = len(cancer['feature_names'])

# Network sized to the features: four shrinking hidden layers, one output unit.
nn = NeuralNetwork([length, 30, 20, 10, 5, 1])

# Random split with train_test_split defaults (75% train / 25% test).
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Fit the normalizer on the training split only, then apply to both splits,
# so no test-set statistics leak into training.
normalize = Normalizer()
normalize.fit(X_train)
X_train = normalize.transform(X_train)
X_test = normalize.transform(X_test)

nn.fit(X_train, y_train, epochs=1000, verbose=False)
predictions = nn.predict(X_test)
# Report the network's cost on the held-out test set.
print(nn.cost(predictions, y_test))
# Example 7
# Prepare the model inputs from the raw data (defined earlier in the file).
x = preprocess(data)

# First NUM_TRAIN samples for training.
train_x = x[:NUM_TRAIN]
train_y = y[:NUM_TRAIN]

# Last NUM_TEST samples held out for evaluation.
test_x = x[-NUM_TEST:]
test_y = y[-NUM_TEST:]

# NOTE(review): 'layer_dimentions' (sic) is the key the NN class expects —
# do not "fix" the spelling here without changing NN as well.
# Input layer is SIZE**2 (flattened image pixels); output layer has 10 classes.
params = {
    'train_inputs': train_x,
    'train_targets': train_y,
    'layer_dimentions': [SIZE**2, 450, 250, 50, 10],
    'learning_rate': 1e-3,
    'iterations': 250
}

nn = NN(params)
nn.train()

# Evaluate on the held-out set; micro-averaging aggregates the counts over
# all classes before computing each metric.
prediction = nn.predict(test_x)
accuracy = accuracy_score(test_y, prediction)
precision = precision_score(test_y, prediction, average='micro')
recall = recall_score(test_y, prediction, average='micro')
f1 = f1_score(test_y, prediction, average='micro')
conf_matrix = confusion_matrix(test_y, prediction)
print("accuracy:", accuracy)
print("precision:", precision)
print("recall:", recall)
print("f1:", f1)
print("confusion matrix:\n", conf_matrix)