def test_high_acc():
    output_layer = 10

    training_images = mnist.train_images()
    training_labels = mnist.train_labels()
    testing_images = mnist.test_images()
    testing_labels = mnist.test_labels()

    # Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1]
    training_inputs = training_images.reshape(
        training_images.shape[0],
        training_images.shape[1] * training_images.shape[2]).astype('float32')
    normalized_inputs = training_inputs / 255
    # One-hot encode the labels by indexing rows of the identity matrix
    normalized_outputs = np.eye(output_layer)[training_labels]

    testing_inputs = testing_images.reshape(
        testing_images.shape[0],
        testing_images.shape[1] * testing_images.shape[2]).astype('float32')
    norm_test_inputs = testing_inputs / 255
    norm_test_outputs = testing_labels

    layers = [784, 30, 10]

    learning_rate = 0.001
    batch_size = 1
    epochs = 5

    nn = NeuralNetwork(layers, batch_size, epochs, learning_rate)
    nn.fit(normalized_inputs, normalized_outputs, False)
    acc = nn.accuracy_test(norm_test_inputs, norm_test_outputs)
    assert acc > 90
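
The one-hot encoding above relies on NumPy fancy indexing: row i of an identity matrix is the indicator vector for class i. A minimal standalone sketch:

import numpy as np

labels = np.array([3, 0, 9])
one_hot = np.eye(10)[labels]   # pick one identity row per label
print(one_hot.shape)           # (3, 10)
print(one_hot[0].argmax())     # 3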
Example #2
def main():
    output_layer = 10

    training_images = mnist.train_images()
    training_labels = mnist.train_labels()
    testing_images = mnist.test_images()
    testing_labels = mnist.test_labels()

    training_inputs = training_images.reshape(
        training_images.shape[0],
        training_images.shape[1] * training_images.shape[2]).astype('float32')
    normalized_inputs = training_inputs / 255
    normalized_outputs = np.eye(output_layer)[training_labels]

    testing_inputs = testing_images.reshape(
        testing_images.shape[0],
        testing_images.shape[1] * testing_images.shape[2]).astype('float32')
    norm_test_inputs = testing_inputs / 255
    norm_test_outputs = testing_labels

    layers = [784, 30, 10]
    learning_rate = 0.001
    batch_size = 1
    epochs = 5

    nn = NeuralNetwork(layers, batch_size, epochs, learning_rate)
    nn.fit(normalized_inputs, normalized_outputs)
    nn.time_stamps_test()
Example #3
def a_realistic_use_case():
    # === scikit-learn digits example (8x8 images; a small MNIST-style dataset)
    from sklearn.datasets import load_digits
    from sklearn.preprocessing import StandardScaler
    from sklearn.model_selection import train_test_split

    digits = load_digits()
    X_scale = StandardScaler()
    X = X_scale.fit_transform(digits.data)

    y = digits.target
    np.random.seed(1000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    def to_vect(y):
        # One-hot encode integer labels into 10-dimensional indicator vectors
        y_vect = np.zeros((len(y), 10))
        for i in range(len(y)):
            y_vect[i, y[i]] = 1
        return y_vect

    y_train = to_vect(y_train)
    y_test = to_vect(y_test)

    three_layer_nn_sse = NeuralNetwork(
        nn_structure=[64, 30, 10],
        f=[AF.sigmoid, AF.sigmoid],
        df=[AF.sigmoid_derivative, AF.sigmoid_derivative],
        J=LF.loss_sse,
        dJ=LF.loss_deriv_sse)

    loss = three_layer_nn_sse.train(X_train,
                                    y_train,
                                    n_epoch=3000,
                                    batch_size=len(y_train),
                                    alpha=0.25)
    plt.plot(loss)
    plt.title('SSE Error')
    plt.xlabel('Iteration number')
    plt.ylabel('J')
    plt.savefig("2_results/three_layer_nn_train__real_case.png")
    plt.show()

    print("SSE Accuracy:", three_layer_nn_sse.predict(X_test, y_test), "%")

    print("------------------------------------")
    print("Test model output SSE Weights:", three_layer_nn_sse.get_weights())
    print("Test model output SSE Bias:", three_layer_nn_sse.get_bias())
Example #4
import numpy as np
import matplotlib.pyplot as plt
# NeuralNetwork, LeakyRelu, Linear, and Quadratic come from the
# example's own library; their import paths aren't shown in the snippet.

# Synthetic 1-D regression target with Gaussian noise
n = 10000
X = np.linspace(-15, 15, num=n)
noise = np.random.normal(0, 0.1, n)
y = 4 * np.sin(X / 3) + 3 * np.cos(X) - X + noise

# Random 80/20 train/validation split on the indices
trainingSetSize = int(0.8 * n)
trainIdxs = np.random.choice(n, trainingSetSize, replace=False)
valIdxs = np.array(list(set(np.arange(n)) - set(trainIdxs)))

# [:, None] reshapes each 1-D array into an (m, 1) column of samples
train_X, train_y = X[trainIdxs][:, None], y[trainIdxs][:, None]
val_X, val_y = X[valIdxs][:, None], y[valIdxs][:, None]

model = NeuralNetwork(size=[1, 50, 50, 1],
                      activation=[LeakyRelu(),
                                  LeakyRelu(),
                                  Linear()],
                      loss=Quadratic(),
                      regression=True)

model.SGD(train_X,
          train_y,
          val_X,
          val_y,
          epochs=200,
          batch_percent=0.0005,
          eta=0.001,
          lmbda=0.5,
          verbose=True)
modelY = model.feedforward(val_X)

# Plot the noisy targets against the fitted curve
plt.title("f(x) = 4sin(x/3) + 3cos(x) - x + Normal(0, 0.1)")
plt.scatter(val_X, val_y, s=1, label="data")
plt.scatter(val_X, modelY, s=1, label="model")
plt.legend()
plt.show()
Example #5
def not_a_realistic_use_case():
    # Initial Settings =====
    np.random.seed(0)
    nn_img_size = 32
    num_classes = 3
    learning_rate = 0.0001
    num_epochs = 500
    batch_size = 4

    trainset = DataSet.case(path='../Data/images/db/train/*/*.jpg',
                            pattern=r'(?<=train/)(.*?)(?=/)')
    testset = DataSet.case(path='../Data/images/db/test/*.jpg',
                           pattern=r'(?<=test/)(.*?)(?=\.jpg)')

    # Prepare Dataset =====

    def reduce_normalize(img):
        # Shrink to nn_img_size x nn_img_size, flatten, then standardize
        # to zero mean and unit variance
        img = Filter.reduce_size(
            img.astype(np.float64),
            target_size=(nn_img_size, nn_img_size)).flatten()

        mean = np.mean(img)
        std = np.std(img)
        return (img - mean) / std

    dataset = DataSet(trainset, testset, transform=reduce_normalize)

    X_train, Y_train = dataset.get_train()
    X_test, Y_test = dataset.get_test()

    # Train and Test =====
    np.random.seed(1)
    two_layer_nn_mse = NeuralNetwork(
        nn_structure=[nn_img_size**2, num_classes],
        f=[AF.relu],
        df=[AF.relu_derivative],
        J=LF.loss_mse,
        dJ=LF.loss_deriv_mse)

    two_layer_nn_ce = NeuralNetwork(nn_structure=[nn_img_size**2, num_classes],
                                    f=[AF.relu],
                                    df=[AF.relu_derivative],
                                    J=LF.loss_crossentropy,
                                    dJ=LF.loss_deriv_crossentropy)

    mse_loss = two_layer_nn_mse.train(X_train,
                                      Y_train,
                                      n_epoch=num_epochs,
                                      batch_size=batch_size,
                                      alpha=learning_rate)
    ce_loss = two_layer_nn_ce.train(X_train,
                                    Y_train,
                                    n_epoch=num_epochs,
                                    batch_size=batch_size,
                                    alpha=learning_rate)

    plt.subplot(1, 2, 1)
    plt.plot(mse_loss)
    plt.title('MSE Error')
    plt.xlabel('Epoch number')
    plt.ylabel('Average J')

    plt.subplot(1, 2, 2)
    plt.plot(ce_loss)
    plt.title('Cross Entropy')
    plt.xlabel('Epoch number')
    plt.ylabel('Average J')
    plt.savefig("2_results/two_layer_nn_train__loss_function_comparison.png")
    plt.show()

    print("MSE Accuracy:", two_layer_nn_mse.predict(X_test, Y_test), "%")
    print("CE Accuracy:", two_layer_nn_ce.predict(X_test, Y_test), "%")

    print("------------------------------------")
    print("Test model output MSE Weights:", two_layer_nn_mse.get_weights())
    print("Test model output MSE Bias:", two_layer_nn_mse.get_bias())
    print("------------------------------------")
    print("Test model output CE Weights:", two_layer_nn_ce.get_weights())
    print("Test model output CE Bias:", two_layer_nn_ce.get_bias())
Example #6
import sys

import numpy as np
# `td` (labeled color-sample data) and NeuralNetwork come from the
# example's own project; their import paths aren't shown in the snippet.

filename = sys.argv[1]

numInputs = 3
numFeatures = 4
numOutputs = 1

#positive = td.orange_torpedo_cover * 10
positive = td.orange_bin_cover + td.orange_avhs_pool_bin_cover
#negative = td.yellow_torpedo_board + td.blue_water + td.white_glare + td.white_wall + td.green_wall + td.wall_behind_torpedo + td.avhs_pool_floor
negative = td.white_bin_border + td.black_bin_background + td.yellow_bin_silhouette + td.green_ground + td.avhs_pool_floor

inputs = np.array(positive + negative, dtype=np.float32) / 256
targets = np.array([[1]]*len(positive) + [[0]]*len(negative), dtype=np.float32)

nn = NeuralNetwork(numInputs, numFeatures, numOutputs)

#nn.load(filename)

# Train indefinitely; every 256 iterations, report the loss,
# checkpoint the weights to disk, and dump their current values.
# (The original's two modulus checks had identical periods, so they
# are merged here.)
i = 0
while True:
    nn.train(inputs, targets)

    if i % 256 == 0:
        print("{} {}".format(i // 256, nn.evaluate(inputs, targets)))
        nn.save(filename)
        print("{}".format(nn.session().run(nn.weights())))
    i += 1
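
The targets above are built with list arithmetic; an equivalent NumPy-native sketch (the sample counts are made up):

import numpy as np

n_pos, n_neg = 120, 340  # illustrative counts
targets = np.concatenate([np.ones((n_pos, 1), dtype=np.float32),
                          np.zeros((n_neg, 1), dtype=np.float32)])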

Example #7
#!/usr/bin/env python3

import sys

import numpy as np

from lib.neural_network import NeuralNetwork
from lib import image as im

filename = sys.argv[1]

nn = NeuralNetwork(3, 4, 1)
nn.load(filename)

while True:
    # Read a frame from stdin and reorder its channels from BGR to RGB
    imgI = im.read(sys.stdin.buffer)[:, :, [2, 1, 0]]
    img_shape = imgI.shape

    # One network input per pixel, scaled into [0, 1)
    inputs = imgI.reshape([-1, 3]).astype(np.float32) / 256

    outputs = nn.apply(inputs)

    # Replicate the single-channel score across RGB and rescale to 8-bit
    imgO = (np.concatenate([outputs] * 3, 1).reshape(img_shape) * 255).astype(
        np.uint8)

    im.write(sys.stdout.buffer.raw, imgO)
#	im.write(sys.stdout.buffer.raw, np.concatenate([imgI[:,:,[2,1,0]], imgO]))
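
The [:, :, [2, 1, 0]] index above reverses the channel axis, converting OpenCV-style BGR frames to RGB; a tiny sketch of the same fancy-indexing trick:

import numpy as np

bgr = np.arange(12).reshape(2, 2, 3)  # toy 2x2 "image" with channels B, G, R
rgb = bgr[:, :, [2, 1, 0]]            # reorder the last axis
print(bgr[0, 0], rgb[0, 0])           # [0 1 2] [2 1 0]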
Example #8
from pickle import _Unpickler

import numpy as np
# NeuralNetwork, ReLU, Sigmoid, and CrossEntropy come from the example's
# own library; `f` is an already-opened binary file holding the pickled
# (training, validation, test) splits. Neither is shown in the snippet.
u = _Unpickler(f)
u.encoding = 'latin1'  # needed to read Python 2 pickles under Python 3

TRAINING, VALIDATION, TESTING = u.load()


def preprocess(data):
    x, y = data[0], data[1]
    # One hot
    y = np.eye(10)[y]
    return x, y


train_X, train_y = preprocess(TRAINING)
val_X, val_y = preprocess(VALIDATION)

model = NeuralNetwork(size=[784, 30, 10],
                      activation=[ReLU(), Sigmoid()],
                      loss=CrossEntropy())

# Suggested settings: eta = 0.5, lmbda = 0.5 for cross-entropy loss;
# eta = 3 for quadratic loss
model.SGD(train_X,
          train_y,
          val_X,
          val_y,
          epochs=10,
          batch_percent=0.0002,
          eta=0.05,
          lmbda=0.5)
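
Setting encoding='latin1' on the unpickler is the standard way to read pickles written under Python 2 (such as the classic mnist.pkl.gz) from Python 3; the same effect through the public API, with an assumed file name:

import gzip
import pickle

# The file name is an assumption; latin1 maps Python 2 byte strings
# one-to-one onto code points, so NumPy arrays pickled under Python 2
# load intact.
with gzip.open('mnist.pkl.gz', 'rb') as f:
    training, validation, testing = pickle.load(f, encoding='latin1')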