Example #1
def main():
    args = parse_args()

    # dataset
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(flatten=True)

    # model layer dimensions
    input_dim = X_train.shape[1]
    num_classes = 10

    # create model
    model = Model()
    model.add(Dense(input_dim, 100), activation='relu')
    model.add(Dense(100, 200), activation='relu')
    model.add(Dense(200, 200), activation='relu')
    model.add(Dense(200, num_classes))

    # train model
    model.fit(X_train,
              y_train,
              val_data=(X_val, y_val),
              verbose=True,
              epochs=args.epochs,
              batch_size=args.batch_size,
              lr=args.lr)

    # evaluate model
    model.eval(X_test, y_test, verbose=True)
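parse_args is used above but not shown. A minimal sketch consistent with the attributes the script reads (args.epochs, args.batch_size, args.lr); the flag names and defaults here are assumptions, not this project's actual definitions:

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--batch-size', type=int, default=32)  # stored as args.batch_size
    parser.add_argument('--lr', type=float, default=0.01)
    return parser.parse_args()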
Example #2
    def test_backward_signal_is_zero(self):

        model = Dense(4, 3)
        model(np.random.randn(1, 4))
        actual = model.backward(np.zeros((1, 3)))

        self.assertEqual(np.zeros((1, 4)).tolist(), actual.tolist())
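Why this assertion holds: in a standard Dense layer the input gradient is the upstream gradient times the transposed weight matrix, so a zero upstream signal backpropagates to a zero input gradient. A sketch of the arithmetic, assuming the usual weight shape (in_features, out_features); the names are illustrative, not this library's API:

import numpy as np

W = np.random.randn(4, 3)               # weights, (in_features, out_features)
grad_output = np.zeros((1, 3))          # the zero upstream signal from the test
grad_input = grad_output @ W.T          # dL/dx = dL/dy @ W.T
print(np.array_equal(grad_input, np.zeros((1, 4))))  # True, matching the assertion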
Example #3
    def predictor(self, inp, outp):
        # builder-style API: each layer takes the previous layer as its input
        model = []
        model.append(Conv2D(inp, 32))
        model.append(PReLU(model[-1]))
        model.append(Dense(model[-1], 128))
        model.append(PReLU(model[-1]))
        model.append(Dense(model[-1], outp))
        return model
Example #4
    def test_dimensions(self):
        model = Dense(2, 1)
        actual = model(np.array([[1, 2]]))
        self.assertEqual((1, 1), actual.shape)

        model = Dense(1, 1)
        actual = model(np.array([[1]]))
        self.assertEqual((1, 1), actual.shape)

        model = Dense(4, 3)
        actual = model(np.random.randn(5, 4))
        self.assertEqual((5, 3), actual.shape)
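All three shape assertions follow from the usual Dense forward pass: with weights of shape (in_features, out_features), an input of shape (batch, in_features) maps to (batch, out_features). A sketch under that assumption (the function name here is illustrative, not this library's API):

import numpy as np

def dense_forward(x, weights, bias):
    # (batch, in) @ (in, out) + (out,) -> (batch, out)
    return x @ weights + bias

print(dense_forward(np.random.randn(5, 4), np.random.randn(4, 3), np.zeros(3)).shape)  # (5, 3)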
Example #5
def main():
    np.random.seed(42)

    if DATA == 'linear':
        X_train, y_train, X_val, y_val = make_linear(x_min=-5,
                                                     x_max=5,
                                                     n_samples=10000,
                                                     n_features=11,
                                                     n_labels=N_LABELS,
                                                     a=3,
                                                     b=4,
                                                     sigma=0.5,
                                                     test_size=0.2)
    else:
        X_train, y_train, X_val, y_val = make_sinus(x_min=-5,
                                                    x_max=5,
                                                    n_samples=10000,
                                                    n_features=11,
                                                    n_labels=N_LABELS,
                                                    a=3,
                                                    b=4,
                                                    amplitude=10,
                                                    phase=1,
                                                    sigma=0.5,
                                                    test_size=0.2)

    model = Sequential([
        Dense(X_train.shape[1], 100),
        ReLU(),
        Dense(100, 200),
        Sigmoid(),
        Dense(200, N_LABELS)
    ])
    loss = MSE()

    for epoch in range(N_EPOCHS):

        if BATCH_SIZE:
            for x_batch, y_batch in minibatch_iterator(X_train,
                                                       y_train,
                                                       batch_size=BATCH_SIZE,
                                                       shuffle=True):
                train(model, x_batch, y_batch, learning_rate=LEARNING_RATE)
        else:
            train(model, X_train, y_train, learning_rate=LEARNING_RATE)

        train_loss = np.mean(loss(model(X_train), y_train))
        val_loss = np.mean(loss(model(X_val), y_val))

        print("Epoch", epoch)
        print("Train MSE:", train_loss)
        print("Val MSE:", val_loss)
Example #6
def test_forward():
    model = Sequential()

    model.add(Input(shape=[1]))
    model.add(Dense(40, np.tanh))
    model.add(Dense(31, lambda x: x))
    model.add(Dense(2, lambda x: x))

    X = np.random.randn(5, 1)  # any batch with a single feature matches Input(shape=[1])
    print(model.predict(X))

    assert True  # smoke test: passes as long as the forward pass does not raise
Example #7
def test_dense_integration():

    # identity mapping on a linear ramp
    X = np.arange(10).reshape(-1, 1) / 10
    Y = X

    print("Linear")
    model = Sequential()
    model.add(Input(shape=[1]))
    model.add(Dense(1, lambda x: x))
    model.add(Dense(1, lambda x: x))
    model.compile(optimiser=SGDOptimiser(), loss=Msq())

    p = model.predict(X)
    print(np.mean(p))
    print(np.std(p))

    model.train(X, Y, 5, 10, True)  # positional args appear to be (batch_size, epochs, verbose)

    print("Sinusoidal")
    model = Sequential()
    model.add(Input(shape=[1]))
    model.add(Dense(128, np.tanh))
    model.add(Dense(1, lambda x: x))
    model.compile(optimiser=SGDOptimiser(learning_rate=1e-3), loss=Msq())

    X = np.linspace(-5, 5, 10000).reshape(-1, 1)
    Y = np.sin(X)
    model.train(X, Y, 64, 10, True)

    print("Random Multidim")
    model = Sequential()
    model.add(Input(shape=[5]))
    model.add(Dense(256, np.tanh))
    model.add(Dense(256, np.tanh))
    model.add(Dense(5, lambda x: x))
    model.compile(optimiser=SGDOptimiser(learning_rate=1e-3), loss=Msq())

    X = np.random.rand(10000, 5)
    Y = np.sin(X)
    model.train(X, Y, 64, 10, True)
Example #8
    def setUp(self):
        X_train, y_train, X_val, y_val = make_linear(x_min=-5,
                                                     x_max=5,
                                                     n_samples=100,
                                                     n_features=11,
                                                     n_labels=1,
                                                     a=3,
                                                     b=4,
                                                     sigma=0.5,
                                                     test_size=0.2)

        self.X_train = X_train
        self.y_train = y_train

        self.model = Sequential([
            Dense(X_train.shape[1], 100),
            ReLU(),
            Dense(100, 200),
            Sigmoid(),
            Dense(200, 1)
        ])
Example #9
    def test_backward_non_zero_gradient(self):

        model = Dense(4, 3)
        model(np.random.randn(1, 4))

        weights = np.copy(model.weights)

        model.backward(np.ones((1, 3)))
        model.step(1)
        new_weights = model.weights

        self.assertNotEqual(weights.tolist(), new_weights.tolist())
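The update behind this test: the weight gradient of a Dense layer is the cached input (transposed) times the upstream gradient, and step applies plain SGD with the given learning rate. A sketch of that arithmetic, assuming the standard formulas; the names are illustrative, not this library's API:

import numpy as np

x = np.random.randn(1, 4)               # input cached by the forward pass
grad_output = np.ones((1, 3))           # the non-zero upstream signal from the test
grad_weights = x.T @ grad_output        # dL/dW = x.T @ dL/dy, shape (4, 3)

W = np.random.randn(4, 3)
W_new = W - 1.0 * grad_weights          # step(1): SGD with learning rate 1
print(np.array_equal(W, W_new))         # False -- the weights have moved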
Example #10
from data_prep import process_csv
from nn.model import NeuralNetwork
from nn.layers import InputLayer, Dense
from nn.loss import CrossEntropy
from nn.optimizer import SGD
from nn.activations import sigmoid, tanh

test_file = '../data/mnist_test.csv'
train_file = '../data/mnist_train.csv'

x_train, y_train = process_csv(train_file)
x_test, y_test = process_csv(test_file)

model = NeuralNetwork()

model.addLayer(InputLayer((1, 784)))
model.addLayer(Dense(neuron_count=300, activation=tanh()))
model.addLayer(Dense(neuron_count=10, activation=sigmoid()))

model.compile(loss=CrossEntropy(), optimizer=SGD(alpha=0.000006))

train_loss, train_acc, val_loss, val_acc = model.fit(x_train,
                                                     y_train,
                                                     validation_set=True,
                                                     validation_split=0.1,
                                                     epochs=1,
                                                     batch_size=100)
Example #11
    def __init__(self,
                 layers,
                 data_source,
                 learning_rate=0.01,
                 steps=1000,
                 minibatch_size=100,
                 optimizer='adam',
                 loss_function='mse',
                 case_fraction=1.0,
                 validation_fraction=0.1,
                 test_fraction=0.2,
                 validation_interval=50,
                 session=None,
                 output_functions=None,
                 one_hot_encode_target=False,
                 accuracy_argmax=False):
        """
        Instantiates a new network.
        :param layers: Specification of the network's layers. Either a list of integers, interpreted as node counts
        for a sequence of dense layers, or a sequence of layer objects as given in the layers module.
        :param data_source: The data source, either as a path to a CSV file, or as a list of cases.
        :param learning_rate: The learning rate used by the neural network optimizer. Default: 0.01
        :param steps: The number of training steps to be carried out. Default: 1000
        :param minibatch_size: The number of cases included in each minibatch during training. Default: 100
        :param optimizer: The optimizer used when training the network ['adam', 'adadelta', 'adagrad',
                            'gradient_descent', 'rmsprop']. Default: 'adam'
        :param loss_function: The loss function used to evaluate the performance of the network
                                ['mse', 'mae', 'cross_entropy']. Default: 'mse'
        :param case_fraction: The fraction of the data set to be used for training, validation and testing sets.
                                Default: 1.0
        :param validation_fraction: The fraction of the data to be used for validation. Default: 0.1
        :param test_fraction: The fraction of the data to be used for testing. Default: 0.2
        :param validation_interval: The interval, in number of minibatches run through the network, between each
                                    validation test. Default: 50
        :param session: A tf.Session to be used by the network. If None, a new one is automatically created.
                        Default: None
        :param output_functions: A list of output_functions to be applied as a final stage in the network. Not applied
                                during training. Default: None
        :param one_hot_encode_target: If True, the target will be converted to a one_hot_vector, with length based on
                                        the output size of the network. Requires an integer target. Default: False
        :param accuracy_argmax: Presumably, if True, accuracy compares the argmax of outputs and targets rather than
                                the raw values. Default: False
        """

        self.inputs = None
        self.targets = None
        self.outputs = None
        self.raw_outputs = None
        self.training_op = None
        self.loss = None
        self.accuracy = None
        self.summaries = []
        self.saver = None
        self.graph = tf.Graph()

        with self.graph.as_default():
            if isinstance(layers[0], int):
                self.layers = []
                for i in range(len(layers) - 1):
                    self.layers.append(Dense(layers[i], layers[i + 1]))
            else:
                self.layers = layers

        self.learning_rate = learning_rate
        self.steps = steps
        self.minibatch_size = minibatch_size
        self.optimizer = _optimizers[optimizer]
        self.loss_function = _loss_functions[loss_function]
        self.validation_interval = validation_interval
        self.session = session
        self.one_hot_encode_target = one_hot_encode_target
        self.accuracy_argmax = accuracy_argmax

        if output_functions == 'argmax_one_hot':
            self.output_functions = [
                lambda x: tf.argmax(x, axis=1),
                lambda x: tf.one_hot(x, self.layers[-1].output_shape[0])
            ]
        else:
            self.output_functions = output_functions

        self.data = load_data(data_source, case_fraction, validation_fraction,
                              test_fraction)
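A usage sketch of this constructor. The class name Network and the CSV path are placeholders; only the keyword arguments come from the signature above:

# hypothetical instantiation; 'Network' and 'mnist.csv' are placeholder names
net = Network(layers=[784, 300, 10],       # integers expand to Dense(784, 300), Dense(300, 10)
              data_source='mnist.csv',
              optimizer='adam',
              loss_function='cross_entropy',
              minibatch_size=100,
              one_hot_encode_target=True)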
Example #12
"""
    The example of xor which cannot be learned by a linear model
"""

from nn.model import Sequential
from nn.layers import Dense, Tanh, Sigmoid, Relu
from nn.train import train
import numpy as np

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([[0.0], [1.0], [1.0], [0.0]])

net = Sequential()
net.add(Dense(2, 2))
net.add(Tanh())
net.add(Dense(2, 1))
net.add(Sigmoid())

train(net, inputs, targets)
print(net(np.expand_dims(inputs[0], 0)))
print(inputs[0].shape)

for X, y in zip(inputs, targets):
    predicted = net(np.expand_dims(X, 0))

    print(X, predicted, y)
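Why the hidden Tanh layer is needed: a purely linear model computes w1*x1 + w2*x2 + b, and XOR would require w1 + b > t and w2 + b > t for the two positive cases while b < t and w1 + w2 + b < t for the two negative ones, for any decision threshold t. Summing the first pair gives w1 + w2 + 2b > 2t, summing the second gives w1 + w2 + 2b < 2t, a contradiction. No linear model fits all four points; the nonlinearity between the two Dense layers is what makes the fit possible.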