Example #1
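
All of the examples below assume NumPy, Matplotlib, scikit-learn, and the project's own modules are already imported. The project-local import paths shown here are assumptions based on the identifiers used; adjust them to the real package layout:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.preprocessing import OneHotEncoder

# Project-local modules (hypothetical paths; adjust to the actual package layout)
import dataloader
import layers
from network import NeuralNetwork
from trainer import Trainer
from gradient_checker import GradientChecker
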
def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')
    # train_x, train_y = dataloader.load('xor', split=False)

    # use a random batch
    BATCH_SIZE = 32
    used_batch = np.random.permutation(len(train_x))[:BATCH_SIZE]
    batch_x = train_x[used_batch]
    batch_y = train_y[used_batch]

    nn = NeuralNetwork(layers=[
        layers.Dense(512, input_shape=(784, )),
        layers.Activation('tanh'),
        layers.Dense(256),
        layers.Activation('tanh'),
        layers.Dense(10),
        layers.Activation('softmax')
    ],
                       optimizer='rmsprop',
                       initializer='he-et-al')

    trainer = Trainer(nn, loss='cross-entropy', metrics=['accuracy'])

    # Training
    gc = GradientChecker(nn, trainer)

    # Check for N steps
    N = 10000
    for _ in range(N):
        gc.check(batch_x, batch_y)
        trainer.batch_train(batch_x, batch_y)

    print(trainer.predict(batch_x))

Example #2

def main():
    # data
    train_x, train_y = dataloader.load('xor', split=False)

    # use a random batch
    BATCH_SIZE = 4
    used_batch = np.random.permutation(len(train_x))[:BATCH_SIZE]
    batch_x = train_x[used_batch]
    batch_y = train_y[used_batch]

    # Network implementation
    nn = NeuralNetwork(layers=[
        layers.Dense(300, input_shape=(2, )),
        layers.Activation('tanh'),
        layers.Dense(200),
        layers.Activation('leaky-relu'),
        layers.Dense(1)
    ],
                       optimizer='rmsprop')

    # Training
    trainer = Trainer(nn, loss='mean-square', print_step_mod=1000)
    gc = GradientChecker(nn, trainer)

    # Check for N steps
    N = 10000
    for _ in range(N):
        gc.check(batch_x, batch_y)
        trainer.batch_train(batch_x, batch_y)
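
The internals of GradientChecker are not shown here, but the standard technique it presumably implements is a centered finite difference: perturb each parameter by ±ε and compare (L(θ+ε) − L(θ−ε)) / 2ε against the backpropagated gradient. A minimal standalone sketch of the idea:

def numeric_grad(f, x, eps=1e-5):
    """Centered finite-difference estimate of the gradient of scalar f at x."""
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        orig = x[ix]
        x[ix] = orig + eps
        f_plus = f(x)
        x[ix] = orig - eps
        f_minus = f(x)
        x[ix] = orig  # restore the parameter before moving on
        grad[ix] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return grad

# Check against a loss with a known gradient: f(w) = sum(w**2), so df/dw = 2w.
w = np.random.randn(3, 2)
print(np.abs(numeric_grad(lambda t: np.sum(t ** 2), w) - 2 * w).max())
# should be tiny (floating-point rounding only)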

Example #3

def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')

    # net: an autoencoder (784 -> 500 -> 200 -> 500 -> 784) that reconstructs its input
    nn = NeuralNetwork(layers=[
        layers.Dense(500, input_shape=(784, )),
        layers.Activation('relu'),
        layers.Dense(200),
        layers.Activation('relu'),
        layers.Dense(500),
        layers.Activation('relu'),
        layers.Dense(784)
    ],
                       optimizer='rmsprop',
                       initializer='he-et-al')

    trainer = Trainer(nn, loss='mean-square')

    epochs = 10
    for i in range(epochs):
        # train with the inputs as their own targets (reconstruction objective)
        trainer.train(train_x, train_x, epochs=1)
        # after each epoch, show an original test digit and its reconstruction
        plt.imshow(test_x[0].reshape(28, 28))
        plt.show()
        plt.imshow(trainer.predict(np.array([test_x[0]])).reshape(28, 28))
        plt.show()
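
Beyond eyeballing the images, reconstruction quality can be quantified numerically. A minimal sketch reusing trainer from above (assuming predict accepts a full batch, as in the other examples):

# Mean squared reconstruction error over the whole test set.
recon = trainer.predict(test_x)
print('test reconstruction MSE:', np.mean((recon - test_x) ** 2))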

Example #4

def main():
    # Generating fake data: 7 * X + 15
    SYNT_TRAIN_SIZE = 200
    train_x = np.random.rand(SYNT_TRAIN_SIZE)
    train_y = np.reshape(
        7 * train_x + 15 + np.random.normal(0, 0.8, size=SYNT_TRAIN_SIZE),
        (SYNT_TRAIN_SIZE, 1))
    train_x = np.reshape(train_x, (SYNT_TRAIN_SIZE, 1))

    plt.plot(train_x, train_y, 'ro', alpha=0.5)
    plt.show()

    # Network implementation
    nn = NeuralNetwork(layers=[layers.Dense(1, input_shape=(1, ))],
                       optimizer='sgd')

    # Training
    trainer = Trainer(nn, loss='mean-square', print_step_mod=1)
    # batch_size=-1 presumably means full-batch training on the whole dataset
    trainer.train(train_x, train_y, epochs=30000, batch_size=-1)

    print('Learned parameters:')
    print('weights:', nn.layers[0].W)
    print('biases:', nn.layers[0].b)
    print('True function: 7 * X + 15')
    plt.plot(train_x,
             nn.forward(train_x),
             'bo',
             train_x,
             train_y,
             'ro',
             alpha=0.5)
    plt.show()
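
Since this network is a single Dense(1) layer, it is exactly linear regression, so the learned weight and bias can be sanity-checked against the closed-form least-squares fit:

# Closed-form least-squares fit for comparison; expect roughly 7 and 15.
slope, intercept = np.polyfit(train_x.ravel(), train_y.ravel(), 1)
print('closed-form slope:', slope, 'intercept:', intercept)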

Example #5

def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')

    # net
    nn = NeuralNetwork(layers=[
        layers.Dense(512, input_shape=(784, )),
        layers.Activation('relu'),
        layers.Dense(256),
        layers.Activation('relu'),
        layers.Dense(10),
        layers.Activation('softmax')
    ],
                       optimizer='rmsprop',
                       initializer='he-et-al')

    trainer = Trainer(nn, loss='cross-entropy', metrics=['accuracy'])

    # test_size=0.1 presumably holds out 10% of the training data for validation
    trainer.train(train_x, train_y, epochs=10, test_size=0.1)
    print(trainer.eval(test_x, test_y))
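
The reported accuracy can be reproduced by hand from the raw predictions. A sketch, assuming trainer.predict returns softmax probabilities and the loader returns one-hot labels (which the softmax/cross-entropy setup suggests):

# Manual accuracy: fraction of samples whose predicted class matches the label.
probs = trainer.predict(test_x)
manual_acc = np.mean(np.argmax(probs, axis=1) == np.argmax(test_y, axis=1))
print('manual accuracy:', manual_acc)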

Example #6

def main():
    # Getting data
    train_x, train_y = dataloader.load('xor', split=False)

    # Network implementation
    nn = NeuralNetwork(
        layers=[
            layers.Dense(4, input_shape=(2, )),
            layers.Activation('tanh'),
            layers.Dense(1)
        ],
        optimizer='rmsprop',
    )

    # Training
    trainer = Trainer(nn, loss='mean-square', print_step_mod=1000)
    trainer.train(train_x, train_y, epochs=5000, batch_size=-1)

    print(trainer.predict(train_x))

Example #7

def main():
    # Generating fake data: Class1 around (0, 0) and Class2 around (10, 10)
    train_x, train_y = make_blobs(n_samples=200,
                                  n_features=2,
                                  cluster_std=1.0,
                                  centers=[(0, 0), (10, 10)],
                                  shuffle=False,
                                  random_state=42)
    one_hot = OneHotEncoder(sparse_output=False)  # use sparse=False on scikit-learn < 1.2
    train_y_one_hot = one_hot.fit_transform(train_y.reshape(len(train_y), 1))

    plt.scatter(train_x[:, 0], train_x[:, 1], alpha=0.5, c=train_y)
    plt.show()

    # Network implementation
    nn = NeuralNetwork(layers=[
        layers.Dense(2, input_shape=(2, )),
        layers.Activation('softmax')
    ],
                       optimizer='sgd')

    # Training
    trainer = Trainer(nn,
                      loss='cross-entropy',
                      print_step_mod=100,
                      metrics=['accuracy'])
    trainer.train(train_x, train_y_one_hot, epochs=45, batch_size=20)

    print('Learned parameters:')
    print('weights:', nn.layers[0].W)
    print('biases:', nn.layers[0].b)

    # classify points sampled uniformly over the plane; the +2 below offsets the
    # color values so predictions are distinguishable from the training scatter
    eval_x = np.random.uniform(low=-3, high=13, size=(200, 2))

    plt.scatter(train_x[:, 0], train_x[:, 1], alpha=0.5)
    plt.scatter(eval_x[:, 0],
                eval_x[:, 1],
                alpha=0.5,
                c=(np.argmax(nn.forward(eval_x), axis=1) + 2))
    plt.show()
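
Sampling random points only approximates the decision boundary; evaluating the trained network on a dense grid shows it directly. A sketch reusing nn, train_x, and train_y from above:

# Evaluate the trained classifier on a dense grid and draw the class regions.
xs, ys = np.linspace(-3, 13, 200), np.linspace(-3, 13, 200)
grid_x, grid_y = np.meshgrid(xs, ys)
grid = np.c_[grid_x.ravel(), grid_y.ravel()]
pred = np.argmax(nn.forward(grid), axis=1).reshape(grid_x.shape)

plt.contourf(grid_x, grid_y, pred, alpha=0.3)
plt.scatter(train_x[:, 0], train_x[:, 1], c=train_y, alpha=0.5)
plt.show()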

Example #8

def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')
    # net
    nn = NeuralNetwork(layers=[layers.Dense(10, input_shape=(784, ))])
    trainer = Trainer(nn)