Example #1
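The snippets below omit their import preambles. A minimal sketch, assuming the host library exposes dataloader, layers, NeuralNetwork, Trainer, and GradientChecker at top level (the real module paths are not shown in this listing):

import numpy as np
import matplotlib.pyplot as plt                  # Examples #3 and #6
from sklearn.datasets import make_blobs          # Example #6 only
from sklearn.preprocessing import OneHotEncoder  # Example #6 only

# Assumed, library-specific imports -- replace <package> with the actual one:
# from <package> import dataloader, layers
# from <package>.network import NeuralNetwork, Trainer, GradientChecker
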
def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')
    # train_x, train_y = dataloader.load('xor', split=False)

    # use a random batch
    BATCH_SIZE = 32
    used_batch = np.random.permutation(len(train_x))[:BATCH_SIZE]
    batch_x = train_x[used_batch]
    batch_y = train_y[used_batch]

    nn = NeuralNetwork(layers=[
        layers.Dense(512, input_shape=(784, )),
        layers.Activation('tanh'),
        layers.Dense(256),
        layers.Activation('tanh'),
        layers.Dense(10),
        layers.Activation('softmax')
    ],
                       optimizer='rmsprop',
                       initializer='he-et-al')
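    # The stack above is a 784 -> 512 -> 256 -> 10 softmax classifier over
    # the ten MNIST digit classes; 'he-et-al' presumably selects He-style
    # weight initialization.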

    trainer = Trainer(nn, loss='cross-entropy', metrics=['accuracy'])

    # Training
    gc = GradientChecker(nn, trainer)
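    # A gradient checker conventionally compares backprop gradients against
    # finite-difference estimates; check()'s exact behaviour here is
    # library-specific. The same fixed batch is reused for every step, so
    # each check and update sees a reproducible forward/backward pass.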

    # Check for N steps
    N = 10000
    for _ in range(N):
        gc.check(batch_x, batch_y)
        trainer.batch_train(batch_x, batch_y)

    print(trainer.predict(batch_x))
Example #2
def main():
    # data
    train_x, train_y = dataloader.load('xor', split=False)

    # use a random batch
    BATCH_SIZE = 4
    used_batch = np.random.permutation(len(train_x))[:BATCH_SIZE]
    batch_x = train_x[used_batch]
    batch_y = train_y[used_batch]

    # Network implementation
    nn = NeuralNetwork(layers=[
        layers.Dense(300, input_shape=(2, )),
        layers.Activation('tanh'),
        layers.Dense(200),
        layers.Activation('leaky-relu'),
        layers.Dense(1)
    ],
                       optimizer='rmsprop')
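    # Note the bare Dense(1) output: the net regresses the XOR target
    # directly, which pairs with the mean-square loss configured below.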

    # Training
    trainer = Trainer(nn, loss='mean-square', print_step_mod=1000)
    gc = GradientChecker(nn, trainer)

    # Check for N steps
    N = 10000
    for _ in range(N):
        gc.check(batch_x, batch_y)
        trainer.batch_train(batch_x, batch_y)
Example #3
def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')

    # net
    nn = NeuralNetwork(layers=[
        layers.Dense(500, input_shape=(784, )),
        layers.Activation('relu'),
        layers.Dense(200),
        layers.Activation('relu'),
        layers.Dense(500),
        layers.Activation('relu'),
        layers.Dense(784)
    ],
                       optimizer='rmsprop',
                       initializer='he-et-al')
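    # 784 -> 500 -> 200 -> 500 -> 784: an autoencoder with a 200-unit
    # bottleneck, trained below to reconstruct its own input.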

    trainer = Trainer(nn, loss='mean-square')

    epochs = 10
    for i in range(epochs):
        trainer.train(train_x, train_x, epochs=1)
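        # Inputs double as targets for reconstruction; after each epoch,
        # show an original test digit next to its current reconstruction.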
        plt.imshow(test_x[0].reshape(28, 28))
        plt.show()
        plt.imshow(trainer.predict(np.array([test_x[0]])).reshape(28, 28))
        plt.show()
Example #4
def main():
    # data
    train_x, test_x, train_y, test_y = dataloader.load('mnist')

    # net
    nn = NeuralNetwork(
        layers=[
            layers.Dense(512, input_shape=(784, )),
            layers.Activation('relu'),
            layers.Dense(256),
            layers.Activation('relu'),
            layers.Dense(10),
            layers.Activation('softmax')
        ], optimizer='rmsprop', initializer='he-et-al')
    
    trainer = Trainer(nn, loss='cross-entropy', metrics=['accuracy'])

    trainer.train(train_x, train_y, epochs=10, test_size=0.1)
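    # test_size=0.1 presumably carves out 10% of the training data as a
    # validation split; eval() then reports metrics on the held-out MNIST
    # test set.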
    print(trainer.eval(test_x, test_y))
Example #5
def main():
    # Getting data
    train_x, train_y = dataloader.load('xor', split=False)

    # Network implementation
    nn = NeuralNetwork(
        layers=[
            layers.Dense(4, input_shape=(2, )),
            layers.Activation('tanh'),
            layers.Dense(1)
        ],
        optimizer='rmsprop',
    )

    # Training
    trainer = Trainer(nn, loss='mean-square', print_step_mod=1000)
    trainer.train(train_x, train_y, epochs=5000, batch_size=-1)
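    # batch_size=-1 above presumably requests full-batch training, which is
    # reasonable since XOR has only four distinct input patterns.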

    print(trainer.predict(train_x))
Example #6
def main():
    # Generating fake data: Class1 around (0, 0) and Class2 around (10, 10)
    train_x, train_y = make_blobs(n_samples=200,
                                  n_features=2,
                                  cluster_std=1.0,
                                  centers=[(0, 0), (10, 10)],
                                  shuffle=False,
                                  random_state=42)
    one_hot = OneHotEncoder(sparse=False)  # scikit-learn >= 1.2: use sparse_output=False
    print(train_x)  # debug print of the generated samples
    train_y_one_hot = one_hot.fit_transform(train_y.reshape(len(train_y), 1))
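    # The reshape to a column vector lets OneHotEncoder emit one row per
    # sample: class 0 -> [1, 0], class 1 -> [0, 1].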

    plt.scatter(train_x[:, 0], train_x[:, 1], alpha=0.5, c=train_y)
    plt.show()

    # Network implementation
    nn = NeuralNetwork(layers=[
        layers.Dense(2, input_shape=(2, )),
        layers.Activation('softmax')
    ],
                       optimizer='sgd')
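    # A single Dense(2) + softmax layer is multinomial logistic regression,
    # enough to linearly separate two well-spaced blobs.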

    # Training
    trainer = Trainer(nn,
                      loss='cross-entropy',
                      print_step_mod=100,
                      metrics=['accuracy'])
    trainer.train(train_x, train_y_one_hot, epochs=45, batch_size=20)

    print('Learned parameters:')
    print('weights:', nn.layers[0].W)
    print('biases:', nn.layers[0].b)

    eval_x = np.random.uniform(low=-3, high=13, size=(200, 2))
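    # The 200 uniform points above span the data range; coloring them by
    # predicted class (below) visualizes the learned decision regions.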

    plt.scatter(train_x[:, 0], train_x[:, 1], alpha=0.5)
    plt.scatter(eval_x[:, 0],
                eval_x[:, 1],
                alpha=0.5,
                c=(np.argmax(nn.forward(eval_x), axis=1) + 2))  # predicted class index, offset by 2
    plt.show()