Example #1
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# Model, Dense, GradientDescent, SoftMaxCrossEntropyWithLogits and the
# weight_initializer/bias_initializer helpers come from the example's own
# library; the original listing does not show their import lines.


def main(args):
    # Import the MNIST data set; labels come back one-hot encoded
    mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

    # A single dense layer over the flattened 28x28 images: this is plain
    # softmax regression, 784 inputs to 10 class logits
    model = Model(
        layers=[
            Dense(10,
                  weight_initializer=weight_initializer,
                  bias_initializer=bias_initializer,
                  input_shape=(784,))
        ],
        optimizer=GradientDescent(learning_rate=0.5),
        loss=SoftMaxCrossEntropyWithLogits())

    # Train
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        model.fit_batch(batch_xs, batch_ys)

    # Test trained model
    actual_labels = np.argmax(mnist.test.labels, 1)
    predictions = model.predict(mnist.test.images)
    predicted_labels = np.argmax(predictions, 1)

    accuracy = (actual_labels == predicted_labels).mean()
    print("Test accuracy: {}".format(accuracy))
Example #2
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# Model, the layer classes (Conv2D, MaxPool2D, Flatten, Dense), ReLU, AdaGrad
# and SoftMaxCrossEntropyWithLogits come from the example's own library; the
# original listing shows neither their import lines nor the definitions of
# weight_initializer and bias_initializer.


def main(args):
    # Import the MNIST data set; labels come back one-hot encoded
    mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
    relu = ReLU()

    # Two conv/pool stages shrink the image 28x28 -> 14x14 -> 7x7 ('same'
    # padding keeps each convolution size-preserving; each 2x2 max-pool
    # halves both spatial dimensions), then two dense layers classify
    model = Model(
        layers=[
            Conv2D(filter_size=(10, 10),
                   input_shape=(28, 28, 1),
                   stride=(1, 1),
                   channels=32,
                   activation=relu,
                   padding='same',
                   filter_initializer=weight_initializer,
                   bias_initializer=bias_initializer),
            MaxPool2D(pool_size=(2, 2)),
            Conv2D(filter_size=(5, 5),
                   stride=(1, 1),
                   channels=16,
                   activation=relu,
                   padding='same',
                   filter_initializer=weight_initializer,
                   bias_initializer=bias_initializer),
            MaxPool2D(pool_size=(2, 2)),
            Flatten(),
            Dense(1024,
                  weight_initializer=weight_initializer,
                  bias_initializer=bias_initializer,
                  activation=relu),
            Dense(10,
                  weight_initializer=weight_initializer,
                  bias_initializer=bias_initializer)
        ],
        optimizer=AdaGrad(learning_rate=0.001, epsilon=1e-8),
        loss=SoftMaxCrossEntropyWithLogits())

    # Train
    for _ in range(200):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        batch_xs = np.reshape(batch_xs, [-1, 28, 28, 1])  # flat 784 vectors -> NHWC images
        model.fit_batch(batch_xs, batch_ys)
        if args.verbose:
            print('Batch {} loss: {}'.format(
                model.batch_number,
                model.loss.compute(model.predict(batch_xs), batch_ys)))

    # Test trained model
    actual_labels = np.argmax(mnist.test.labels, 1)
    predictions = model.predict(np.reshape(mnist.test.images, [-1, 28, 28, 1]))
    predicted_labels = np.argmax(predictions, 1)

    accuracy = (actual_labels == predicted_labels).mean()
    print("Test accuracy: {}".format(accuracy))