Example #1
from mxnet import gluon
from mxnet.gluon import nn

import utils  # assumed: the tutorial's local utils.py helper (try_gpu, load_data_fashion_mnist, train)


def mlp(num_epochs, unit_count, hidden_layer_num=1):
    net = nn.Sequential()
    with net.name_scope():
        for _ in range(hidden_layer_num):
            net.add(nn.Dense(unit_count, activation="relu"))
        net.add(nn.Dense(10))
    net.initialize()
    ctx = utils.try_gpu()
    batch_size = 256
    train_data, test_data = utils.load_data_fashion_mnist(batch_size)
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.5})
    return utils.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=num_epochs)
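A quick usage sketch (not part of the original example): since the depth is a parameter, the same function can compare networks of different depths. The epoch count and unit count below are illustrative, and utils is assumed to be the tutorial's helper module.

for depth in (1, 2, 3):
    # Hypothetical call: train a ReLU MLP with `depth` hidden layers of 256 units each.
    result = mlp(num_epochs=5, unit_count=256, hidden_layer_num=depth)
    print("hidden layers:", depth, "->", result)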
Example #2
from mxnet import gluon
from mxnet.gluon import nn

import utils  # assumed: the tutorial's local utils.py helper, as in the example above


def cnn(num_epochs):
    net = nn.Sequential()
    with net.name_scope():
        net.add(
            nn.Conv2D(channels=20, kernel_size=5, activation="relu"),
            nn.MaxPool2D(pool_size=2, strides=2),
            nn.Conv2D(channels=50, kernel_size=3, activation="relu"),
            nn.MaxPool2D(pool_size=2, strides=2),
            nn.Flatten(),
            nn.Dense(128, activation="relu"),
            nn.Dense(10)
        )
    net.initialize()
    ctx = utils.try_gpu()
    batch_size = 256
    train_data, test_data = utils.load_data_fashion_mnist(batch_size)
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.5})
    return utils.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=num_epochs)
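As a sanity check (a sketch, not part of the original example), the convolutional stack's intermediate shapes for a 1x28x28 Fashion-MNIST image can be traced layer by layer:

from mxnet import nd
from mxnet.gluon import nn

probe = nn.Sequential()
with probe.name_scope():
    probe.add(
        nn.Conv2D(channels=20, kernel_size=5, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(channels=50, kernel_size=3, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Flatten(),
    )
probe.initialize()
X = nd.random.uniform(shape=(1, 1, 28, 28))
for layer in probe:
    X = layer(X)
    print(layer.name, "output shape:", X.shape)
# 28x28 -> 24x24 (5x5 conv) -> 12x12 (pool) -> 10x10 (3x3 conv) -> 5x5 (pool),
# so nn.Flatten() hands the first Dense layer 50 * 5 * 5 = 1250 features.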
Example #3
from mxnet import nd
from mxnet import autograd
from mxnet import gluon

import utils  # assumed: the tutorial's local utils.py helper, as in the examples above

batch_size = 256
train_data, test_data = utils.load_data_fashion_mnist(batch_size)

weight_scale = 0.01

# First convolution: 50 output channels, 1 input channel, 5x5 kernels.
w1 = nd.random.normal(shape=(50, 1, 5, 5), scale=weight_scale)
b1 = nd.zeros(w1.shape[0])

# Second convolution: 100 output channels, 50 input channels, 3x3 kernels.
w2 = nd.random.normal(shape=(100, 50, 3, 3), scale=weight_scale)
b2 = nd.zeros(w2.shape[0])

# First dense layer: 2500 flattened features (100 channels x 5x5 maps) -> 5000 units.
w3 = nd.random.normal(shape=(2500, 5000), scale=weight_scale)
b3 = nd.zeros(w3.shape[1])

# Output layer: 5000 units -> 10 classes.
w4 = nd.random.normal(shape=(w3.shape[1], 10), scale=weight_scale)
b4 = nd.zeros(w4.shape[1])

params = [w1, b1, w2, b2, w3, b3, w4, b4]
# Allocate gradient buffers so autograd can track each parameter.
for param in params:
    param.attach_grad()


def net(X, verbose=False):
    X = X.as_in_context(w1.context)
    # First block: 5x5 convolution -> ReLU -> 2x2 max-pooling.
    h1_conv = nd.Convolution(data=X,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=w1.shape[0])
    h1 = nd.Pooling(data=nd.relu(h1_conv), pool_type="max",
                    kernel=(2, 2), stride=(2, 2))
    # Second block: 3x3 convolution -> ReLU -> 2x2 max-pooling.
    # (The remaining layers follow from the parameter shapes defined above.)
    h2_conv = nd.Convolution(data=h1, weight=w2, bias=b2,
                             kernel=w2.shape[2:], num_filter=w2.shape[0])
    h2 = nd.flatten(nd.Pooling(data=nd.relu(h2_conv), pool_type="max",
                               kernel=(2, 2), stride=(2, 2)))
    # Dense layers: 2500 -> 5000 with ReLU, then 5000 -> 10 class scores.
    h3 = nd.relu(nd.dot(h2, w3) + b3)
    if verbose:
        print("block 1 output:", h1.shape, "flattened:", h2.shape)
    return nd.dot(h3, w4) + b4
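To make the from-scratch example end-to-end, here is a minimal training-loop sketch. It is an assumption, not the original continuation: the learning rate and epoch count are illustrative, and the SGD step is written out by hand because the parameters are plain NDArrays rather than managed by a gluon.Trainer.

softmax_xent = gluon.loss.SoftmaxCrossEntropyLoss()
learning_rate = 0.2  # illustrative value

for epoch in range(5):
    train_loss = 0.0
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_xent(output, label)
        loss.backward()
        # Manual SGD update; dividing by batch_size averages the summed gradients.
        for param in params:
            param[:] = param - learning_rate / batch_size * param.grad
        train_loss += nd.mean(loss).asscalar()
    print("epoch %d, avg loss %.4f" % (epoch, train_loss / len(train_data)))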