Example 1
def basic_multilayer():
    """
    基本的多层感知机实现
    :return:
    """

    def relu(X):
        return nd.maximum(X, 0)

    def net(X):
        X = X.reshape((-1, num_inputs))
        H = relu(nd.dot(X, W1) + b1)
        return nd.dot(H, W2) + b2

    batch_size = 256
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

    num_inputs, num_outputs, num_hiddens = 784, 10, 256

    W1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens))
    b1 = nd.zeros(num_hiddens)
    W2 = nd.random.normal(scale=0.01, shape=(num_hiddens, num_outputs))
    b2 = nd.zeros(num_outputs)
    params = [W1, b1, W2, b2]

    for param in params:
        param.attach_grad()

    loss = gloss.SoftmaxCrossEntropyLoss()

    num_epochs, lr = 5, 0.5
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)
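
Both calling conventions of d2l.train_ch3 appear throughout these examples: the from-scratch version passes params and lr, while the Gluon version passes a trainer. For reference, here is a condensed sketch of the helper (an assumption that it mirrors the book's chapter 3.6 training loop; accuracy/loss bookkeeping omitted):

from mxnet import autograd

def train_ch3_sketch(net, train_iter, test_iter, loss, num_epochs,
                     batch_size, params=None, lr=None, trainer=None):
    for epoch in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y).sum()
            l.backward()
            if trainer is None:
                # manual minibatch SGD: param <- param - lr * grad / batch_size
                for param in params:
                    param[:] = param - lr * param.grad / batch_size
            else:
                # Gluon path: the Trainer applies the parameter update
                trainer.step(batch_size)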
Example 2
    def generate(self):
        net = nn.Sequential()
        net.add(nn.Dense(256, activation='relu'), nn.Dense(10))
        net.initialize(init.Normal(sigma=0.01))
        batch_size = 256
        train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

        loss = gloss.SoftmaxCrossEntropyLoss()
        trainer = gluon.Trainer(net.collect_params(), 'sgd',
                                {'learning_rate': 0.5})
        num_epochs = 5
        d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                      None, None, trainer)
Example 3
def method1():
    num_epochs, lr, batch_size = 5, 0.5, 256
    # assumed values for the otherwise-undefined names, matching the book's
    # dropout section (chapter 3.13)
    drop_prob1, drop_prob2 = 0.2, 0.5
    num_hiddens1, num_hiddens2, num_outputs = 256, 256, 10
    loss = gloss.SoftmaxCrossEntropyLoss()
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    net = nn.Sequential()
    net.add(nn.Dense(num_hiddens1, activation="relu"),
            nn.Dropout(drop_prob1),
            nn.Dense(num_hiddens2, activation="relu"),
            nn.Dropout(drop_prob2),
            nn.Dense(num_outputs))
    net.initialize(init.Normal(sigma=0.01))
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
                  None, trainer)
Example 4
def Method0():
    # Assumes w1, b1, w2, b2, net, train_iter, test_iter and batch_size
    # are already defined (see Example 1 for the from-scratch versions).
    params = [w1, b1, w2, b2]
    for param in params:
        param.attach_grad()
    loss = gloss.SoftmaxCrossEntropyLoss()
    num_epochs, lr = 5, 0.5
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                  params, lr)
    for x, y in test_iter:
        break
    true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
    pred_labels = d2l.get_fashion_mnist_labels(net(x).argmax(axis=1).asnumpy())
    titles = [
        true + '\n' + pred for true, pred in zip(true_labels, pred_labels)
    ]
    d2l.show_fashion_mnist(x[0:9], titles[0:9])
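
For context, d2l.get_fashion_mnist_labels maps integer class ids to the ten Fashion-MNIST category names; it is essentially the following (a sketch matching the book's helper):

def get_fashion_mnist_labels(labels):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]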
Example 5
def main():
    batch_size = 256
    # Download and read the dataset
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    # Define and initialize the model
    net = nn.Sequential()
    net.add(nn.Dense(10))
    net.initialize(init.Normal(sigma=0.01))
    # Define the loss function
    loss = gloss.SoftmaxCrossEntropyLoss()
    # Define the optimization algorithm
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.01})
    num_epochs = 5
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                  None, None, trainer)
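
Note that nn.Dense(10) emits raw scores (logits); SoftmaxCrossEntropyLoss fuses the softmax with the cross-entropy for numerical stability. To read off class probabilities at inference time, apply the softmax explicitly (a usage sketch, reusing net and test_iter from above):

from mxnet import nd

for X, y in test_iter:
    probs = nd.softmax(net(X))    # class probabilities, shape (batch_size, 10)
    preds = probs.argmax(axis=1)  # predicted class ids
    break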
Example 6
def simple_multilayer():
    """
    多层感知机简洁实现
    :return:
    """
    net = nn.Sequential()
    # dropout belongs after the hidden layer, not after the output layer
    net.add(nn.Dense(256, activation='relu'),
            nn.Dropout(0.2),
            nn.Dense(10))
    net.initialize(init.Normal(sigma=0.01))

    batch_size = 256
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

    loss = gloss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})

    num_epochs = 10
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, trainer)
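
Gluon's nn.Dropout is only active during training (inside autograd.record()) and is a no-op at inference. Its from-scratch counterpart is inverted dropout; a sketch along the lines of the book's chapter 3.13 function:

from mxnet import nd

def dropout(X, drop_prob):
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    if keep_prob == 0:  # drop every element
        return X.zeros_like()
    mask = nd.random.uniform(0, 1, X.shape) < keep_prob
    # rescale by 1/keep_prob so the expected activation is unchanged
    return mask * X / keep_prob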
Example 7
def dropout_gluon():
    drop_prob1, drop_prob2, lr, batch_size, num_epochs = 0.2, 0.5, 0.1, 64, 50

    net = nn.Sequential()
    net.add(
        nn.Dense(256, activation="relu"),
        nn.Dropout(drop_prob1),  # add a dropout layer after the first fully connected layer
        nn.Dense(256, activation="relu"),
        nn.Dropout(drop_prob2),  # add a dropout layer after the second fully connected layer
        nn.Dense(10))
    net.initialize(init.Normal(sigma=0.01))

    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

    loss = gloss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                  None, None, trainer)
Example 8
def method1():
    # Assumes train_iter, test_iter and batch_size are already defined
    # (see Example 5 for the data-loading code).
    vnet = nn.Sequential()
    vnet.add(nn.Dense(10))
    vnet.initialize(init.Normal(sigma=0.01))
    loss = gloss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(vnet.collect_params(), 'sgd',
                            {'learning_rate': 0.1})
    num_epochs = 5
    d2l.train_ch3(vnet, train_iter, test_iter, loss, num_epochs, batch_size,
                  None, None, trainer)
    for x, y in test_iter:
        break
    true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
    pred_labels = d2l.get_fashion_mnist_labels(vnet(x).argmax(axis=1).asnumpy())
    titles = [
        true + '\n' + pred for true, pred in zip(true_labels, pred_labels)
    ]
    d2l.show_fashion_mnist(x[0:9], titles[0:9])
Example 9
# Concise implementation of a multilayer perceptron (Section 3.10)
import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn

# 3.10.0 Load and read the data
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
# 3.10.1 Define and initialize the model
net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'), nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
# Softmax and cross-entropy loss function
loss = gloss.SoftmaxCrossEntropyLoss()
# Define the optimization algorithm
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
# Train the model
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
              None, trainer)

Example 10
import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
loss = gloss.SoftmaxCrossEntropyLoss()  # SoftmaxCELoss is an alias for this
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
              None, trainer)
for x, y in test_iter:
    break
true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(x).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(x[0:9], titles[0:9])
Example 11
    def train(self):
        num_epochs, lr = 5, 100.0
        d2l.train_ch3(self.net, self.train_iter, self.test_iter, self.loss,
                      num_epochs, self.batch_size, self.params, lr)
Example 12
import d2lzh as d2l
from mxnet import nd
from mxnet.gluon import loss as gloss

# load the data and fix the layer sizes (28*28 = 784 inputs, 10 classes)
batch_size = 256
iter_training, iter_testing = d2l.load_data_fashion_mnist(batch_size)
number_inputs, number_outputs, number_hiddens = 784, 10, 256

W1 = nd.random.normal(scale=0.01, shape=(number_inputs, number_hiddens))
b1 = nd.zeros(number_hiddens)

W2 = nd.random.normal(scale=0.01, shape=(number_hiddens, number_outputs))
b2 = nd.zeros(number_outputs)

parameters = [W1, b1, W2, b2]

for parameter in parameters:
    parameter.attach_grad()


def relu(X):
    return nd.maximum(X, 0)


def net(X):
    X = X.reshape((-1, number_inputs))
    H = relu(nd.dot(X, W1) + b1)
    return nd.dot(H, W2) + b2


loss = gloss.SoftmaxCrossEntropyLoss()

# hyperparameters
number_epochs = 5
learning_rate = 0.5
d2l.train_ch3(net, iter_training, iter_testing, loss, number_epochs,
              batch_size, parameters, learning_rate)
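
As a sanity check, a forward pass on a fake batch confirms the wiring (a usage sketch; the fake batch mimics Fashion-MNIST's 1x28x28 images):

X = nd.random.normal(shape=(2, 1, 28, 28))  # two fake grayscale images
print(net(X).shape)                         # expected: (2, 10)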
Example 13
# Assumes relu, W1, b1, W2, b2, num_inputs, params, train_iter, test_iter
# and batch_size are defined as in Example 1.
def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(nd.dot(X, W1) + b1)
    return nd.dot(H, W2) + b2


# Loss function
loss = gloss.SoftmaxCrossEntropyLoss()

num_epochs, lr = 5, 0.5
# Calls the train_ch3 helper from Section 3.6
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params,
              lr)

for X, y in test_iter:
    break

true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]

d2l.show_fashion_mnist(X[0:9], titles[0:9])

Example 14
def method0():
    # Assumes net and params are defined as in Example 1.
    num_epochs, lr, batch_size = 5, 0.5, 256
    loss = gloss.SoftmaxCrossEntropyLoss()
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                  params, lr)
Example 15
def accuracy(y_hat, y):
    # fraction of predictions that match the labels
    return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()


def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        y = y.astype('float32')
        acc_sum += (net(X).argmax(axis=1) == y).sum().asscalar()
        n += y.size
    return acc_sum / n
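
The remainder of this snippet leans on the from-scratch softmax-regression pieces from the book's chapter 3.6; a sketch of the definitions it assumes:

import d2lzh as d2l
from mxnet import nd

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs, num_outputs = 784, 10
W = nd.random.normal(scale=0.01, shape=(num_inputs, num_outputs))
b = nd.zeros(num_outputs)
W.attach_grad()
b.attach_grad()

def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(axis=1, keepdims=True)
    return X_exp / partition  # each row sums to 1

def net(X):
    return softmax(nd.dot(X.reshape((-1, num_inputs)), W) + b)

def cross_entropy(y_hat, y):
    return -nd.pick(y_hat, y).log()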


# Quick test of the accuracy function (y_hat and y come from an earlier cell)
print('accuracy test:', accuracy(y_hat, y))
print('accuracy,d2l_test:', d2l.evaluate_accuracy(data_iter=test_iter,
                                                  net=net))

# 6. Train the model ==================================================================================
num_epoch, lr = 5, 0.1
d2l.train_ch3(net, train_iter, test_iter, cross_entropy, num_epoch, batch_size,
              [W, b], lr)

# 7. Predict on the test set ============================================
for X, y in test_iter:
    break
true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]

d2l.show_fashion_mnist(X[0:9], titles[0:9])
Example 16
#
# DLN is free software; you can redistribute it and/or modify it.
#
# DLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Contributors:
#     Xiao Wang - initial implementation

import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn

# load data
batch_size = 256
iter_training, iter_testing = d2l.load_data_fashion_mnist(batch_size)

net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))

loss = gloss.SoftmaxCrossEntropyLoss()

trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})

# hyperparameters
number_epochs = 5
d2l.train_ch3(net, iter_training, iter_testing, loss, number_epochs,
              batch_size, None, None, trainer)
Example 17
    def train(self):
        d2l.train_ch3(self.net, self.train_iter, self.test_iter, self.loss,
                      self.num_epochs, self.batch_size, None, None,
                      self.optimizer)

        return self.model
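
For context, a minimal skeleton of the wrapper class this method could belong to; every name here is a hypothetical reconstruction mirroring the earlier examples, not the original source:

import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn

class FashionMnistClassifier:  # hypothetical name
    def __init__(self, num_epochs=5, batch_size=256, lr=0.5):
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.train_iter, self.test_iter = d2l.load_data_fashion_mnist(
            batch_size)
        self.net = nn.Sequential()
        self.net.add(nn.Dense(256, activation='relu'), nn.Dense(10))
        self.net.initialize(init.Normal(sigma=0.01))
        self.loss = gloss.SoftmaxCrossEntropyLoss()
        self.optimizer = gluon.Trainer(self.net.collect_params(), 'sgd',
                                       {'learning_rate': lr})
        self.model = self.net  # the trained net doubles as the model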