Example #1
    def test(self, test_iter, W, b):
        X, y = next(iter(test_iter))  # grab the first test batch

        true_labels = d2l.get_fashion_mnist_labels(y.numpy())
        pred_labels = d2l.get_fashion_mnist_labels(
            self.net(X, W, b).argmax(dim=1).numpy())
        titles = [
            true + '\n' + pred for true, pred in zip(true_labels, pred_labels)
        ]

        d2l.show_fashion_mnist(X[0:9], titles[0:9])
Example #2
def Method0():
    params = [w1, b1, w2, b2]
    for param in params:
        param.attach_grad()
    loss = gloss.SoftmaxCrossEntropyLoss()
    num_epochs, lr = 5, 0.5
    d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                  params, lr)
    for x, y in test_iter:
        break
    true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
    pred_labels = d2l.get_fashion_mnist_labels(net(x).argmax(axis=1).asnumpy())
    titles = [
        true + '\n' + pred for true, pred in zip(true_labels, pred_labels)
    ]
    d2l.show_fashion_mnist(x[0:9], titles[0:9])
Example #3
def method1():
    vnet = nn.Sequential()
    vnet.add(nn.Dense(10))
    vnet.initialize(init.Normal(sigma=0.01))
    loss = gloss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(vnet.collect_params(), 'sgd',
                            {'learning_rate': 0.1})
    num_epochs = 5
    d2l.train_ch3(vnet, train_iter, test_iter, loss, num_epochs, batch_size,
                  None, None, trainer)
    for x, y in test_iter:
        break
    true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
    pred_labels = d2l.get_fashion_mnist_labels(
        vnet(x).argmax(axis=1).asnumpy())
    titles = [
        true + '\n' + pred for true, pred in zip(true_labels, pred_labels)
    ]
    d2l.show_fashion_mnist(x[0:9], titles[0:9])
Example #4
def method0():
    # Set up the data iterators and model parameters so the function is
    # self-contained; the names below are used throughout the body.
    batch_size = 256
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    num_inputs = 28 * 28
    num_outputs = 10
    w = nd.random.normal(scale=0.01, shape=(num_inputs, num_outputs))
    b = nd.zeros(num_outputs)
    w.attach_grad()
    b.attach_grad()
    # Accuracy of the untrained model; about 0.1 for 10 balanced classes.
    c = evaluate_accuracy(train_iter, net, w, b, num_inputs)
    print(c)
    num_epochs, lr = 5, 0.1
    train_ch3(net, w, b, num_inputs, train_iter, test_iter, cross_entropy,
              num_epochs, batch_size, [w, b], lr)
    for x, y in test_iter:
        break
    true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
    pred_labels = d2l.get_fashion_mnist_labels(
        net(x, w, b, num_inputs).argmax(axis=1).asnumpy())
    titles = [
        true + '\n' + pred for true, pred in zip(true_labels, pred_labels)
    ]
    d2l.show_fashion_mnist(x[0:9], titles[0:9])
Example #5
def net(X):
    X = X.reshape((-1, num_inputs))
    H = relu(nd.dot(X, W1) + b1)
    return nd.dot(H, W2) + b2


# In[7]:

# Loss function
loss = gloss.SoftmaxCrossEntropyLoss()

# In[8]:

num_epochs, lr = 5, 0.5
# Calls the training function defined in Section 3.6
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params,
              lr)

# In[9]:

for X, y in test_iter:
    break

true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]

d2l.show_fashion_mnist(X[0:9], titles[0:9])

Example #6
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size,
          [W, b], lr)

# %% [markdown]
# ## Prediction
#
# Now that training is complete, we can demonstrate how to classify images. Given a series of images (the third row of output below), we compare their true labels (the first line of text output) with the model's predictions (the second line).

# %%
for X, y in test_iter:
    break

true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]

d2l.show_fashion_mnist(X[10:19], titles[10:19])

# %% [markdown]
# ## Summary
#
# * Softmax regression can be used for multi-class classification. Compared with training linear regression, you will find the steps for training softmax regression remarkably similar: acquire and read the data, define the model and loss function, and train the model with an optimization algorithm. In fact, the vast majority of deep learning models are trained through similar steps.
#
# ## Exercises
#
# * In this section, we implemented the softmax function directly from its mathematical definition. What problems might this cause? (Hint: try computing the magnitude of $\exp(50)$.)
# * The `cross_entropy` function in this section follows the mathematical definition of the cross-entropy loss from the ["Softmax regression"](softmax-regression.ipynb) section. What problems might this implementation have? (Hint: consider the domain of the logarithm function.)
# * What remedies can you think of for the two problems above? (One common remedy is sketched in the code cell below.)
#
#
#
# ## Scan the QR code to visit the [discussion forum](https://discuss.gluon.ai/t/topic/741)
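# %% [markdown]
# A minimal sketch of one standard remedy for the first exercise, assuming
# MXNet's `nd` API; the helper name `stable_softmax` is illustrative, not part
# of the original notebook. Subtracting the row-wise maximum before
# exponentiating leaves the softmax value unchanged but bounds every exponent.

# %%
from mxnet import nd


def stable_softmax(X):
    # exp(x - c) / sum(exp(x - c)) equals softmax(x) for any constant c;
    # choosing c = max(x) keeps every exponent <= 0, so exp() cannot
    # overflow (exp(50) is already ~5e21, and exp(89) exceeds float32 range).
    X_shifted = X - X.max(axis=1, keepdims=True)
    X_exp = X_shifted.exp()
    return X_exp / X_exp.sum(axis=1, keepdims=True)


stable_softmax(nd.array([[100.0, 0.0, -100.0]]))  # finite, each row sums to 1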
Example #7
    def showPlt(self):
        X, y = self.info()
        d2l.show_fashion_mnist(X, d2l.get_fashion_mnist_labels(y))
Example #8
import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = nn.Sequential()
net.add(nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))
loss = gloss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
              None, trainer)
for x, y in test_iter:
    break
true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(x).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(x[0:9], titles[0:9])
Example #9
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, trainer=None):
    # Standard d2l training loop: forward pass under autograd, backward
    # pass, parameter update, then per-epoch loss and accuracy reporting.
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            if trainer is None:
                # update parameters with plain minibatch SGD
                d2l.sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))


train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size,
          [W, b], lr)


# show some results
'''
for X, y in test_iter:
	break

true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(X[0:9], titles[0:9])
'''



# 1. What problems might this definition of the loss function cause?
# 2. What problems might this definition of the softmax operation cause? Numerical instability.
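
# A minimal sketch addressing both questions, assuming MXNet's nd API; the
# helper names log_softmax and stable_cross_entropy are illustrative, not
# part of the original script. Fusing softmax and log via the log-sum-exp
# trick avoids both exp() overflow and taking log(0).
from mxnet import nd


def log_softmax(X):
    # log softmax(x)_j = (x_j - max(x)) - log(sum_k exp(x_k - max(x)))
    X_shifted = X - X.max(axis=1, keepdims=True)
    return X_shifted - X_shifted.exp().sum(axis=1, keepdims=True).log()


def stable_cross_entropy(logits, y):
    # Pick the log-probability of the true class directly; unlike
    # -nd.pick(softmax(logits), y).log(), this never evaluates log(0).
    return -nd.pick(log_softmax(logits), y)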