def train_gluon_ch7(trainer_name, trainer_hyperparams, features, labels,
                    batch_size=10, num_epochs=2):
    # Initialize the model
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=0.01))
    loss = gloss.L2Loss()

    def eval_loss():
        return loss(net(features), labels).mean().asscalar()

    ls = [eval_loss()]
    data_iter = gdata.DataLoader(
        gdata.ArrayDataset(features, labels), batch_size, shuffle=True)
    # Create a Trainer instance to update the model parameters
    trainer = gluon.Trainer(net.collect_params(), trainer_name,
                            trainer_hyperparams)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)  # the Trainer averages the gradient over the batch
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    # Print the result and plot the loss curve
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    d2l.set_figsize()
    d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    d2l.plt.xlabel('epoch')
    d2l.plt.ylabel('loss')
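# Minimal usage sketch for train_gluon_ch7 (assumptions: the MXNet/Gluon imports
# used above are in scope, and the synthetic linear-regression data below is
# purely illustrative; any 2-D feature NDArray with matching labels works):
true_w, true_b = nd.array([2, -3.4]), 4.2
features = nd.random.normal(shape=(1000, 2))
labels = nd.dot(features, true_w) + true_b + 0.01 * nd.random.normal(shape=(1000,))
train_gluon_ch7('sgd', {'learning_rate': 0.05}, features, labels)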
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    """Plot y_vals against x_vals on a semilog-y scale.

    If a second curve (x2_vals, y2_vals) is given, it is drawn with a dotted
    line and labeled via legend.
    """
    d2l.set_figsize(figsize)
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')
        d2l.plt.legend(legend)
    d2l.plt.show()
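# Minimal usage sketch for semilogy (assumption: the loss values below are
# illustrative placeholders; in practice pass per-epoch train/test losses):
num_epochs = 5
train_ls = [0.9, 0.5, 0.3, 0.2, 0.15]
test_ls = [1.0, 0.6, 0.45, 0.4, 0.42]
semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
         range(1, num_epochs + 1), test_ls, ['train', 'test'])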
def train_gluon_ch7(trainer_name, hyperparams, features, labels,
                    num_epochs=2, batch_size=10):
    # Initialize the model: y = Xw + b
    net, loss = nn.Sequential(), gloss.L2Loss()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=0.01))

    # Squared loss over the full data set; w and b are learned from features/labels
    def eval_loss():
        return loss(net(features), labels).mean().asscalar()

    ls = [eval_loss()]  # record how the loss evolves
    data_iter = gdata.DataLoader(
        gdata.ArrayDataset(features, labels), batch_size, shuffle=True)
    # Use a Trainer to update the parameters
    trainer = gluon.Trainer(net.collect_params(), trainer_name, hyperparams)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)  # the Trainer averages the gradient over the batch
            # trainer.set_learning_rate(0.1)  # would override hyperparams' learning rate every step
            if (batch_i + 1) * batch_size % 100 == 0:  # record the loss every 100 examples
                ls.append(eval_loss())
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    d2l.set_figsize(figsize=(15, 5))
    d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)  # loss curve
    # Axis labels
    d2l.plt.xlabel('epochs')
    d2l.plt.ylabel('loss')
def train_ch7(trainer_fn, states, hyperparams, features, labels,
              batch_size=10, num_epochs=2):
    # Linear regression with the squared loss, trained from scratch
    net, loss = d2l.linreg, d2l.squared_loss
    w = nd.random.normal(scale=0.01, shape=(features.shape[1], 1))
    b = nd.zeros(1)
    w.attach_grad()
    b.attach_grad()

    def eval_loss():
        return loss(net(features, w, b), labels).mean().asscalar()

    ls = [eval_loss()]
    data_iter = gdata.DataLoader(
        gdata.ArrayDataset(features, labels), batch_size, shuffle=True)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X, w, b), y).mean()  # average loss over the minibatch
            l.backward()
            trainer_fn([w, b], states, hyperparams)  # update w and b in place
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    d2l.set_figsize()
    d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    d2l.plt.xlabel('epoch')
    d2l.plt.ylabel('loss')
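# Minimal usage sketch for train_ch7 (assumption: a plain minibatch SGD update;
# the function only needs trainer_fn(params, states, hyperparams) to update the
# parameters in place, with features/labels as in the sketch further above):
def sgd_update(params, states, hyperparams):
    for p in params:
        p[:] -= hyperparams['lr'] * p.grad

train_ch7(sgd_update, None, {'lr': 0.05}, features, labels)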
def xyplot(x_vals, y_vals, name):
    # Assumes PyTorch tensors as input (hence detach().numpy());
    # the NDArray variants below use asnumpy() instead
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.detach().numpy(), y_vals.detach().numpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
    d2l.plt.show()
    d2l.plt.close()
def show_trace(res):
    n = max(abs(min(res)), abs(max(res)), 10)
    f_line = np.arange(-n, n, 0.1)
    d2l.set_figsize()
    d2l.plt.plot(f_line, [x * x for x in f_line])
    d2l.plt.plot(res, [x * x for x in res], '-o')
    d2l.plt.xlabel('x')
    d2l.plt.ylabel('f(x)')
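# Minimal usage sketch for show_trace (assumption: 1-D gradient descent on
# f(x) = x**2 starting from x = 10; gd() below is illustrative only):
def gd(eta, num_iters=10):
    x = 10.0
    results = [x]
    for _ in range(num_iters):
        x -= eta * 2 * x  # f'(x) = 2x
        results.append(x)
    return results

show_trace(gd(0.2))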
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    d2l.set_figsize(figsize)
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')
        d2l.plt.legend(legend)
    d2l.plt.show()
def semilogy(x_vals, y_vals, x_label, y_label, title, x2_vals=None,
             y2_vals=None, legend=None, figsize=(15, 5)):
    d2l.set_figsize(figsize)  # figure size
    d2l.plt.figure()          # open a new, empty figure
    d2l.plt.title(title)      # add the title
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)  # first curve
    if x2_vals and y2_vals:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')  # second curve
        d2l.plt.legend(legend)  # curve labels
def show_trace(res):
    n = max(abs(min(res)), abs(max(res)), 10)
    f_line = np.arange(-n, n, 0.1)  # symmetric x range that covers the trace
    d2l.set_figsize(figsize=(15, 5))
    d2l.plt.plot(f_line, [x**2 for x in f_line])  # y = x^2
    d2l.plt.plot(res, [x**2 for x in res], '-o')  # trajectory of x
    # Axis labels
    d2l.plt.xlabel('x')
    d2l.plt.ylabel('f(x)')
        # Continuation of show_bboxes: drawing of each anchor box inside the
        # "for i, bbox in enumerate(bboxes)" loop
        color = colors[i % len(colors)]
        rect = d2l.bbox_to_rect(bbox.asnumpy(), color)
        axes.add_patch(rect)
        if labels and len(labels) > i:
            text_color = 'k' if color == 'w' else 'w'
            axes.text(rect.xy[0], rect.xy[1], labels[i], va='center',
                      ha='center', fontsize=9, color=text_color,
                      bbox=dict(facecolor=color, lw=0))

d2l.set_figsize()
bbox_scale = nd.array((w, h, w, h))
fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, boxes[250, 250, :, :] * bbox_scale,
            ['s=0.75, r=1', 's=0.5, r=1', 's=0.25, r=1', 's=0.75, r=2',
             's=0.75, r=0.5'])

# 9.4.2 Intersection over union (IoU)
# 9.4.3 Labeling anchor boxes in the training set
ground_truth = nd.array([[0, 0.1, 0.08, 0.52, 0.92],
                         [1, 0.55, 0.2, 0.9, 0.88]])
anchors = nd.array([[0, 0.1, 0.2, 0.3], [0.15, 0.2, 0.4, 0.4],
                    [0.63, 0.05, 0.88, 0.98], [0.66, 0.45, 0.8, 0.8],
                    [0.57, 0.3, 0.92, 0.9]])
fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, ground_truth[:, 1:] * bbox_scale, ['dog', 'cat'], 'k')
show_bboxes(fig.axes, anchors * bbox_scale, ['0', '1', '2', '3', '4'])
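# A possible continuation for 9.4.3 (assumptions: `from mxnet import contrib` is
# in scope as in the detection snippet below, and the zero "class prediction"
# placeholder has shape (batch=1, num_classes+1=3, num_anchors=5)):
labels = contrib.nd.MultiBoxTarget(anchors.expand_dims(axis=0),
                                   ground_truth.expand_dims(axis=0),
                                   nd.zeros((1, 3, 5)))
# labels[0]: offsets of each anchor to its assigned ground-truth box,
# labels[1]: mask that zeroes out the offsets of background anchors,
# labels[2]: class assigned to each anchor (0 = background)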
def xyplot(x_vals, y_vals, name):
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.asnumpy(), y_vals.asnumpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
    d2l.plt.show()
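# Minimal usage sketch for xyplot (assumption: NDArray input; plots ReLU and its gradient):
x = nd.arange(-8.0, 8.0, 0.1)
x.attach_grad()
with autograd.record():
    y = x.relu()
xyplot(x, y, 'relu')
y.backward()
xyplot(x, x.grad, 'grad of relu')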
img = image.imread('../img/pikachu.jpg')
feature = image.imresize(img, 256, 256).astype('float32')
X = feature.transpose((2, 0, 1)).expand_dims(axis=0)  # (batch, channel, h, w)

def predict(X):
    anchors, cls_preds, bbox_preds = net(X.as_in_context(ctx))
    cls_probs = cls_preds.softmax().transpose((0, 2, 1))
    output = contrib.nd.MultiBoxDetection(cls_probs, bbox_preds, anchors)
    # Each row of output is [class_id, confidence, xmin, ymin, xmax, ymax];
    # class_id == -1 marks boxes removed as background or by non-maximum suppression
    idx = [i for i, row in enumerate(output[0]) if row[0].asscalar() != -1]
    return output[0, idx]

output = predict(X)
d2l.set_figsize((5, 5))

def display(img, output, threshold):
    fig = d2l.plt.imshow(img.asnumpy())
    for row in output:
        score = row[1].asscalar()
        if score < threshold:  # only draw boxes above the confidence threshold
            continue
        h, w = img.shape[0:2]
        bbox = [row[2:6] * nd.array((w, h, w, h), ctx=row.context)]
        d2l.show_bboxes(fig.axes, bbox, '%.2f' % score, 'w')

display(img, output, threshold=0.3)
def xyplot(x_vals, y_vals, name):
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.asnumpy(), y_vals.asnumpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
def xyplot(x, y, name):
    d2l.set_figsize(figsize=(15, 5))
    d2l.plt.figure()
    d2l.plt.plot(x.asnumpy(), y.asnumpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
""" @Author: [email protected] @Date: 2020-06-08 18:05:53 @LastEditors: [email protected] @LastEditTime: 2020-06-10 14:20:52 @FilePath: /d2l-zh/optimization-algorithm/optimization-and-deepling-learning.py """ import d2lzh as d2l from mpl_toolkits import mplot3d import numpy as np def f(x): return x * np.cos(np.pi * x) if __name__ == "__main__": d2l.set_figsize((4.5, 2.5)) x = np.arange(-1.0, 2.0, 0.1) fig, = d2l.plt.plot(x, f(x)) fig.axes.annotate('local minimum', xy=(-0.3, -0.25), xytext=(-0.77, -1.0), arrowprops=dict(arrowstyle='->')) fig.axes.annotate('global minimum', xy=(1.1, -0.95), xytext=(0.6, 0.8), arrowprops=dict(arrowstyle='->')) d2l.plt.xlabel('x') d2l.plt.ylabel('f(x)') d2l.plt.show()
import d2lzh as d2l
from mpl_toolkits import mplot3d
import numpy as np


# f(x) = x*cos(pi*x) on [-1.0, 2.0]
def f(x):
    return x * np.cos(np.pi * x)


d2l.set_figsize(figsize=(15, 5))
x = np.arange(-1.0, 2.0, 0.1)
fig, = d2l.plt.plot(x, f(x))
fig.axes.annotate('local minimum', xy=(-0.3, -0.25), xytext=(-0.77, -1.0),
                  arrowprops=dict(arrowstyle='->'))
fig.axes.annotate('global minimum', xy=(1.1, -0.95), xytext=(0.6, 0.8),
                  arrowprops=dict(arrowstyle='->'))
d2l.plt.xlabel('x')
d2l.plt.ylabel('f(x)')

# f(x) = x^3 on [-2.0, 2.0]
x = np.arange(-2.0, 2.0, 0.1)
fig, = d2l.plt.plot(x, x**3)
# Annotate the curve: xy is the marked point, xytext is where the text is placed
fig.axes.annotate('saddle point', xy=(0, 0), xytext=(-0.52, -5.0),
                  arrowprops=dict(arrowstyle='->'))