def show_trace(res):
    """Plot the objective f over a symmetric range and overlay the
    optimization trace *res* (the sequence of x values visited)."""
    # Symmetric plotting range wide enough to contain every visited point.
    limit = max(abs(min(res)), abs(max(res)))
    xs = np.arange(-limit, limit, 0.01)
    d2l.set_figsize((3.5, 2.5))
    # Curve of f itself, then the trace with circular markers.
    d2l.plt.plot(xs, [f(pt) for pt in xs], '-')
    d2l.plt.plot(res, [f(pt) for pt in res], '-o')
    d2l.plt.xlabel('x')
    d2l.plt.ylabel('f(x)')
def show_trace(res):
    """Plot f(x) = x^2 and overlay the optimization trace *res*."""
    # Range covers all visited points, with a floor of 10 on each side.
    bound = max(abs(min(res)), abs(max(res)), 10)
    grid = np.arange(-bound, bound, 0.1)
    utils.set_figsize()
    utils.plt.plot(grid, [v * v for v in grid])
    utils.plt.plot(res, [v * v for v in res], '-o')
    utils.plt.xlabel('x')
    utils.plt.ylabel('f(x)')
    utils.plt.show()
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    """Plot y against x with a log-scaled y axis; optionally draw a second
    dotted curve and a legend when both x2_vals and y2_vals are given."""
    utils.set_figsize(figsize)
    utils.plt.xlabel(x_label)
    utils.plt.ylabel(y_label)
    utils.plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        # Second curve is dotted so the two series stay distinguishable.
        utils.plt.semilogy(x2_vals, y2_vals, linestyle=":")
        utils.plt.legend(legend)
    utils.plt.show()
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    """Semilog-y plot of one curve, plus an optional dotted second curve
    with a legend when both x2_vals and y2_vals are provided."""
    d2l.set_figsize(figsize)
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)
    has_second_curve = bool(x2_vals) and bool(y2_vals)
    if has_second_curve:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')
        d2l.plt.legend(legend)
def train_ch7(optimizer_fn, states, hyperparams, features, labels,
              batch_size=10, num_epochs=2):
    """Train a linear-regression model with a hand-written optimizer.

    Runs mini-batch training with d2l.linreg / d2l.squared_loss, records the
    full-dataset loss every 100 examples, prints the final loss and the last
    epoch's wall-clock time, and plots the loss curve.

    Args:
        optimizer_fn: callable ``optimizer_fn([w, b], states, hyperparams)``
            that updates the parameters in place.
        states: optimizer state passed through to ``optimizer_fn``
            (e.g. momentum buffers).
        hyperparams: dict of optimizer hyperparameters (e.g. learning rate).
        features: input matrix; ``features.shape[1]`` determines the number
            of weights.
        labels: regression targets.
        batch_size: mini-batch size for the DataLoader.
        num_epochs: number of passes over the data.
    """
    # Initialize the model.
    net, loss = d2l.linreg, d2l.squared_loss
    # Weights drawn from N(0, 0.01); shape (num_features, 1).
    w = torch.nn.Parameter(torch.tensor(np.random.normal(
        0, 0.01, size=(features.shape[1], 1)), dtype=torch.float32),
        requires_grad=True)
    # Bias initialized to zero; shape (1,).
    b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float32),
                           requires_grad=True)

    def eval_loss():
        # Mean squared loss over the FULL dataset, as a Python float.
        return loss(net(features, w, b), labels).mean().item()

    ls = [eval_loss()]  # loss history, starting with the untrained loss
    data_iter = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
        features, labels), batch_size, shuffle=True)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            l = loss(net(X, w, b), y).mean()  # use the average mini-batch loss
            # Zero gradients; grads are None before the first backward pass.
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()
            l.backward()
            optimizer_fn([w, b], states, hyperparams)  # update the parameters
            if (batch_i + 1) * batch_size % 100 == 0:
                # Record the current training loss every 100 examples.
                ls.append(eval_loss())
    # Print the result and plot the loss curve (timing covers the last epoch).
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    d2l.set_figsize()
    d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    d2l.plt.xlabel('epoch')
    d2l.plt.ylabel('loss')
""" 图像增广 """ import time import torch from torch import nn, optim from torch.utils.data import Dataset, DataLoader import torchvision from PIL import Image import sys sys.path.append("..") import utils device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') utils.set_figsize() img = Image.open('C:/Users/yuange/Pictures/duck.jpg') utils.plt.imshow(img) def apply(img, aug, num_rows=2, num_cols=4, scale=1.5): Y = [aug(img) for _ in range(num_rows * num_cols)] utils.show_images(Y, num_rows, num_cols, scale) # 翻转和剪裁 apply(img, torchvision.transforms.RandomHorizontalFlip()) apply(img, torchvision.transforms.RandomVerticalFlip()) shape_aug = torchvision.transforms.RandomResizedCrop(200, scale=(0.1, 1), ratio=(0.5, 2)) apply(img, shape_aug) # 变化颜色 apply(img, torchvision.transforms.ColorJitter(brightness=0.5))
"""Bounding-box demo: load an image and draw object bounding boxes."""
import numpy as np
import math
import torch
import os
from PIL import Image
import utils as d2l

d2l.set_figsize()
img = Image.open('input/img2083/img/catdog.jpg')
# d2l.plt.imshow(img)  # add a semicolon to show only the figure
w, h = img.size
print("w = %d, h = %d" % (w, h))

# bbox is short for "bounding box"; image is 561*728.
# Format: (upper-left x, upper-left y, lower-right x, lower-right y).
dog_bbox, cat_bbox = [60, 45, 378, 516], [400, 112, 655, 493]


def bbox_to_rect(bbox, color):
    # This function is saved in the d2lzh_pytorch package for later use.
    # Convert a bounding box from (upper-left x, upper-left y,
    # lower-right x, lower-right y) to the matplotlib format:
    # ((upper-left x, upper-left y), width, height).
    return d2l.plt.Rectangle(xy=(bbox[0], bbox[1]), width=bbox[2] - bbox[0],
                             height=bbox[3] - bbox[1], fill=False,
                             edgecolor=color, linewidth=2)


# fig = d2l.plt.imshow(img)
# fig.axes.add_patch(bbox_to_rect(dog_bbox, 'blue'))
# fig.axes.add_patch(bbox_to_rect(cat_bbox, 'red'))