# ---- Plot training vs. validation loss curves ----
# valid_curve records one loss value per validation pass (run every
# `val_interval` epochs), so its x positions must be rescaled to iteration
# counts to line up with the per-iteration training curve.
valid_x = np.arange(1, len(valid_curve) + 1) * train_iters * val_interval
valid_y = valid_curve

plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')
plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()

# ============================ inference ============================
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.join(BASE_DIR, "test_data")

test_data = RMBDataset(data_dir=test_dir, transform=valid_transform)
valid_loader = DataLoader(dataset=test_data, batch_size=1)

# FIX: run inference in eval mode with gradient tracking disabled.
# The original left the net in training mode and used the deprecated
# `outputs.data` to detach from autograd, which needlessly built the
# autograd graph for every forward pass.
net.eval()
with torch.no_grad():
    for i, data in enumerate(valid_loader):
        # forward
        inputs, labels = data
        outputs = net(inputs)
        _, predicted = torch.max(outputs, 1)

        # Class index 0 is the 1-yuan note; any other index is treated
        # as the 100-yuan note (binary classifier).
        rmb = 1 if predicted.numpy()[0] == 0 else 100
        print("模型获得{}元".format(rmb))
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model.lenet import LeNet
from tools.common_tools import set_seed
from tools.my_dataset import RMBDataset

# Fix the random seed so the sampled batch is reproducible across runs.
set_seed(1)

writer = SummaryWriter(comment='test_your_comment',
                       filename_suffix="_test_your_filename_suffix")

# Locate the training split and build the dataset / loader.
split_dir = os.path.join("rmb_split")
train_dir = os.path.join(split_dir, "train")

transform_compose = transforms.Compose([
    transforms.Resize((32, 64)),
    transforms.ToTensor(),
])
train_data = RMBDataset(data_dir=train_dir, transform=transform_compose)
train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)

# Pull a single batch, tile it into a grid, and log it to TensorBoard.
batch_images, batch_labels = next(iter(train_loader))
grid = vutils.make_grid(batch_images, nrow=4, normalize=True, scale_each=True)
writer.add_image("input img", grid, 0)
writer.close()
# Training-time augmentation: resize, random crop with padding, and a
# high-probability grayscale conversion before tensor conversion and
# normalization. (norm_mean / norm_std are defined outside this chunk.)
train_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomCrop(32, padding=4),
    transforms.RandomGrayscale(p=0.8),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# Validation uses deterministic preprocessing only (no augmentation).
valid_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# Build the dataset instances (train_dir / valid_dir come from earlier code).
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# Build the DataLoaders; only the training loader shuffles.
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5: model ============================
# Two-class LeNet (1-yuan vs. 100-yuan notes) with custom weight init.
net = LeNet(classes=2)
net.initialize_weights()

# ============================ step 3/5: loss function ============================
criterion = nn.CrossEntropyLoss()  # choose the loss function

# ============================ step 4/5: optimizer ============================
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.data import DataLoader
from tools.my_dataset import RMBDataset
from torch.utils.data import Dataset
from model.lenet import LeNet

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Pick GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ========================== 1
# Experiment 1: plain dataset/loader without any transform.
# Each experiment below is gated by a `flag` toggle; flip the commented
# `flag = 1` line to enable it.
num_samples=0
flag = 0
# flag = 1
if flag:
    # train_dir = os.path.join("..", "data", "rmb_split", "train")
    train_dir = os.path.join("..", "..", "data", "rmb_split", "train")
    train_data = RMBDataset(data_dir=train_dir)
    # Build the DataLoader
    train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)

# ========================== 2
# Experiment 2: FiveCrop produces a tuple of PIL crops, so ToTensor must be
# applied per-crop inside a Lambda and the results stacked; applying
# ToTensor directly raises:
# TypeError: pic should be PIL Image or ndarray. Got <class 'torch.Tensor'>
flag = 0
# flag = 1
if flag:
    train_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.FiveCrop(200),
        transforms.Lambda(lambda crops: torch.stack(
            [(transforms.ToTensor()(crop)) for crop in crops])),
        # NOTE(review): this chunk is truncated here — the Compose list
        # continues beyond the visible source.