def test():
    transformImg = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        # MNIST images are single-channel, so normalize with one mean/std value
        torchvision.transforms.Normalize((0.5,), (0.5,))
    ])
    test_set = torchvision.datasets.MNIST(
        root='D:\\github\\MNIST_mianjin\\data\\data',
        train=False,
        download=True,
        transform=transformImg)
    test_loader = torch.utils.data.DataLoader(test_set, num_workers=4)

    net = LeNet5()
    net.cuda()
    # Load the trained model parameters
    name = 'checkpoints/LeNet5_1.pth'
    net.load_state_dict(torch.load(name))
    print(net)

    # Evaluate model accuracy on the test set
    correct = 0
    total = 0
    with torch.no_grad():
        for test_data in test_loader:
            inputs, actual_val = test_data
            total += actual_val.size(0)
            predicted_val = net(inputs.cuda())
            predicted_val = predicted_val.cpu()  # move predictions from GPU back to CPU
            max_score, idx = torch.max(predicted_val, 1)
            correct += (idx == actual_val).sum().item()
    print("Classifier Accuracy: ", correct / total * 100)
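# NOTE: the LeNet5 class used throughout these snippets comes from a project
# module (models/LeNet5.py) that is not shown. A minimal sketch of a LeNet-5
# style network for 1x28x28 MNIST input is given below purely for reference;
# the actual project class may differ (layer sizes, activations, a `prune`
# flag, and so on).
import torch.nn as nn


class LeNet5(nn.Module):
    def __init__(self, num_classes=10):
        super(LeNet5, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5, padding=2),  # 28x28 -> 28x28
            nn.ReLU(),
            nn.MaxPool2d(2),                            # 28x28 -> 14x14
            nn.Conv2d(6, 16, kernel_size=5),            # 14x14 -> 10x10
            nn.ReLU(),
            nn.MaxPool2d(2),                            # 10x10 -> 5x5
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)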
def main(train_cfg):
    cfg = yaml2dict(train_cfg)
    net = LeNet5()
    train_loader, val_loader, test_loader = load_mnist(batch_size=128, num_workers=64)

    def bo_eval_func(params):
        # Train a fresh model with the candidate hyperparameters and return its validation score
        net_bo = LeNet5()
        score = train_eval(net_bo, train_loader, val_loader, params, dtype, device)
        return score

    # Search space for Bayesian optimization
    parameters = [
        {"name": "lr", "type": "range", "bounds": [1e-6, 0.4], "log_scale": True},
        {"name": "beta1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "beta2", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "weight_decay", "type": "range", "bounds": [0.0, 0.01]},
    ]
    best_parameters, values, experiment, model = optimize(
        parameters,
        evaluation_function=bo_eval_func,
        objective_name="accuracy")
    print(best_parameters)

    # Retrain on the combined train + validation data with the best
    # hyperparameters, then evaluate on the test set
    combined_train_valid_set = torch.utils.data.ConcatDataset([
        train_loader.dataset.dataset,
        val_loader.dataset.dataset,
    ])
    combined_train_valid_loader = torch.utils.data.DataLoader(
        combined_train_valid_set,
        batch_size=128,
        shuffle=True,
        num_workers=64)
    acc = train_eval(net, combined_train_valid_loader, test_loader,
                     best_parameters, dtype, device)
    print("Final accuracy is {}".format(acc))
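# NOTE: the optimize(...) call above matches the signature of Ax's managed
# optimization loop (https://ax.dev). Assuming that is the dependency in use,
# the import and a minimal self-contained call look like this; the toy
# quadratic objective below merely stands in for bo_eval_func.
from ax.service.managed_loop import optimize


def toy_eval(params):
    # Ax maximizes the returned value by default (pass minimize=True otherwise)
    lr = params["lr"]
    return -(lr - 0.1) ** 2


best_parameters, values, experiment, model = optimize(
    parameters=[{"name": "lr", "type": "range", "bounds": [1e-6, 0.4], "log_scale": True}],
    evaluation_function=toy_eval,
    objective_name="score",
    total_trials=10,
)
print(best_parameters)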
def train_models(trainX, testX, trainY, testY):
    print("[INFO] initialize models")
    models = []
    models.append(SimpleNet(width, height, depth, classes))
    models.append(LeNet5(width, height, depth, classes))
    print("[INFO] the number of models :", len(models))
    Hs = []
    # Train each model in turn
    for i in np.arange(0, len(models)):
        # Initialize the optimizer and model
        print("[INFO] training model {}/{}".format(i + 1, len(models)))
        # opt = Adam(lr=lr, decay=1e-4 / epochs)
        # opt = SGD(lr=0.001, decay=0.01 / 40, momentum=0.9, nesterov=True)

        # Train the network
        H = models[i].fit_generator(
            aug.flow(trainX, trainY, batch_size=bs),
            validation_data=(testX, testY),
            epochs=epochs,
            steps_per_epoch=len(trainX) // bs,
            verbose=1)

        # Save the model to disk
        p = ['save_models', "model_{}.model".format(i)]
        models[i].save(os.path.sep.join(p))
        # Keep the training history for this model
        Hs.append(H)

        # Plot the training loss and accuracy
        N = epochs
        p = ['model_{}.png'.format(i)]
        plt.style.use('ggplot')
        plt.figure()
        plt.plot(np.arange(0, N), H.history['loss'], label='train_loss')
        plt.plot(np.arange(0, N), H.history['val_loss'], label='val_loss')
        plt.plot(np.arange(0, N), H.history['acc'], label='train_acc')
        plt.plot(np.arange(0, N), H.history['val_acc'], label='val_acc')
        plt.title("Training Loss and Accuracy for model {}".format(i))
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend()
        plt.savefig(os.path.sep.join(p))
        plt.close()
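# NOTE: `aug`, `bs`, `epochs`, `width`, `height`, `depth` and `classes` used by
# train_models() are defined outside this snippet. The `aug.flow(...)` call
# suggests a Keras ImageDataGenerator; a minimal sketch of such a setup (an
# assumption, not the original code) would be:
from keras.preprocessing.image import ImageDataGenerator

aug = ImageDataGenerator(rotation_range=10,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.1,
                         fill_mode="nearest")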
def get_model(model_type, use_gpu, prune):
    """Return the selected network model."""
    if model_type == 'lenet-300-100':
        from models.LeNet_300_100 import LeNet
        net = LeNet(prune)
    elif model_type == 'lenet5':
        from models.LeNet5 import LeNet5
        net = LeNet5(prune)
    else:
        print('The selected model is unavailable.')
        sys.exit()
    if use_gpu:
        net = net.cuda()
    return net
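# Example usage of get_model() (a hypothetical call matching the signature
# above): select the LeNet-5 variant, move it to the GPU, and disable pruning.
net = get_model(model_type='lenet5', use_gpu=True, prune=False)
print(net)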
def main(train_cfg):
    cfg = yaml2dict(train_cfg)
    net = LeNet5()
    '''
    if torch.cuda.device_count() > 1:
        net = torch.nn.parallel.DataParallel(net)
    '''
    train_loader, val_loader, test_loader = load_mnist(batch_size=128, num_workers=64)
    combined_train_valid_set = torch.utils.data.ConcatDataset([
        train_loader.dataset.dataset,
        val_loader.dataset.dataset,
    ])
    combined_train_valid_loader = torch.utils.data.DataLoader(
        combined_train_valid_set,
        batch_size=512,
        shuffle=True,
        num_workers=128)
    acc = train_eval(net, combined_train_valid_loader, test_loader, cfg, dtype, device)
    print("Final accuracy is {}".format(acc))
def bo_eval_func(params):
    net_bo = LeNet5()
    score = train_eval(net_bo, train_loader, val_loader, params, dtype, device)
    return score
svhn_trans = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((32, 32)),
    transforms.ToTensor()
])
test_svhn_loader = torch.utils.data.DataLoader(
    dsets.SVHN('./data', split='test', download=True, transform=svhn_trans),
    batch_size=test_batch_size,
    shuffle=False)

# Model and loss
model = []
modelparams = []
for k in range(num_agents):
    net = LeNet5().to(device)
    model.append(net)
    modelparams.append(copy.deepcopy(net))
criterion = nn.CrossEntropyLoss(reduction='sum')

# Optimizer
optimizerlist = []
iterno = 1
for k in range(num_agents):
    optimizer = dsgld(model[k].parameters(),
                      allmodels=modelparams,
                      adj_vec=adj[k, :],
                      alpha=alpha0,
                      beta=beta0,
                      norm_sigma=0.0,
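# NOTE: `adj` above is indexed per agent (adj[k, :]) and handed to the dsgld
# optimizer as that agent's mixing weights; how it is built is not shown. A
# common choice (an assumption, not the original code) is a doubly stochastic
# ring topology where each agent averages with its two neighbours:
import numpy as np

num_agents = 5  # example value; the real count is set elsewhere
adj = np.zeros((num_agents, num_agents))
for k in range(num_agents):
    adj[k, k] = 0.5                          # self weight
    adj[k, (k - 1) % num_agents] += 0.25     # left neighbour
    adj[k, (k + 1) % num_agents] += 0.25     # right neighbour
# Every row sums to 1 and the matrix is symmetric, so it is doubly stochastic.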
def train():
    transformImg = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        # MNIST images are single-channel, so normalize with one mean/std value
        torchvision.transforms.Normalize(mean=[0.485], std=[0.229])
    ])
    train_data = torchvision.datasets.MNIST(
        root='D:\\github\\MNIST_mianjin\\data\\data',
        train=True,
        download=False,
        transform=transformImg)

    # Split the training data into a training set and a validation set (80:20)
    idx = list(range(len(train_data)))
    np.random.seed(1009)
    np.random.shuffle(idx)  # shuffle the indices
    train_idx = idx[:int(0.8 * len(idx))]
    valid_idx = idx[int(0.8 * len(idx)):]

    # Samplers that draw the training and validation subsets
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idx)
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_idx)

    # Load training and validation data based on the samplers above.
    # Each batch holds 30 images; SubsetRandomSampler reshuffles both subsets
    # every epoch, while the test set is never shuffled.
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=30,
                                               sampler=train_sampler,
                                               num_workers=4)
    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=30,
                                               sampler=valid_sampler,
                                               num_workers=4)

    net = LeNet5()
    net.cuda()
    # Loss function
    loss_func = torch.nn.CrossEntropyLoss()
    # Optimizer
    optimization = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # Training loop
    numEpochs = 20
    training_accuracy = []
    validation_accuracy = []
    for epoch in range(numEpochs):
        epoch_training_loss = 0.0
        num_batches = 0
        for batch_num, training_batch in enumerate(train_loader):
            inputs, labels = training_batch
            inputs, labels = inputs.cuda(), labels.cuda()
            optimization.zero_grad()
            forward_output = net(inputs)
            loss = loss_func(forward_output, labels)
            loss.backward()
            optimization.step()
            epoch_training_loss += loss.item()
            num_batches += 1
        print("epoch: ", epoch, ", loss: ", epoch_training_loss / num_batches)

        # Accuracy on the training set
        accuracy = 0.0
        num_batches = 0
        for batch_num, training_batch in enumerate(train_loader):
            num_batches += 1
            inputs, actual_val = training_batch
            predicted_val = net(inputs.cuda())
            # Move predictions to the CPU as a numpy array and take the argmax of every row
            predicted_val = predicted_val.detach().cpu().numpy()
            predicted_val = np.argmax(predicted_val, axis=1)
            accuracy += accuracy_score(actual_val.numpy(), predicted_val)
        training_accuracy.append(accuracy / num_batches)

        # Accuracy on the validation set
        accuracy = 0.0
        num_batches = 0
        for batch_num, validation_batch in enumerate(valid_loader):
            num_batches += 1
            inputs, actual_val = validation_batch
            predicted_val = net(inputs.cuda())
            predicted_val = predicted_val.detach().cpu().numpy()
            predicted_val = np.argmax(predicted_val, axis=1)
            accuracy += accuracy_score(actual_val.numpy(), predicted_val)
        validation_accuracy.append(accuracy / num_batches)

    # Save the trained model
    models_name = 'checkpoints/LeNet5.pth'
    torch.save(net.state_dict(), models_name)

    # Plot training and validation accuracy over the epochs
    epochs = list(range(numEpochs))
    pyplot.figure()
    pyplot.plot(epochs, training_accuracy, 'r')
    pyplot.plot(epochs, validation_accuracy, 'g')
    pyplot.xlabel("Epochs")
    pyplot.ylabel("Accuracy")
    pyplot.show()
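# NOTE: the manual index shuffling + SubsetRandomSampler split in train() can
# also be written with torch.utils.data.random_split in recent PyTorch
# versions. A minimal equivalent sketch (same 80/20 ratio and seed; the './data'
# root is illustrative) is:
import torch
import torchvision

dataset = torchvision.datasets.MNIST(root='./data', train=True, download=True,
                                     transform=torchvision.transforms.ToTensor())
n_train = int(0.8 * len(dataset))
train_set, valid_set = torch.utils.data.random_split(
    dataset, [n_train, len(dataset) - n_train],
    generator=torch.Generator().manual_seed(1009))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=30, shuffle=True, num_workers=4)
valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=30, shuffle=False, num_workers=4)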
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from dataset import Dataset
from models.MyCNN import MyCNN
from models.LeNet5 import LeNet5
import cv2

dst = Dataset()

# Instantiate the model (the LeNet5 assignment overrides the MyCNN one above it)
my_model = MyCNN()
my_model = LeNet5()
# Loss function
loss_fn = nn.CrossEntropyLoss()
# Optimizer
optimizer = optim.Adam(my_model.parameters(), lr=1e-4)
# Move all model parameters to the GPU if one is available
if torch.cuda.is_available():
    my_model = my_model.cuda()


def show():
    imgs, labels = next(iter(dst.loader_train))
    # Stitch one batch of images into a single sprite-sheet grid for display;
    # the resulting tensor has shape [channel, height, width]
    img = torchvision.utils.make_grid(imgs)
    # Convert to a numpy array and reorder the dimensions to [height, width, channel],
    # because cv2.imshow() below expects data in that layout
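# NOTE: the show() function above breaks off before the display step. A
# plausible completion (a sketch, assuming cv2.imshow() is the intended viewer
# and using a hypothetical show_grid helper) would be:
import cv2
import torchvision


def show_grid(imgs):
    grid = torchvision.utils.make_grid(imgs)      # [C, H, W], values in [0, 1]
    grid = grid.numpy().transpose((1, 2, 0))      # -> [H, W, C] for OpenCV
    cv2.imshow('batch', grid)
    cv2.waitKey(0)
    cv2.destroyAllWindows()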