def train():
    # net = alexnet()
    # print(net)
    # use_gpu = True
    # if use_gpu:
    #     net = net.cuda()
    # x, y = Generations(200)
    torch_device = torch.device('cuda')
    # train_x, train_y, test_x, test_y, val_x, val_y = getData()

    # Load the networks
    layer = 1           # number of input channels
    use_gpu = True      # whether to use the GPU
    pretrained = False  # whether to start from a pretrained model
    batch_size = 30
    netlist = ['mobilenet', 'resnet', 'shufflenet', 'squeezenet', 'alexnet',
               'densenet', 'googlenet', 'mnastnet', 'vgg16']
    # netlist = ['mobilenet','resnet','shufflenet','squeezenet','alexnet','densenet','googlenet','mnastnet']
    # netlist = ['mobilenet','resnet','vgg16']
    # netlist = ['googlenet']

    Allacc = []
    Alllos = []
    val_Allacc = []
    val_Alllos = []
    test_Allacc = []
    test_Alllos = []

    for netname in netlist:
        if netname == 'mobilenet':
            net = modelnet.mobilenet(layer, use_gpu, pretrained)
        elif netname == 'resnet':
            net = modelnet.resnet(layer, use_gpu, pretrained)
        elif netname == 'shufflenet':
            net = modelnet.shufflenet(layer, use_gpu, pretrained)
        elif netname == 'squeezenet':
            net = modelnet.squeezenet(layer, use_gpu, pretrained)
        elif netname == 'alexnet':
            net = modelnet.alexnet(layer, use_gpu, pretrained)
        elif netname == 'densenet':
            net = modelnet.densenet(layer, use_gpu, pretrained)
        elif netname == 'googlenet':
            net = modelnet.googlenet(layer, use_gpu, pretrained)
        elif netname == 'mnastnet':
            net = modelnet.mnasnet(layer, use_gpu, pretrained)
        elif netname == 'vgg16':
            net = modelnet.vgg16(layer, use_gpu, pretrained)
        # print(netname)
        print(net)

        # Loss and optimizer
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
        # optimizer = torch.optim.Adam(net.classifier.parameters())

        # Train the model: 3 classes, 3000 labels each
        y0 = np.zeros(3000, dtype=int)
        y1 = np.ones(3000, dtype=int)
        y2 = np.ones(3000, dtype=int) * 2
        y_label = np.concatenate((y0, y1, y2), axis=0)
        scale = 0
        loc = 1
        ## Generate the test images from a Gaussian noise mask (loc/scale above)

        maxacc = []
        Accuracy_list = []
        Loss_list = []
        val_Accuracy_list = []
        val_Loss_list = []
        test_Accuracy_list = []
        test_Loss_list = []
        tempacc = 0
        num_i = 4  # extra index passed through to _next_batch

        for epoch in range(1):
            # optimizer = torch.optim.Adam(net.parameters())
            # Shuffle the data and labels
            num = random.randint(1, 2000)
            random.seed(num)
            random.shuffle(y_label)
            index_in_epoch = 0
            running_loss = 0.0
            running_correct = 0
            batch = 0
            for iters in range(len(y_label) // batch_size):
                batch += 1
                # loc is the mean, scale the standard deviation, size the output shape
                mask = np.random.normal(size=(224, 224), scale=scale, loc=loc)
                batch_x, batch_y, index_in_epoch = _next_batch(y_label, batch_size,
                                                               index_in_epoch, mask, num_i)
                # for step, (inputs, labels) in enumerate(trainset_loader):
                # batch_xs = preprocess(batch_xs, layer)
                # batch_x = np.array([t.numpy() for t in batch_xs])
                # optimizer.zero_grad()  # zero the gradients
                labels = batch_y.copy()
                tempdata = np.reshape(batch_x, (batch_size, 1, 224, 224))
                batch_xx = torch.tensor(tempdata, dtype=torch.float)
                if use_gpu:
                    # batch_xx = batch_xx.to(torch_device)
                    batch_xx = Variable(batch_xx.cuda())
                    labels = Variable(torch.tensor(labels).cuda())
                else:
                    batch_xx, labels = Variable(batch_xx), Variable(torch.tensor(labels))
                optimizer.zero_grad()
                output = net(batch_xx)
                if netname == 'googlenet':
                    # GoogLeNet in training mode returns a 3-field namedtuple
                    # (logits, aux_logits2, aux_logits1); keep only the main logits
                    if len(output) == 3:
                        output = output.logits
                _, pred = torch.max(output.data, 1)
                # loss = criterion(output, onehotLab(labels, False))
                loss = criterion(output, labels)
                # loss = loss.requires_grad_()  # redundant: the loss already tracks gradients
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                running_correct += torch.sum(pred == labels)
                if running_correct.item() / (batch_size * batch) > 0:
                    print("Batch {}, Train Loss:{:.6f}, Train ACC:{:.4f}".format(
                        batch,
                        running_loss / (batch_size * batch),
                        running_correct.item() / (batch_size * batch)))
                    # print('Predicted labels: {}, true labels: {}'.format(pred, labels))
                    maxacc.append(running_correct.item() / (batch_size * batch))
                    Accuracy_list.append(running_correct.item() / (batch_size * batch))
                    Loss_list.append(running_loss / (batch_size * batch))

        '''
        print('####################### Running the validation set ################')
        val_Accuracy, val_Loss = val_train(net, netname, criterion, mask, batch_size, use_gpu)
        # Update the best accuracy and save the model
        if val_Accuracy - tempacc > 0:
            tempacc = val_Accuracy
            torch.save(net, os.path.join('/media/liqiang/windata/project/classification/plugin/model', netname + '_' + 'net.pkl'))
        val_Accuracy_list.append(val_Accuracy)
        val_Loss_list.append(val_Loss)
        print('####################### Validation set finished ################')
        print('####################### Running the test set ################')
        test_Accuracy, test_Loss = test_train(netname, criterion, mask, batch_size, use_gpu)
        test_Accuracy_list.append(test_Accuracy)
        test_Loss_list.append(test_Loss)
        print('####################### Test set finished ################')
        '''

        # Save the trained network
        torch.save(net, os.path.join('/media/liqiang/windata/project/classification/plugin/model', 'ex4' + netname + '_' + 'net.pkl'))
        print('Predicted labels: {}, true labels: {}'.format(pred, labels))
        y1 = Accuracy_list
        y2 = Loss_list
        Allacc.append(y1)
        Alllos.append(y2)
        val_Allacc.append(val_Accuracy_list)
        val_Alllos.append(val_Loss_list)
        test_Allacc.append(test_Accuracy_list)
        test_Alllos.append(test_Loss_list)

    ### Save the training curves for the training set
    for i in range(len(netlist)):
        plt.plot(range(0, len(Allacc[i])), Allacc[i], label=netlist[i])
    plt.legend()
    plt.xlabel('Iters')
    plt.ylabel('Accuracy')
    plt.savefig(os.path.join('/media/liqiang/windata/project/classification/plugin/result', 'ex4' + 'train_' + "accuracy.jpg"))
    plt.show()
    plt.close()

    fig = plt.figure()
    bax = brokenaxes(ylims=((-0.001, .04), (.06, .07)), hspace=.05, despine=False)
    for i in range(len(netlist)):
        # plt.plot(range(0, len(Alllos[i])), Alllos[i], label=netlist[i])
        # plt.legend()
        bax.plot(range(0, len(Alllos[i])), Alllos[i], label=netlist[i])
    bax.legend()
    # plt.xlabel('Loss vs. iters')
    # plt.ylabel('Loss')
    bax.set_xlabel('Iters')
    bax.set_ylabel('Loss')
    # plt.yscale('log')
    # plt.ylim([-0.01, 0.06])
    plt.savefig(os.path.join('/media/liqiang/windata/project/classification/plugin/result', 'ex4' + 'train_' + "loss.jpg"))
    plt.show()
    plt.close()
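# ---------------------------------------------------------------------------
# Note: train() above relies on a `_next_batch` helper that is defined
# elsewhere in this project. The function below is only an illustrative sketch
# of the assumed contract: it consumes the shuffled label array sequentially
# and returns a (batch_x, batch_y, new_index) tuple. The real helper loads the
# corresponding 224x224 images from disk; the random placeholder images, the
# wrap-around logic, and the name `_next_batch_sketch` are assumptions, not
# part of the original code.
# ---------------------------------------------------------------------------
def _next_batch_sketch(y_label, batch_size, index_in_epoch, mask, num_i):
    """Illustrative stand-in for the project's _next_batch (assumed behavior)."""
    # num_i is assumed to select an image/experiment variant in the real helper;
    # it is unused in this placeholder.
    start = index_in_epoch
    end = start + batch_size
    if end > len(y_label):          # wrap around at the end of an epoch
        start, end = 0, batch_size
    batch_y = y_label[start:end]
    # Placeholder inputs: random 224x224 maps modulated by the Gaussian mask
    batch_x = np.random.rand(batch_size, 224, 224) * mask
    return batch_x, batch_y, end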
# Legacy version of train() kept for reference (commented out):
'''
def train():
    # net = alexnet()
    # print(net)
    # use_gpu = True
    # if use_gpu:
    #     net = net.cuda()
    # x, y = Generations(200)
    torch_device = torch.device('cuda')
    # train_x, train_y, test_x, test_y, val_x, val_y = getData()

    # Load the networks
    layer = 1           # number of input channels
    use_gpu = True      # whether to use the GPU
    pretrained = False  # whether to start from a pretrained model
    batch_size = 30
    netlist = ['mobilenet', 'resnet', 'shufflenet', 'squeezenet', 'alexnet',
               'densenet', 'googlenet', 'mnastnet', 'vgg16']
    # netlist = ['mobilenet','resnet','shufflenet','squeezenet','alexnet','densenet','googlenet','mnastnet']
    # netlist = ['mobilenet','resnet','vgg16']
    # netlist = ['googlenet']

    Allacc = []
    Alllos = []
    val_Allacc = []
    val_Alllos = []
    test_Allacc = []
    test_Alllos = []

    for netname in netlist:
        time_start = time.time()
        if netname == 'mobilenet':
            net = modelnet.mobilenet(layer, use_gpu, pretrained)
        elif netname == 'resnet':
            net = modelnet.resnet(layer, use_gpu, pretrained)
        elif netname == 'shufflenet':
            net = modelnet.shufflenet(layer, use_gpu, pretrained)
        elif netname == 'squeezenet':
            net = modelnet.squeezenet(layer, use_gpu, pretrained)
        elif netname == 'alexnet':
            net = modelnet.alexnet(layer, use_gpu, pretrained)
        elif netname == 'densenet':
            net = modelnet.densenet(layer, use_gpu, pretrained)
        elif netname == 'googlenet':
            net = modelnet.googlenet(layer, use_gpu, pretrained)
        elif netname == 'mnastnet':
            net = modelnet.mnasnet(layer, use_gpu, pretrained)
        elif netname == 'vgg16':
            net = modelnet.vgg16(layer, use_gpu, pretrained)
        # print(netname)
        print(net)

        # Loss and optimizer
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
        # optimizer = torch.optim.Adam(net.classifier.parameters())

        # Train the model: 3 classes, 3000 labels each
        y0 = np.zeros(3000, dtype=int)
        y1 = np.ones(3000, dtype=int)
        y2 = np.ones(3000, dtype=int) * 2
        y_label = np.concatenate((y0, y1, y2), axis=0)
        scale = 0
        loc = 1
        ## Generate the test images from a Gaussian noise mask (loc/scale above)

        maxacc = []
        Accuracy_list = []
        Loss_list = []
        val_Accuracy_list = []
        val_Loss_list = []
        test_Accuracy_list = []
        test_Loss_list = []
        tempacc = 0

        for epoch in range(1):
            # optimizer = torch.optim.Adam(net.parameters())
            # Shuffle the data and labels
            num = random.randint(1, 2000)
            random.seed(num)
            random.shuffle(y_label)
            index_in_epoch = 0
            running_loss = 0.0
            running_correct = 0
            batch = 0
            for iters in range(len(y_label) // batch_size):
                batch += 1
                # loc is the mean, scale the standard deviation, size the output shape
                mask = np.random.normal(size=(224, 224), scale=scale, loc=loc)
                batch_x, batch_y, index_in_epoch = _next_batch(y_label, batch_size,
                                                               index_in_epoch, mask)
                # for step, (inputs, labels) in enumerate(trainset_loader):
                # batch_xs = preprocess(batch_xs, layer)
                # batch_x = np.array([t.numpy() for t in batch_xs])
                # optimizer.zero_grad()  # zero the gradients
                labels = batch_y.copy()
                tempdata = np.reshape(batch_x, (batch_size, 1, 224, 224))
                batch_xx = torch.tensor(tempdata, dtype=torch.float)
                if use_gpu:
                    # batch_xx = batch_xx.to(torch_device)
                    batch_xx = Variable(batch_xx.cuda())
                    labels = Variable(torch.tensor(labels).cuda())
                else:
                    batch_xx, labels = Variable(batch_xx), Variable(torch.tensor(labels))
                optimizer.zero_grad()
                output = net(batch_xx)
                if netname == 'googlenet':
                    # GoogLeNet in training mode returns (logits, aux_logits2, aux_logits1)
                    if len(output) == 3:
                        output = output.logits
                _, pred = torch.max(output.data, 1)
                # loss = criterion(output, onehotLab(labels, False))
                loss = criterion(output, labels)
                # loss = loss.requires_grad_()  # redundant: the loss already tracks gradients
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                running_correct += torch.sum(pred == labels)
                if running_correct.item() / (batch_size * batch) > 0:
                    print("Batch {}, Train Loss:{:.6f}, Train ACC:{:.4f}".format(
                        batch,
                        running_loss / (batch_size * batch),
                        running_correct.item() / (batch_size * batch)))
                    # print('Predicted labels: {}, true labels: {}'.format(pred, labels))
                    maxacc.append(running_correct.item() / (batch_size * batch))
                    Accuracy_list.append(running_correct.item() / (batch_size * batch))
                    Loss_list.append(running_loss / (batch_size * batch))
        time_end = time.time()
        print('totally cost', time_end - time_start)