def qianyixuexi():
    """Transfer learning (qianyixuexi): fine-tune a pretrained ResNet-18 on a 10-class dataset."""
    train_loader, test_loader = Img_S.Get_Dataloader()
    model_ft = models.resnet18()
    model_ft.load_state_dict(torch.load('D:\\Learn_Pytorch\\renet_model\\resnet18-5c106cde.pth'))
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 10)  # replace the classifier head
    model_ft = model_ft.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    # decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

    for epoch in range(epochs):
        train_tool.train_one_epoch(model_ft, criterion, optimizer_ft, train_loader,
                                   device, epoch, print_freq=50)
        exp_lr_scheduler.step()
        train_tool.evaluate(model_ft, criterion, test_loader, device)
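def train_one_epoch_sketch(model, criterion, optimizer, loader, device, epoch,
                           print_freq=50, writer=None):
    # These scripts rely on train_tool.train_one_epoch / train_tool.evaluate,
    # whose source is not shown here. This is only a sketch of the interface
    # the call sites imply (signature and return values inferred, not
    # confirmed): train for one epoch and return (mean loss, predicted labels,
    # true labels). evaluate() would be the torch.no_grad() analogue without
    # the optimizer.
    model.train()
    losses, preds, trues = [], [], []
    for step, (x, y) in enumerate(loader):
        x, y = x.to(device), y.to(device)
        out = model(x.float())
        loss = criterion(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        preds.extend(out.argmax(dim=1).cpu().tolist())
        trues.extend(y.cpu().tolist())
        if step % print_freq == 0:
            print("epoch {} step {} loss {:.4f}".format(epoch, step, loss.item()))
    mean_loss = sum(losses) / len(losses)
    if writer is not None:
        writer.add_scalar("train/loss", mean_loss, epoch)
    return mean_loss, preds, trues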
def Hyperspectral_RNN():
    """Classify hyperspectral data with an RNN."""
    opath = "D:\\ZHR_USE\\torchmodel\\RNNHyperspectral\\Results\\双向RNN_Salinas\\"
    writer = SummaryWriter(opath)
    epochs = 301
    train_loader, test_loader = Ima_S.Get_Salinas_Dataloader_RNNdata(9)

    # hyperparameters
    i_size, hidden_size, num_layers, num_classes = 10, 50, 2, 16
    # net = RNNModel.RNN(i_size, hidden_size, num_layers, num_classes, device)
    net = RNNModel.RNN_Bidirectional(i_size, hidden_size, num_layers, num_classes, device)
    # net = RNNModel.LSTM(i_size, hidden_size, num_layers, num_classes, device)
    # net = RNNModel.GRU_Bidirectional(i_size, hidden_size, num_layers, num_classes, device)
    net.to(device)

    # log the graph with one sample batch
    dataiter = iter(train_loader)
    hydata, label = next(dataiter)
    writer.add_graph(net, (hydata.float().to(device), ))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.01)

    # buffers for intermediate results
    train_loss = []
    train_predict_label, train_true_label = 0, 0
    test_loss = []
    test_predict_label, test_true_label = 0, 0
    test_iter_num = 0
    ACC = 0

    for epoch in range(epochs):
        train_loss_one_epoch, train_predict_label, train_true_label = train_tool.train_one_epoch(
            net, criterion, optimizer, train_loader, device, epoch, 50, writer)
        train_loss.append(train_loss_one_epoch)
        if epoch % 10 == 0:
            test_iter_num += 1
            test_loss_one_epoch, test_predict_label, test_true_label = train_tool.evaluate(
                net, criterion, test_loader, device, test_iter_num, writer)
            test_loss.append(test_loss_one_epoch)
            ACC_one_epoch = metrics.accuracy_score(test_true_label, test_predict_label)
            if ACC_one_epoch > ACC:  # keep only the best model's outputs
                Out_Result(opath, net, train_predict_label, train_true_label,
                           test_predict_label, test_true_label)
                ACC = ACC_one_epoch

    # dump the final loss curves
    train_loss = np.array(train_loss)
    np.savetxt(os.path.join(opath, "train_loss.csv"), train_loss, delimiter=',', fmt="%.04f")
    test_loss = np.array(test_loss)
    np.savetxt(os.path.join(opath, "test_loss.csv"), test_loss, delimiter=',', fmt="%.04f")
    writer.close()
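class RNN_Bidirectional_sketch(nn.Module):
    # RNNModel's source is not shown; this is a minimal, assumed reconstruction
    # of a bidirectional RNN classifier with the constructor signature used
    # above, not the project's actual code.
    def __init__(self, input_size, hidden_size, num_layers, num_classes, device):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size, hidden_size, num_layers,
                          batch_first=True, bidirectional=True)
        # forward and backward outputs are concatenated -> 2 * hidden_size
        self.fc = nn.Linear(2 * hidden_size, num_classes)

    def forward(self, x):
        # x: (batch, seq_len, input_size)
        h0 = torch.zeros(2 * self.num_layers, x.size(0), self.hidden_size,
                         device=x.device)
        out, _ = self.rnn(x, h0)
        return self.fc(out[:, -1, :])  # classify from the last time step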
def Run():
    # TensorBoard visualization (graph logging currently disabled below)
    input_to_model = torch.rand([64, 1, 28, 28])
    train_loader, test_loader = Img_Split.Get_Salinas_Dataloader()
    print("Load end...")
    net = NET(nT).to(device)
    # writer.add_graph(net, input_to_model)
    # writer.close()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    for epoch in range(epochs):
        train_tool.train_one_epoch(net, criterion, optimizer, train_loader,
                                   device, epoch, print_freq=50)
        if epoch % 5 == 0:
            print("test")
            train_tool.evaluate(net, criterion, test_loader, device)
def Run():
    trainloader, testloader = LY9.Get_Y9_Data()
    net = NET.NET_MultiChannel(5)
    net = net.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    # decay LR by a factor of 0.1 every 1000 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=1000, gamma=0.1)

    for epoch in range(epochs):
        train_tool.train_one_epoch(net, criterion, optimizer_ft, trainloader,
                                   device, epoch, print_freq=50)
        exp_lr_scheduler.step()
        if epoch % 100 == 0:
            train_tool.evaluate(net, criterion, testloader, device)

    torch.save(net, "D:\\Net.pkl")                     # whole module (pickled)
    torch.save(net.state_dict(), "D:\\Net_param.pkl")  # parameters only
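def load_saved_net_sketch():
    # How the two artifacts saved above would be loaded back (a sketch, not
    # part of the original scripts). torch.save(net, ...) pickles the whole
    # module and needs NET.NET_MultiChannel importable at load time; the
    # state_dict route (generally preferred) rebuilds the model first.
    net_full = torch.load("D:\\Net.pkl")
    net_rebuilt = NET.NET_MultiChannel(5)
    net_rebuilt.load_state_dict(torch.load("D:\\Net_param.pkl"))
    net_rebuilt.eval()  # switch to inference mode before evaluation
    return net_full, net_rebuilt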
def RNN_All_Param_Run():
    """Classify hyperspectral data with RNNs, sweeping every parameter combination in one run."""
    opath = "D:\\ZHR_USE\\torchmodel\\RNNHyperspectral\\Results\\IndianPines_AllResults\\"
    epochs = 301
    best_acc_dic = {}  # best accuracy of every model/parameter combination

    for s in range(3, 31, 2):  # sweep time steps
        train_loader, test_loader = Ima_S.Get_IPData_RNNData(s)  # load data
        for hidden in range(50, 51, 1):  # sweep hidden sizes
            rnn_models = RNNModel.Get_RNN_Models(10, hidden, 2, 16, device)
            for model_name, net in rnn_models.items():  # sweep model variants
                sub_path = os.path.join(
                    opath, model_name + "\\timestep_" + str(s) + "神经元个数_" + str(hidden))
                if not os.path.exists(sub_path):
                    os.makedirs(sub_path)

                start_time = time.time()
                net.to(device)
                criterion = nn.CrossEntropyLoss()
                optimizer = optim.Adam(net.parameters(), lr=0.01)

                # buffers for intermediate results
                train_loss = []
                train_predict_label, train_true_label = 0, 0
                test_loss = []
                test_predict_label, test_true_label = 0, 0
                test_iter_num = 0
                ACC = 0

                for epoch in range(epochs):
                    train_loss_one_epoch, train_predict_label, train_true_label = train_tool.train_one_epoch(
                        net, criterion, optimizer, train_loader, device, epoch, 50)
                    train_loss.append(train_loss_one_epoch)
                    if epoch % 10 == 0:
                        test_iter_num += 1
                        test_loss_one_epoch, test_predict_label, test_true_label = train_tool.evaluate(
                            net, criterion, test_loader, device, test_iter_num)
                        test_loss.append(test_loss_one_epoch)
                        ACC_one_epoch = metrics.accuracy_score(
                            test_true_label, test_predict_label)
                        print("ACC:", ACC_one_epoch)
                        if ACC_one_epoch > ACC:
                            Out_Result(sub_path, net, train_predict_label, train_true_label,
                                       test_predict_label, test_true_label, epoch)
                            ACC = ACC_one_epoch
                best_acc_dic[model_name + "\ttimestep:" + str(s)
                             + "\t神经元个数:" + str(hidden)] = ACC

                # log elapsed time
                with open(os.path.join(sub_path, "spent_time.txt"), 'w') as fp:
                    print("spent time: " + str(time.time() - start_time), file=fp)

                # dump the final loss curves
                train_loss = np.array(train_loss)
                np.savetxt(os.path.join(sub_path, "train_loss.csv"), train_loss,
                           delimiter=',', fmt="%.04f")
                test_loss = np.array(test_loss)
                np.savetxt(os.path.join(sub_path, "test_loss.csv"), test_loss,
                           delimiter=',', fmt="%.04f")

    best_acc = np.row_stack(list(best_acc_dic.items()))
    np.savetxt(os.path.join(opath, "不同参数精度对比.txt"), best_acc, fmt='%s', delimiter="\t")
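def Get_RNN_Models_sketch(input_size, hidden_size, num_layers, num_classes, device):
    # Hypothetical reconstruction of RNNModel.Get_RNN_Models (its source is
    # not shown): one instance of each variant, keyed by name, so the sweep
    # loop above can iterate over .items(). The class names come from the
    # commented-out alternatives in Hyperspectral_RNN; the body is an assumption.
    return {
        "RNN": RNNModel.RNN(input_size, hidden_size, num_layers, num_classes, device),
        "RNN_Bidirectional": RNNModel.RNN_Bidirectional(
            input_size, hidden_size, num_layers, num_classes, device),
        "LSTM": RNNModel.LSTM(input_size, hidden_size, num_layers, num_classes, device),
        "GRU_Bidirectional": RNNModel.GRU_Bidirectional(
            input_size, hidden_size, num_layers, num_classes, device),
    }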
def Run_8wdata():
    opath = "D:\\ZHR_USE\\1DCNN\\Results\\8wdata_1DCNN_73核3_Adam_9windowsize"
    writer = SummaryWriter(opath)
    window_size = 9
    trainloader, testloader = L8w.Get_Data_Logging_1DCNN(window_size)
    # flattened feature length fed to the fully connected layer
    # (input sequence length is window_size * 2 + 1)
    flat_len = int(((window_size * 2 + 1 - 7) / 2 - 2) / 2)
    net = NET.NET_8wdata_MultiChannel_3(6, flat_len)

    # log the graph with one sample batch (before moving net to the device)
    dataiter = iter(trainloader)
    logging, label = next(dataiter)
    print(logging.shape)
    writer.add_graph(net, (logging.float(), ))
    net.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.Adam(net.parameters(), lr=0.001)
    # decay LR by a factor of 0.1 every 100 epochs (step currently disabled below)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.1)

    train_loss = []
    train_predict_label, train_true_label = 0, 0
    test_loss = []
    test_predict_label, test_true_label = 0, 0
    test_iter_num = 0
    ACC = 0  # best accuracy so far

    # log per-epoch accuracy
    fptrainACC = open(os.path.join(opath, "train_ACC.txt"), 'w')
    fptestACC = open(os.path.join(opath, "test_ACC.txt"), 'w')

    for epoch in range(epochs):
        train_loss_one_epoch, train_predict_label, train_true_label = train_tool.train_one_epoch(
            net, criterion, optimizer_ft, trainloader, device, epoch,
            print_freq=200, writer=writer)
        train_loss.append(train_loss_one_epoch)
        # exp_lr_scheduler.step()
        train_ACC = metrics.accuracy_score(train_true_label, train_predict_label)
        print(train_ACC, file=fptrainACC)
        if epoch % 10 == 0:
            test_loss_one_epoch, test_predict_label, test_true_label = train_tool.evaluate(
                net, criterion, testloader, device, test_iter_num, writer)
            ACC_one_epoch = metrics.accuracy_score(test_true_label, test_predict_label)
            print(ACC_one_epoch, file=fptestACC)
            if ACC_one_epoch > ACC:
                Out_Result(opath, net, train_predict_label, train_true_label,
                           test_predict_label, test_true_label, epoch)
                ACC = ACC_one_epoch
            test_iter_num += 1
            test_loss.append(test_loss_one_epoch)

    fptrainACC.close()
    fptestACC.close()

    train_loss = np.array(train_loss)
    np.savetxt(os.path.join(opath, "train_loss.csv"), train_loss, delimiter=',', fmt="%.04f")
    test_loss = np.array(test_loss)
    np.savetxt(os.path.join(opath, "test_loss.csv"), test_loss, delimiter=',', fmt="%.04f")
    writer.close()
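def flat_len_sketch(window_size):
    # The flat_len formula above mirrors NET_8wdata_MultiChannel_3's conv/pool
    # stack (the exact layer mapping is internal to that class). Worked
    # example for window_size = 9: input length 2 * 9 + 1 = 19, then
    # (19 - 7) / 2 = 6, 6 - 2 = 4, 4 / 2 = 2, i.e. flat_len = 2.
    return int(((window_size * 2 + 1 - 7) / 2 - 2) / 2)

# quick sanity check for the window sizes swept below: 7 -> 1, 9 -> 2
# for w in (7, 9): print(w, flat_len_sketch(w))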
def Run_8wdata_Diff_WindowSize():
    """Export results for every window size in a single run."""
    opath = "D:\\ZHR_USE\\1DCNN\\Results\\"
    ACC_list = []
    for i in range(7, 10, 2):  # window sizes 7 and 9
        subpath = os.path.join(opath, "SGD_" + str(i) + "windowsize_epochs400")
        if not os.path.exists(subpath):
            os.mkdir(subpath)
        trainloader, testloader = L8w.Get_Data_Logging_1DCNN(i)
        flat_len = int(((i * 2 + 1 - 7) / 2 - 2) / 2)
        net = NET.NET_8wdata_MultiChannel_3(6, flat_len)
        net.to(device)

        criterion = nn.CrossEntropyLoss()
        optimizer_ft = optim.SGD(net.parameters(), lr=0.001)
        # decay LR by a factor of 0.1 every 100 epochs
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.1)

        train_ACC, test_ACC = 0, 0
        test_iter_num = 0
        ACC = 0
        train_loss = []
        test_loss = []

        # log per-epoch accuracy
        fptrainACC = open(os.path.join(subpath, "train_ACC.txt"), 'w')
        fptestACC = open(os.path.join(subpath, "test_ACC.txt"), 'w')

        for epoch in range(epochs):
            train_loss_one_epoch, train_predict_label, train_true_label = train_tool.train_one_epoch(
                net, criterion, optimizer_ft, trainloader, device, epoch, print_freq=200)
            train_loss.append(train_loss_one_epoch)
            train_ACC = metrics.accuracy_score(train_true_label, train_predict_label)
            print(train_ACC, file=fptrainACC)

            test_loss_one_epoch, test_predict_label, test_true_label = train_tool.evaluate(
                net, criterion, testloader, device, test_iter_num)
            test_loss.append(test_loss_one_epoch)
            ACC_one_epoch = metrics.accuracy_score(test_true_label, test_predict_label)
            print(ACC_one_epoch, file=fptestACC)
            if ACC_one_epoch > ACC:
                Out_Result(subpath, net, train_predict_label, train_true_label,
                           test_predict_label, test_true_label, epoch)
                ACC = ACC_one_epoch

        fptrainACC.close()
        fptestACC.close()
        test_ACC = metrics.accuracy_score(test_true_label, test_predict_label)

        train_loss = np.array(train_loss)
        np.savetxt(os.path.join(subpath, "train_loss.csv"), train_loss, delimiter=',', fmt="%.04f")
        test_loss = np.array(test_loss)
        np.savetxt(os.path.join(subpath, "test_loss.csv"), test_loss, delimiter=',', fmt="%.04f")
        ACC_list.append(np.array((i, train_ACC, test_ACC)))

    odata = np.row_stack(ACC_list)
    np.savetxt(opath + "SGD400迭代次数不同窗口的ACC.txt", odata, fmt="%.04f")