def main():
    cnn = CNN()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

    # Train the Model
    train_dataloader = my_dataset.get_train_data_loader()
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 10 == 0:
                print("epoch:", epoch, "step:", i, "loss:", loss.item())
            if (i + 1) % 100 == 0:
                torch.save(cnn.state_dict(), "./model.pkl")  # current checkpoint: model.pkl
                print("save model")
        print("epoch:", epoch, "step:", i, "loss:", loss.item())
    torch.save(cnn.state_dict(), "./model.pkl")  # current checkpoint: model.pkl
    print("save last model")
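# A minimal sketch of the multi-hot target encoding that nn.MultiLabelSoftMarginLoss
# expects above: each captcha character becomes a one-hot slice of length
# ALL_CHAR_SET_LEN and the slices are concatenated into one flat vector.
# This is an assumed reconstruction of what the project's one_hot_encoding.encode
# likely does, not a copy of it; captcha_setting.ALL_CHAR_SET / ALL_CHAR_SET_LEN
# are the constants used by the variants further down.
import numpy as np
import captcha_setting

def encode_sketch(text):
    vector = np.zeros(captcha_setting.ALL_CHAR_SET_LEN * len(text), dtype=np.float32)
    for k, ch in enumerate(text):
        idx = k * captcha_setting.ALL_CHAR_SET_LEN + captcha_setting.ALL_CHAR_SET.index(ch)
        vector[idx] = 1.0
    return vector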
def main():
    cnn = CNN_v1()
    cnn.to(DEVICE)
    cnn.train()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[15, 20, 25])

    # Train the Model
    train_dataloader = my_dataset.get_train_data_loader(batch_size)
    for epoch in range(num_epochs):
        for i, (images, labels) in tqdm(enumerate(train_dataloader)):
            images = Variable(images).to(DEVICE)
            labels = Variable(labels.float()).to(DEVICE)
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        lr_scheduler.step()  # step once per epoch so the milestone decay actually applies
        print("epoch:", epoch, "step:", i, "loss:", loss.item())
    torch.save(cnn.state_dict(), "./model.pkl")  # current checkpoint: model.pkl
    print("save last model")
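# Standalone illustration of the MultiStepLR schedule used above: the optimizer's
# learning rate is multiplied by gamma (0.1 by default) each time the scheduler's
# epoch counter reaches a milestone (15, 20, 25). The parameter and values here are
# toy placeholders, only meant to show the call order: optimizer.step() first,
# then lr_scheduler.step(), once per epoch.
import torch

_param = torch.nn.Parameter(torch.zeros(1))
_opt = torch.optim.Adam([_param], lr=1e-3)
_sched = torch.optim.lr_scheduler.MultiStepLR(_opt, milestones=[15, 20, 25])

for _epoch in range(30):
    _opt.step()    # stands in for one full pass over train_dataloader
    _sched.step()  # applies the decay when the scheduler's counter crosses a milestone
    if _epoch in (14, 15, 20, 25):
        print(_epoch, _opt.param_groups[0]['lr'])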
def main(): device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") accuracy_list = [] # Train the Model print("Loading Data") train_dataloader = my_dataset.get_train_data_loader() test_dataloader = my_dataset.get_test_data_loader() print("Data Ready") accuracy = 0 for ttest_num in range(0, 5): print('>>>>>>>>>>>>>>>>> ROUND: ', ttest_num) cnn = CNN() cnn.to(device) cnn.train() print('init net') criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate) accuracy = 0 for epoch in range(num_epochs): print('....') for i, (images1, labels) in enumerate(train_dataloader): images1 = Variable(images1) labels = Variable(labels) #.view(-1,1)) images, labels = images1.to(device), labels.to(device) # images = generator(images) #labels = torch.tensor(labels, dtype=torch.long, device=device) predict_labels = cnn(images) #.view(1,-1)[0] # print(predict_labels.type) # print(labels.type) # print(images.shape) #print(labels) #print(predict_labels) loss = criterion(predict_labels, labels) optimizer.zero_grad() loss.backward() optimizer.step() # if (i+1) % 1500 == 0: # print("epoch:", epoch, "step:", i, "loss:", loss.item()) # if (i+1) % 2500 == 0: # print("save model") correct = 0 total = 0 for i, (images1, labels) in enumerate(test_dataloader): image = images1 #print(image) image = Variable(image) image, labels = image.to(device), labels.to(device) # image = generator(image) predict_label = cnn(image) labels = labels.cpu() predict_label = predict_label.cpu() _, predicted = torch.max(predict_label, 1) total += labels.size(0) # print(predicted,'>>>>>>>>',labels) if (predicted == labels): correct += 1 print( 'Test Accuracy of the model on the %d test images (%d): %f %%' % (total, correct, 100 * correct / total)) if (correct / total > accuracy): accuracy = correct / total torch.save( cnn.state_dict(), "./model_lake/" + model_name.replace('.', '_' + str(ttest_num) + '.')) # torch.save(cnn.state_dict(), "./model_lake/"+model_name) #current is model.pickle print('saved!!!!!!!!!!!!!!!!!!!!!!!') # torch.save(cnn.state_dict(), "./"+model_name) #current is model.pkl print("epoch:", epoch, "step:", i, "loss:", loss.item()) # print('final accuracy: ',accuracy) accuracy_list.append(accuracy) print(accuracy_list)
def main():
    cnn = CNN().cuda()
    cnn.train()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
    best_acc = 0

    # Train the Model
    train_dataloader = my_dataset.get_train_data_loader(batch_size=batch_size)

    # Earlier baseline training loop (checkpointed to ./model_base.pkl), disabled:
    # for epoch in range(num_epochs):
    #     for i, (images, labels) in enumerate(train_dataloader):
    #         images = Variable(images).cuda()
    #         labels = Variable(labels.float()).cuda()
    #         predict_labels = cnn(images)
    #         loss = criterion(predict_labels, labels)
    #         optimizer.zero_grad()
    #         loss.backward()
    #         optimizer.step()
    #         if (i + 1) % 10 == 0:
    #             print("epoch:", epoch, "step:", i, "loss:", loss.item())
    #         if (i + 1) % 100 == 0:
    #             torch.save(cnn.state_dict(), "./model.pkl")
    #             print("save model")
    #     print("epoch:", epoch, "step:", i, "loss:", loss.item())
    # torch.save(cnn.state_dict(), "./model_base.pkl")

    for epoch in range(num_epochs):
        correct = 0
        total = 0
        for i, (images, labels) in enumerate(train_dataloader):
            # lr_schedule (disabled): cosine annealing of the learning rate
            # lr = cosine_anneal_schedule(epoch)
            # for param_group in optimizer.param_groups:
            #     param_group['lr'] = lr
            images = Variable(images).cuda()
            labels = Variable(labels.float()).cuda()
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # decode only the first sample of each batch and keep a running training accuracy
            predict_labels = predict_labels.cpu()
            labels = labels.cpu()
            c0 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_labels[0, 0:captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
            c1 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_labels[0, captcha_setting.ALL_CHAR_SET_LEN:2 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
            c2 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_labels[0, 2 * captcha_setting.ALL_CHAR_SET_LEN:3 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
            c3 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_labels[0, 3 * captcha_setting.ALL_CHAR_SET_LEN:4 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
            predict_labels = '%s%s%s%s' % (c0, c1, c2, c3)
            true_label = one_hot_encoding.decode(labels.numpy()[0])
            total += labels.size(0)
            if (predict_labels == true_label):
                correct += 1
            acc = 100 * correct / total
            if (i + 1) % 10 == 0:
                print("epoch:", epoch, "step:", i, "loss:", loss.item(), "Accuracy:", acc)
            if acc >= best_acc:
                best_acc = acc
                torch.save(cnn.state_dict(), "./model_splic_cos_300_64_0.001.pkl")
                print("save model")
    print("END")
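# The disabled lr_schedule block above calls a cosine_anneal_schedule(epoch) helper
# that is not shown in this file. A hypothetical reconstruction (half-cosine decay
# from learning_rate down to 0 over num_epochs) is sketched below; the project's
# real helper may use different constants.
import math

def cosine_anneal_schedule(epoch, base_lr=None, total_epochs=None):
    base_lr = learning_rate if base_lr is None else base_lr
    total_epochs = num_epochs if total_epochs is None else total_epochs
    return 0.5 * base_lr * (1 + math.cos(math.pi * epoch / total_epochs))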
def main(): device = torch.device("cuda") #if torch.cuda.is_available() else "cpu") # Train the Model print('loading data') train_dataloader = my_dataset.get_train_data_loader() test_dataloader = my_dataset.get_test_data_loader() print('start training') loss_list = [] accuracy_list = [] for ttest_num in range(0, 5): cnn = nn.DataParallel(CNN()) # cnn.load_state_dict(torch.load('./'+model_name)) # cnn.to(device) cnn.cuda() # generator = Generator() # generator.load_state_dict(torch.load('7800.pkl')) # generator.to(device) # generator.eval() cnn.train() print('init net') criterion = nn.MultiLabelSoftMarginLoss() optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate) print('>>>>>>>>>>>>Round:', ttest_num) accuracy = 0 for epoch in range(num_epochs): loss_total = 0 start = datetime.now() for i, (images1, labels, _) in enumerate(train_dataloader): #print(i) images1 = Variable(images1) labels = Variable(labels.float()) images1, labels = images1.to(device), labels.to(device) # images1 = generator(images1) predict_labels = cnn(images1) # print(predict_labels.type) # print(labels.type) loss = criterion(predict_labels, labels) optimizer.zero_grad() loss.backward() optimizer.step() loss_total += loss.item() # if (i+1) % 5000 == 0: # print("epoch:", epoch, "step:", i, "loss:", loss.item()) # if (i+1) % 25000 == 0: # print("save model") correct = 0 total = 0 # print(len(test_dataloader)) for i, (images1, labels, _) in enumerate(test_dataloader): # print('///////////////////////////////') # try: image = images1 image = Variable(image) image, labels = image.to(device), labels.to(device) # image = generator(image) predict_label = cnn(image) labels = labels.cpu() predict_label = predict_label.cpu() c0 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 0:captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c1 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, captcha_setting.ALL_CHAR_SET_LEN:2 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c2 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 2 * captcha_setting.ALL_CHAR_SET_LEN:3 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c3 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 3 * captcha_setting.ALL_CHAR_SET_LEN:4 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c4 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 4 * captcha_setting.ALL_CHAR_SET_LEN:5 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c5 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 5 * captcha_setting.ALL_CHAR_SET_LEN:6 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c6 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 6 * captcha_setting.ALL_CHAR_SET_LEN:7 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] c7 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[ 0, 7 * captcha_setting.ALL_CHAR_SET_LEN:8 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] #c3 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[0, 3 * captcha_setting.ALL_CHAR_SET_LEN:4 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())] predict_label = '%s%s%s%s%s%s%s%s' % (c0, c1, c2, c3, c4, c5, c6, c7) # predict_label = '%s%s%s%s' % (c0, c1, c2, c3) true_label = one_hot_encoding.decode(labels.numpy()[0]) total += 1 #labels.size(0) #save_image(vimage,'temp_result/'+str(i)+'.png') # print(predict_label.upper(),'>>>>>',true_label) if (predict_label.upper() == true_label.upper()): correct += 1 # try: print('Test Accuracy of the model on the %d test images: %f %%' % (total, 100 * correct / total)) # except: # pass 
loss_list.append(loss_total) with open("loss_record_6.txt", "wb") as fp: #Pickling pickle.dump(loss_list, fp) stop = datetime.now() # try: if (correct / total > accuracy): accuracy = correct / total # torch.save(cnn.state_dict(), "./model_lake/"+model_name.replace('.','_'+str(ttest_num)+'.')) #current is model.pickle torch.save(cnn.state_dict(), "./model_lake/" + model_name) #current is model.pickle print('saved!!!!!!!!!!!!!!!!!!!!!!!') # except: # pass print("epoch:", epoch, "step:", i, " time:<", stop - start, "> loss:", loss_total) accuracy_list.append(accuracy) print(sum(accuracy_list) / len(accuracy_list)) # torch.save(cnn.state_dict(), "./"+model_name) #current is model.pkl # print("save last model") print(accuracy_list)
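# The eight c0..c7 argmax slices in the evaluation loop above (and the four-character
# version in the earlier variant) can be collapsed into one helper. A sketch, assuming
# the same captcha_setting constants and a (1, num_chars * ALL_CHAR_SET_LEN) output
# tensor on the CPU:
import numpy as np
import captcha_setting

def decode_prediction(predict_label, num_chars=8):
    step = captcha_setting.ALL_CHAR_SET_LEN
    chars = []
    for k in range(num_chars):
        block = predict_label[0, k * step:(k + 1) * step].data.numpy()
        chars.append(captcha_setting.ALL_CHAR_SET[np.argmax(block)])
    return ''.join(chars)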