# --- Per-group analysis state (first/legacy copy of the setup) ---
# Relies on module-level `main_path` and the CAR/QAR/AMAR metric classes
# being defined elsewhere in this file.
ques_num = ""  # question file name, contains '.txt'
sus_path = main_path + "sus_user"  # folder that contains all suspicious community data
sus_user_set = set()  # ids of suspicious users
sus_name_set = set()  # names of suspicious community users
answered_user_set = set()  # ids of users who answered this question
ques_path = main_path + "all_questions"  # path of the question folder
ques_list = os.listdir(ques_path)  # all question file names
# NOTE(review): the ".DS_Store" removal is disabled in this copy but active
# in the later duplicate of this setup — confirm which version is intended.
# ques_list.remove(".DS_Store")  # remove invisible macOS files
match_user_set = set()  # suspicious users who answered this question
sus_user_group_list = os.listdir(sus_path)
# sus_user_group_list.remove(".DS_Store")
cal_CAR = CAR()
cal_QAR = QAR()
cal_AMAR = AMAR()
all_user_dic = {}
read_path = "/Users/AlanLi/Desktop/Zhihu_data/all_userlist.txt"
# Only the final assignment matters: the earlier duplicate
# `result_path = main_path + "result/"` was dead code and has been removed.
result_path = main_path + "/result/"  # folder storing per-question result files
all_user_name_set = set()  # all vip usernames
reverse_user_dic = {}  # key is username, value is id
original_dic = {}  # original suspicious user id and data (comment truncated in source)
data_set = set()  # tracks whether a data file has existed before
num_of_question = len(ques_list)  # total questions analysed for this group
num_of_noMatched = 0  # questions that have 0 matched users
num_of_matchedQue = 0  # questions with a non-empty match_user_set
num_of_noAMAR = 0  # questions where AMAR cannot be calculated
num_of_allData = 0  # questions with all three data values non-zero
# --- Per-group analysis state (active copy of the setup) ---
# Relies on module-level `main_path` and the CAR/QAR/AMAR metric classes
# being defined elsewhere in this file.
car_qar = "CAR_QAR"  # label for the combined CAR/QAR output
amar = "AMAR"  # label for the AMAR output
question = "/all_questions/"
ques_num = ""  # question file name, contains '.txt'
sus_path = main_path + "sus_user_group"  # folder that contains all suspicious community data
sus_user_set = set()  # ids of suspicious users
sus_name_set = set()  # names of suspicious community users
answered_user_set = set()  # ids of users who answered this question
ques_path = main_path + "all_questions"  # path of the question folder
ques_list = os.listdir(ques_path)  # all question file names
# BUGFIX: unconditional .remove(".DS_Store") raised ValueError whenever the
# invisible macOS file was absent (e.g. on Linux/Windows); guard it instead.
if ".DS_Store" in ques_list:
    ques_list.remove(".DS_Store")
match_user_set = set()  # suspicious users who answered this question
sus_user_group_list = os.listdir(sus_path)
if ".DS_Store" in sus_user_group_list:
    sus_user_group_list.remove(".DS_Store")
cal_CAR = CAR()
cal_QAR = QAR()
cal_AMAR = AMAR()
all_user_dic = {}
read_path = "/Users/AlanLi/Desktop/Zhihu_data/all_userlist.txt"
# Only the final assignment matters: the earlier duplicate
# `result_path = main_path + "result/"` was dead code and has been removed.
result_path = main_path + "/result/"  # folder storing per-question result files
num_of_question = len(ques_list)  # total questions analysed for this group
num_of_noMatched = 0  # questions that have 0 matched users
num_of_matchedQue = 0  # questions with a non-empty match_user_set
num_of_noAMAR = 0  # questions where AMAR cannot be calculated
num_of_allData = 0  # questions with all three data values non-zero

"""
get all users name and id into dictionary
"""
oh_my_god = "/Users/AlanLi/Desktop/Zhihu_data/all_user_list.txt"
# NOTE(review): this handle is never closed in the visible chunk — presumably
# it is read further down; consider converting the reading code to `with`.
ohmyfile = open(oh_my_god, 'r')
def main():
    """Train the CAR segmentation model on the road dataset.

    Runs 1800 epochs with AdamW + cosine warm restarts, reports the average
    loss every 20 iterations to visdom and stdout, and every 100 epochs
    (plus epoch 0) evaluates with ``acc_check`` and checkpoints the weights.

    Relies on module-level globals defined elsewhere in this file:
    ``batch_size``, ``device``, ``vis``, ``value_tracker``, ``acc_check``,
    ``Train_DataSet``, ``Test_DataSet``, ``CAR``.
    """
    dataset = Train_DataSet("../pytorch-3d-semseg/data_road")
    dataset_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                                drop_last=True, num_workers=0)
    dataset_test = Test_DataSet("../pytorch-3d-semseg/data_road")
    dataset_loader_test = DataLoader(dataset_test, batch_size=1, shuffle=False,
                                     drop_last=True, num_workers=0)

    model = CAR().to(device)
    # checkpoint_filename = './model_CAR7/model_epoch_200.pth'
    # if os.path.exists(checkpoint_filename):
    #     model.load_state_dict(torch.load(checkpoint_filename))

    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3,
                                  betas=(0.9, 0.999), eps=1e-8,
                                  weight_decay=4e-6)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=100, T_mult=2)

    loss_plt = vis.line(Y=torch.Tensor(1).zero_(),
                        opts=dict(title='ResNeSt Test',
                                  legend=['loss Semantic Segmentation'],
                                  showlegend=True))

    # Make sure the evaluation/checkpoint folders exist before anything
    # tries to write into them (acc_check / torch.save below).
    os.makedirs("./output/", exist_ok=True)
    os.makedirs("./model/", exist_ok=True)

    iters = len(dataset_loader)
    epochs = 1800
    start = time.time()
    for epoch in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(dataset_loader, 0):
            images, labels, name = data
            images = images.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            output, loss = model(images, labels)
            loss.backward()
            optimizer.step()
            # fractional-epoch step, as CosineAnnealingWarmRestarts supports
            scheduler.step(epoch + i / iters)

            # print statistics every 20 iterations
            running_loss += loss.item()
            if i % 20 == 19:
                # BUGFIX: the loss accumulates over 20 iterations, so average
                # over 20 — the original divided by 10, which inflated both
                # the plotted and the printed loss by 2x.
                avg_loss = running_loss / 20
                value_tracker(vis,
                              torch.Tensor([i + epoch * len(dataset_loader)]),
                              torch.Tensor([avg_loss]), loss_plt)
                print("[%d, %5d] loss: %.3f lr: %f, time: %.3f"
                      % (epoch + 1, i + 1, avg_loss,
                         optimizer.param_groups[0]['lr'],
                         time.time() - start))
                start = time.time()
                running_loss = 0.0
            del loss, output

        # Check accuracy and checkpoint on epoch 0 and every 100th epoch
        if epoch == 0 or epoch % 100 == 99:
            save_path = "./output/"
            acc_check(model, device, dataset_loader_test, epoch, save_path)
            torch.save(model.state_dict(),
                       "./model/model_epoch_{}.pth".format(epoch + 1))
            start = time.time()
    print("Finished Training...!")