["minnan", "nanchang", "kejia", "changsha", "shanghai", "hebei"]) sentences = [] with open("./label_dev_list_fb.txt", "r") as s: for line in s.readlines(): sentences.append(line.strip().split("/")[-1].split()[0].replace( "fb", "pcm")) sentences = np.array(sentences) #print len(sentences) ## ====================================== dev_dataset = TorchDataSet(dev_list, batch_size, chunk_num, dimension) logging.info('finish reading all train data') train_module = LanNet(input_dim=dimension, hidden_dim=128, bn_dim=30, output_dim=language_nums) logging.info(train_module) train_module.load_state_dict( torch.load('/inference/models/model9.model', map_location=lambda storage, loc: storage)) train_module.eval() epoch_tic = time.time() dev_loss = 0. dev_acc = 0. dev_batch_num = 0 ACC = 0 dev_size = 0 start = 0 size = 0
# Training hyper-parameters.
language_nums = 10    # number of target language classes
learning_rate = 0.1   # initial SGD learning rate (halved from epoch `half` on)
batch_size = 20
chunk_num = 10
train_iteration = 10  # total training epochs
display_fre = 50      # presumably a per-batch logging interval — used outside this chunk
half = 4              # epoch index at which learning-rate halving begins

## ======================================
train_dataset = TorchDataSet(train_list, batch_size, chunk_num, dimension)
dev_dataset = TorchDataSet(dev_list, batch_size, chunk_num, dimension)
logging.info('finish reading all train data')

# Optimizer: SGD with momentum performs the gradient updates.
train_module = LanNet(input_dim=dimension, hidden_dim=512, bn_dim=30, output_dim=language_nums)
logging.info(train_module)
optimizer = torch.optim.SGD(train_module.parameters(), lr=learning_rate, momentum=0.9)

# Move the model onto the GPU when one is available.
if use_cuda:
    train_module = train_module.to(device)

for epoch in range(train_iteration):
    # From epoch `half` onward, halve the learning rate each epoch and
    # rebuild the optimizer with the new rate (note this also resets the
    # momentum buffers).  NOTE(review): this statement is cut off here —
    # the remaining SGD arguments lie outside the visible chunk.
    if epoch >= half:
        learning_rate /= 2.
        optimizer = torch.optim.SGD(train_module.parameters(), lr=learning_rate,
# Hyper-parameters for this evaluation/fine-tuning variant.
learning_rate = 0.1   # SGD learning rate
batch_size = 64
chunk_num = 10
train_iteration = 1   # a single pass only
display_fre = 50      # presumably a per-batch logging interval — used outside this chunk
half = 4              # first LR-schedule breakpoint — consumed outside this chunk
half_1 = 7            # second LR-schedule breakpoint — consumed outside this chunk
epoch = 0

## ======================================
train_dataset = TorchDataSet(train_list, batch_size, chunk_num, dimension)
dev_dataset = TorchDataSet(dev_list, batch_size, chunk_num, dimension)
logging.info('finish reading all train data')

# Optimizer: SGD with momentum performs the gradient updates.
train_module = LanNet(input_dim=dimension, hidden_dim=128, bn_dim=30, output_dim=language_nums)
logging.info(train_module)
#optimizer = torch.optim.SGD(train_module.parameters(), lr=learning_rate, momentum=0.9)

# Resume from a saved checkpoint; the map_location lambda keeps
# deserialized tensors on their CPU storage so loading works without a GPU.
# NOTE(review): path './inference/model9.model' differs from the
# '/inference/models/model9.model' used elsewhere in this file — confirm
# which location is correct.
train_module.load_state_dict(
    torch.load('./inference/model9.model',
               map_location=lambda storage, loc: storage))
#optimizer = torch.optim.RMSprop(train_module.parameters(), lr=learning_rate, alpha=0.9)
optimizer = torch.optim.SGD(train_module.parameters(), lr=learning_rate, momentum=0.9)

# Move the model onto the GPU when one is available.
if use_cuda:
    train_module = train_module.to(device)
# NOTE(review): eval() here despite an optimizer being built — dropout is
# disabled and BatchNorm uses running stats even if training follows; verify
# this is intentional.
train_module.eval()