def run_batch(batch_text: List[str], model: BertForSequenceClassification,
              tokenizer: BertTokenizerFast) -> List[Dict]:
    """
    Run the model classifier on a list of paragraphs.

    :param batch_text: List of paragraph strings.
    :param model: The paragraph classifier.
    :param tokenizer: The BERT tokenizer.
    :return: Classification results, one dict per paragraph.
    """
    all_tokenized, batch = paragraphs2batch(batch_text, tokenizer)
    model.eval()
    # Make sure all input tensors are on the right device
    for key, value in batch.items():
        batch[key] = value.to(torch_dev())
    outputs = model(**batch, return_dict=True).logits
    scores = get_classification_scores(outputs)
    results = [{
        'text': batch_text[i],
        'tokens': all_tokenized[i],
        'scores': scores[i]
    } for i in range(len(batch_text))]
    return results
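A minimal usage sketch for run_batch, assuming the project helpers referenced above (paragraphs2batch, torch_dev, get_classification_scores) are importable and that a fine-tuned checkpoint exists at the hypothetical path './paragraph_classifier':

import torch
from transformers import BertForSequenceClassification, BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('./paragraph_classifier')  # hypothetical path
model.to(torch_dev())  # project helper assumed to return the active torch.device

paragraphs = ["First paragraph to classify.", "Second paragraph to classify."]
with torch.no_grad():  # inference only, no gradients needed
    results = run_batch(paragraphs, model, tokenizer)
for r in results:
    print(r['text'][:40], r['scores'])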
def evaluate(eval_dataset, model: BertForSequenceClassification, batch_size=batch_size):
    model.eval()
    eval_sampler = RandomSampler(eval_dataset)
    eval_loader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size)
    tr_acc = 0
    if cuda:
        torch.cuda.empty_cache()
    with torch.no_grad():
        for step, batch in enumerate(tqdm(eval_loader)):
            inputs = {
                'input_ids': batch[1],
                'token_type_ids': batch[2],
                'attention_mask': batch[3],
                'labels': batch[0]
            }
            outputs = model(**inputs)
            # loss = outputs[0]
            logits = outputs[1]
            # tr_loss += loss.item()
            # Compute accuracy
            _, pred = logits.max(1)
            number_corr = (pred == batch[0].view(-1)).long().sum().item()
            tr_acc += number_corr
    return tr_acc / len(eval_dataset)
def create_and_check_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    config.num_labels = self.num_labels
    model = BertForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def make_predictions(
        sentence_array: np.array,
        model: BertForSequenceClassification,
        tokenizer: BertTokenizer,
        device: torch.device,
        hyperparameter_dict: dict) -> typing.Tuple[np.array, np.array]:
    """
    Make predictions for an array of sentences with the given model.

    :param sentence_array: NumPy array of raw sentence strings.
    :param model: Torch model
    :param tokenizer: BERT-base tokenizer
    :param device: Torch device
    :param hyperparameter_dict: Dictionary of model hyperparameters
        ('max_length' for padding, 'batch_size' for the DataLoader)
    :return: Tuple of NumPy arrays: raw logits and softmax probabilities
    """
    # Prepare data
    encoded_sentences = []
    for sentence in sentence_array:
        enc_sent_as_list = tokenizer.encode(sentence, add_special_tokens=True)
        encoded_sentences.append(enc_sent_as_list)
    input_array, input_attention_mask_array = _create_sentence_input_arrays(
        encoded_sentences, hyperparameter_dict['max_length'])
    input_tensor = torch.tensor(input_array)
    input_attention_mask_tensor = torch.tensor(input_attention_mask_array)
    input_dataset = TensorDataset(input_tensor, input_attention_mask_tensor)
    input_data_loader = DataLoader(
        input_dataset, batch_size=hyperparameter_dict['batch_size'])

    # Run model
    model.eval()
    logit_list = []
    for batch in input_data_loader:
        batch_input_ids = batch[0].to(device)
        batch_attention_mask = batch[1].to(device)
        with torch.no_grad():
            outputs = model(input_ids=batch_input_ids,
                            token_type_ids=None,
                            attention_mask=batch_attention_mask)
        logits = outputs[0]
        logit_list.append(logits)
    logits_tensor = torch.cat(logit_list, dim=0)
    prob_tensor = torch.softmax(logits_tensor, dim=1)
    return np.array(logits_tensor.cpu()), np.array(prob_tensor.cpu())
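A short usage sketch for make_predictions, assuming a fine-tuned checkpoint and the private helper _create_sentence_input_arrays defined elsewhere in this module; the checkpoint path and hyperparameter values are illustrative only:

import numpy as np
import torch
from transformers import BertForSequenceClassification, BertTokenizer

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('./finetuned-bert')  # hypothetical path
model.to(device)

sentences = np.array(["This product is great.", "This product is terrible."])
hyperparameters = {'max_length': 64, 'batch_size': 16}  # illustrative values
logits, probs = make_predictions(sentences, model, tokenizer, device, hyperparameters)
predicted_labels = probs.argmax(axis=1)  # class index with the highest probability per sentence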
def create_and_check_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = BertForSequenceClassification(config) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) result = { "loss": loss, "logits": logits, } self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.num_labels]) self.check_loss_output(result)
def test_classifier(model: BertForSequenceClassification, dataset: TensorDataset, batch_size: int): device = select_device() prediction_dataloader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=batch_size) print("") print("Running Prediction...") model.to(device) model.eval() predictions, true_labels = [], [] for batch in prediction_dataloader: b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2] with torch.no_grad(): outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) logits = outputs.logits logits = logits.detach().cpu().numpy() label_ids = b_labels.numpy() #predictions.append(logits) predictions.extend(list(np.argmax(logits, axis=1).flatten())) true_labels.extend(list(label_ids)) print('DONE.') return predictions, true_labels
def model_infer(config, test_load, k):
    print("***********load model weight*****************")
    model_config = BertConfig()
    model_config.vocab_size = len(pd.read_csv('../user_data/vocab', names=["score"]))
    model = BertForSequenceClassification(config=model_config)
    model.load_state_dict(
        torch.load('../user_data/save_model/{}_best_model.pth.tar'.format(config.model_name))['status'])
    model = model.to(config.device)
    print("***********make predict for test file*****************")
    model.eval()
    predict_all = []
    with torch.no_grad():
        for batch, (input_ids, token_type_ids, attention_mask, label) in enumerate(test_load):
            input_ids = input_ids.to(config.device)
            attention_mask = attention_mask.to(config.device)
            token_type_ids = token_type_ids.to(config.device)
            outputs = model(input_ids=input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids)
            logits = outputs.logits
            pred_pob = torch.nn.functional.softmax(logits, dim=1)[:, 1]
            predict_all.extend(list(pred_pob.detach().cpu().numpy()))
    # submit_result(predict)
    if k == 0:
        df = pd.DataFrame(predict_all, columns=["{}_score".format(k + 1)])
        df.to_csv('./{}_result.csv'.format(config.model_name), index=False)
    else:
        df = pd.read_csv('./{}_result.csv'.format(config.model_name))
        df["{}_score".format(k + 1)] = predict_all
        df.to_csv('./{}_result.csv'.format(config.model_name), index=False)
    print("***********done*****************")
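A small driver sketch for model_infer, assuming a config object with model_name and device attributes and one test DataLoader per fold; the fold loop and loader list are hypothetical names used for illustration:

for k, test_load in enumerate(test_loaders):  # test_loaders: one DataLoader per fold, built elsewhere
    model_infer(config, test_load, k)         # k == 0 creates the result CSV, later folds append score columns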
def inference_no_args(
    data: TensorDataset,
    loader: DataLoader,
    logger: Logger,
    model: BertForSequenceClassification,
    batch_size: int,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    predictions = []
    states = []
    logger.info("***** Running inference *****")
    logger.info("  Num examples = %d", len(data))
    logger.info("  Batch size = %d", batch_size)
    model.to(device)
    model.eval()
    with torch.no_grad():
        for batch in tqdm(loader, desc="Inference"):
            batch = tuple(t.to(device) for t in batch)
            logits, state = model(input_ids=batch[0],
                                  attention_mask=batch[1],
                                  token_type_ids=batch[2],
                                  output_hidden_states=True)
            # Keep the logits and the [CLS] vector from the last hidden layer
            predictions.extend(logits.cpu())
            states.extend(state[-1][:, 0, :].cpu())
    return predictions, states
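A minimal invocation sketch for inference_no_args with dummy pre-tokenized tensors; the function unpacks the model output as a tuple, so the model is loaded with return_dict=False here (an assumption about the intended transformers behaviour), and the logger name is illustrative:

import logging
import torch
from torch.utils.data import TensorDataset, DataLoader
from transformers import BertForSequenceClassification

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("inference")

model = BertForSequenceClassification.from_pretrained('bert-base-uncased', return_dict=False)

# Dummy pre-tokenized inputs: (input_ids, attention_mask, token_type_ids)
input_ids = torch.randint(0, 30000, (8, 128))
attention_mask = torch.ones_like(input_ids)
token_type_ids = torch.zeros_like(input_ids)
dataset = TensorDataset(input_ids, attention_mask, token_type_ids)
loader = DataLoader(dataset, batch_size=4)

predictions, states = inference_no_args(dataset, loader, logger, model, batch_size=4)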
'Epoch: {}/{} Train'.format(epoch + 1, num_epochs)) train_iter.set_postfix(train_loss=train_loss / train_count, train_acc=np.mean(train_acc)) if not os.path.isdir('./model/judger_textcnn/{}x{}'.format(STD_ID_1, STD_ID_2)): os.makedirs('./model/judger_textcnn/{}x{}'.format(STD_ID_1, STD_ID_2)) torch.save(model, "./model/judger_textcnn/{}x{}/textcnn_judger_{}x{}__{}.pth".format(STD_ID_1, STD_ID_2, STD_ID_1, STD_ID_2, epoch + 1 + save_offset)) with torch.no_grad(): eval_count = 0 eval_loss = 0 eval_acc = [] eval_iter = tqdm(dataiter_eval) for sentences, attn_masks, std_ids, ext_id, label in eval_iter: model.eval() textCNN.eval() model_.eval() if torch.cuda.is_available(): sentences = Variable(sentences.cuda()) attn_masks = Variable(attn_masks.cuda()) std_ids = Variable(std_ids.cuda()) else: sentences = Variable(sentences) attn_masks = Variable(attn_masks) std_ids = Variable(std_ids) em = model(input_ids=sentences, attention_mask=attn_masks)[0] outputs = textCNN(em) loss = criterion( outputs.view(-1, outputs.shape[1]), std_ids.view(-1)) loss = loss.mean()
class Classifier: """The Classifier""" ############################################# def __init__(self, train_batch_size=16, eval_batch_size=8, max_length=128, lr=2e-5, eps=1e-6, n_epochs=11): """ :param train_batch_size: (int) Training batch size :param eval_batch_size: (int) Batch size while using the `predict` method. :param max_length: (int) Maximum length for padding :param lr: (float) Learning rate :param eps: (float) Adam optimizer epsilon parameter :param n_epochs: (int) Number of epochs to train """ # model parameters self.train_batch_size = train_batch_size self.eval_batch_size = eval_batch_size self.max_length = max_length self.lr = lr self.eps = eps self.n_epochs = n_epochs # Information to be set or updated later self.trainset = None self.categories = None self.labels = None self.model = None # Tokenizer self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # The model # # We first need to specify some configurations to the model configs = BertConfig.from_pretrained( 'bert-base-uncased', num_labels=3, type_vocab_size=8) # BERT configuration self.model = BertForSequenceClassification(configs) # We are changing the header classifier of the model (Which is initially a simple fully connect layer layer) clf = Net() self.model.classifier = clf self.model.to( device ) # putting the model on GPU if available otherwise device is CPU def preprocess(self, sentences): """ The preprocessing function :param sentences: List of all sentences to be given at once. :return: List of preprocessed sentences. """ preprocessed = [] for sentence in tqdm(sentences): assert isinstance(sentence, str) doc = nlp(str(sentence)) tokens = [] for token in doc: if (not token.is_punct) or (token.text not in [ ',', '-', '.', "'", '!' ]): # Some punctuations can be interesting for BERT tokens.append(token.text) tokens = (' '.join(tokens)).lower().replace(" '", "'") preprocessed.append(tokens) return preprocessed def question(self, category): """ Computes the questions corresponding to each category :param category: (str) The category/aspect :return: (str) computed question using the QA-M task """ assert category in self.categories if category == 'AMBIENCE#GENERAL': return "what do you think of the ambience of it ?" elif category == 'DRINKS#PRICES' or category == 'FOOD#PRICES' or category == 'RESTAURANT#PRICES': return "what do you think of the price of it ?" elif category == 'DRINKS#QUALITY' or category == 'FOOD#QUALITY': return "what do you think of the quality of it ?" elif category == 'DRINKS#STYLE_OPTIONS': return "what do you think of drinks ?" elif category == 'FOOD#STYLE_OPTIONS': return "what do you think of the food ?" elif category == 'LOCATION#GENERAL': return "what do you think of the location of it ?" elif category == 'RESTAURANT#GENERAL' or category == 'RESTAURANT#MISCELLANEOUS': return "what do you think of the restaurant ?" elif category == 'SERVICE#GENERAL': return "what do you think of the service of it ?" 
def train(self, trainfile): """Trains the classifier model on the training set stored in file trainfile""" # Loading the data and splitting up its information in lists print("\n Loading training data...") trainset = np.genfromtxt(trainfile, delimiter='\t', dtype=str, comments=None) self.trainset = trainset n = len(trainset) targets = trainset[:, 0] categories = trainset[:, 1] self.labels = list(Counter(targets).keys()) # label names self.categories = list(Counter(categories).keys()) # category names start_end = [[int(x) for x in w.split(':')] for w in trainset[:, 3]] # target words words_of_interest = [ trainset[:, 4][i][start_end[i][0]:start_end[i][1]] for i in range(n) ] # sentences to be classified sentences = [str(s) for s in trainset[:, 4]] # Preprocessing the text data print(" Preprocessing the text data...") sentences = self.preprocess(sentences) # Computing question sequences print(" Computing questions...") questions = [self.question(categories[i]) for i in tqdm(range(n))] # Tokenization attention_masks = [] input_ids = [] token_type_ids = [] labels = [] for word, question, answer in zip(words_of_interest, questions, sentences): encoded_dict = self.tokenizer.encode_plus( answer, question + ' ' + word.lower(), add_special_tokens=True, # Add '[CLS]' and '[SEP]' tokens max_length=self.max_length, # Pad & truncate all sequences pad_to_max_length=True, return_attention_mask=True, # Construct attention masks return_tensors='pt', # Return pytorch tensors. ) attention_masks.append(encoded_dict['attention_mask']) input_ids.append(encoded_dict['input_ids']) token_type_ids.append(encoded_dict['token_type_ids']) attention_masks = torch.cat(attention_masks, dim=0) input_ids = torch.cat(input_ids, dim=0) token_type_ids = torch.cat(token_type_ids, dim=0) # Converting polarities into integers (0: positive, 1: negative, 2: neutral) for target in targets: if target == 'positive': labels.append(0) elif target == 'negative': labels.append(1) elif target == 'neutral': labels.append(2) labels = torch.tensor(labels) # Pytorch data iterators train_data = TensorDataset(input_ids, attention_masks, token_type_ids, labels) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, batch_size=self.train_batch_size, sampler=train_sampler) # Optimizer and scheduler (we are using a linear scheduler without warm up) no_decay = ['bias', 'gamma', 'beta'] # These parameters are not going to be decreased optimizer_parameters = [{ 'params': [ p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay) ], 'weight_decay': 0.01 }, { 'params': [ p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0 }] optimizer = AdamW(optimizer_parameters, lr=self.lr, eps=self.eps) total_steps = len(train_dataloader) * self.n_epochs scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=total_steps) # Training initial_t0 = time.time() for epoch in range(self.n_epochs): print('\n ======== Epoch %d / %d ========' % (epoch + 1, self.n_epochs)) print(' Training...\n') t0 = time.time() total_train_loss = 0 self.model.train() for step, batch in enumerate(train_dataloader): batch = tuple(t.to(device) for t in batch) input_ids_, input_mask_, segment_ids_, label_ids_ = batch self.model.zero_grad() loss, _ = self.model(input_ids_, token_type_ids=segment_ids_, attention_mask=input_mask_, labels=label_ids_) total_train_loss += loss.item() loss.backward() # clip gradient norm 
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0) optimizer.step() scheduler.step() avg_train_loss = total_train_loss / len(train_dataloader) training_time = format_time(time.time() - t0) # print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epoch duration: {:}".format(training_time)) print(" Total training time: {:}".format( format_time(time.time() - initial_t0))) def predict(self, datafile): """Predicts class labels for the input instances in file 'datafile' Returns the list of predicted labels """ # Loading the data and splitting up its information in lists evalset = np.genfromtxt(datafile, delimiter='\t', dtype=str, comments=None) m = len(evalset) categories = evalset[:, 1] start_end = [[int(x) for x in w.split(':')] for w in evalset[:, 3]] # target words words_of_interest = [ evalset[:, 4][i][start_end[i][0]:start_end[i][1]] for i in range(m) ] # sentences to be classified sentences = [str(s) for s in evalset[:, 4]] # Preprocessing the text data print("\n Preprocessing the text data...") sentences = self.preprocess(sentences) # Computing question sequences print(" Computing questions...") questions = [self.question(categories[i]) for i in tqdm(range(m))] # Tokenization attention_masks = [] input_ids = [] token_type_ids = [] for word, question, answer in zip(words_of_interest, questions, sentences): encoded_dict = self.tokenizer.encode_plus( answer, question + ' ' + word.lower(), add_special_tokens=True, # Add '[CLS]' and '[SEP]' max_length=self.max_length, # Pad & truncate all sequences pad_to_max_length=True, return_attention_mask=True, # Construct attention masks return_tensors='pt', # Return pytorch tensors. ) attention_masks.append(encoded_dict['attention_mask']) input_ids.append(encoded_dict['input_ids']) token_type_ids.append(encoded_dict['token_type_ids']) attention_masks = torch.cat(attention_masks, dim=0) input_ids = torch.cat(input_ids, dim=0) token_type_ids = torch.cat(token_type_ids, dim=0) # Pytorch data iterators eval_data = TensorDataset(input_ids, attention_masks, token_type_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, batch_size=self.eval_batch_size, sampler=eval_sampler) # Prediction named_labels = [] self.model.eval() for batch in eval_dataloader: batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids = batch with torch.no_grad(): logits = self.model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0] logits = softmax(logits, dim=-1) logits = logits.detach().cpu().numpy() outputs = np.argmax(logits, axis=1) # converting integer labels into named labels for label in outputs: if label == 0: named_labels.append('positive') elif label == 1: named_labels.append('negative') elif label == 2: named_labels.append('neutral') return np.array(named_labels)
def train_and_test(): # prepare data fileNameList = glob.glob( 'C:/YYQ/PGproject/PreProcessing/processed_features_facenet/*.pkl') # print(fileNameList) # basic features # text-list and tf-idf text_list = [] labels = [] visual_features = [] audio_features = [] for file_name in fileNameList: data_point = pkl.load(open(file_name, 'rb')) clip_name, label, transcription, smoothed_seq = data_point[ 0], data_point[1], data_point[2], data_point[3] # print(label, transcription) # continue labels.append(label) text_list.append(transcription) # average visual features # visual_seq = np.stack([w['landmark_feature'] for w in smoothed_seq], axis=0) visual_seq = np.stack( [w['facenet_feature'].squeeze() for w in smoothed_seq], axis=0) # visual_seq = scale(visual_seq) # visual_seq = visual_seq - np.mean(visual_seq, axis=0) # print(visual_seq.shape) # visual_mean = np.mean(visual_seq, axis=0) visual_features.append(visual_seq) # average audio features audio_seq = np.stack([w['audio_grp'] for w in smoothed_seq], axis=0) # audio_seq = scale(audio_seq) # print(audio_seq.shape) # audio_mean = np.mean(audio_seq, axis=0) audio_features.append(audio_seq) # exit() print(text_list) lens = [len(a.split()) for a in text_list] print(min(lens), max(lens)) exit() tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') pg = tokenizer.batch_encode_plus(text_list, max_length=128, pad_to_max_length=True) '''print(len(pg)) for k in pg.keys(): print(k, len(pg[k]), [len(kk) for kk in pg[k]])''' x = pg['input_ids'] token_type_ids = pg['token_type_ids'] attention_mask = pg['attention_mask'] '''for xx in x: print(xx)''' x, token_type_ids, attention_mask = np.array(x), np.array( token_type_ids), np.array(attention_mask) labels = np.array(labels) skf = StratifiedKFold(n_splits=5) cv5_ids = list(skf.split(x, labels)) sp = cv5_ids[0] train_l, train_labels = x[sp[0]], labels[sp[0]] # train_data, train_labels = sm.fit_sample(train_data, train_labels) test_l, test_labels = x[sp[1]], labels[sp[1]] print(train_l.shape) train_token_type_ids, test_token_type_ids, train_attention_mask, test_attention_mask = token_type_ids[sp[0]], \ token_type_ids[sp[1]], attention_mask[sp[0]], attention_mask[sp[1]] # shuffle training data for batch reading n_train = len(train_l) n_eval = len(test_l) perm = np.random.permutation(n_train) train_l = train_l[perm] train_labels = np.array(train_labels)[perm] train_token_type_ids, train_attention_mask = train_token_type_ids[ perm], train_attention_mask[perm] train_l, test_l, train_labels, test_labels, train_token_type_ids, test_token_type_ids = torch.LongTensor(train_l), \ torch.LongTensor(test_l), \ torch.LongTensor(train_labels), \ torch.LongTensor(test_labels), \ torch.LongTensor(train_token_type_ids), \ torch.LongTensor(test_token_type_ids) train_attention_mask, test_attention_mask = torch.FloatTensor(train_attention_mask), \ torch.FloatTensor(test_attention_mask) # model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3).to('cuda') config = BertConfig.from_pretrained('bert-base-uncased', num_labels=3) model = BertForSequenceClassification(config).to('cuda') # print(model(train_l[:32], token_type_ids=train_token_type_ids[:32], attention_mask=train_attention_mask[:32], labels=train_labels[:32])[1]) eval_every = 5 batch_size = 32 test_batch_size = 8 max_epochs = 500 t_total = math.ceil(n_train / batch_size) * max_epochs lr = 2e-5 epsilon = 1e-8 max_grad_norm = 1.0 weight_decay = 0.0 optimizer, scheduler = get_optimizers(model, learning_rate=lr, adam_epsilon=epsilon, 
weight_decay=weight_decay, num_training_steps=t_total) # loss_fn = torch.nn.CrossEntropyLoss().cuda() model.train() model.zero_grad() for ep in range(max_epochs): idx = 0 avg_loss = 0 n_batch = 0 model.train() while idx < n_train: optimizer.zero_grad() batch_l = train_l[idx:(idx + batch_size)].to('cuda') batch_ty = train_token_type_ids[idx:(idx + batch_size)].to('cuda') batch_am = train_attention_mask[idx:(idx + batch_size)].to('cuda') ans = train_labels[idx:(idx + batch_size)].to('cuda') idx += batch_size preds = model(input_ids=batch_l, token_type_ids=batch_ty, attention_mask=batch_am, labels=ans) loss = preds[0] # print(preds, ans) loss.backward() # print(loss.data.cpu().numpy()) avg_loss += loss.data.cpu().numpy() n_batch += 1. torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() avg_loss = avg_loss / n_batch print("epoch: %d avg_loss: %f" % (ep + 1, avg_loss)) del batch_l, batch_ty, batch_am torch.cuda.empty_cache() # time.sleep(20) if ep % eval_every == 0: idx = 0 model.eval() eval_preds = np.array([]) while idx < n_eval: test_batch_l = test_l[idx:(idx + test_batch_size)].to('cuda') test_batch_ty = test_token_type_ids[idx:( idx + test_batch_size)].to('cuda') test_batch_am = test_attention_mask[idx:( idx + test_batch_size)].to('cuda') test_ans = test_labels[idx:(idx + test_batch_size)].to('cuda') # time.sleep(20) # exit() test_pred = model(input_ids=test_batch_l, token_type_ids=test_batch_ty, attention_mask=test_batch_am, labels=test_ans) scores = test_pred[1] _, batch_eval_preds = scores.data.cpu().max(1) eval_preds = np.concatenate((eval_preds, batch_eval_preds), axis=-1) idx += test_batch_size # metrics precison, recall, fscore, support = precision_recall_fscore_support( test_labels.cpu().numpy(), eval_preds, labels=[0, 1, 2], average=None) '''scores = model(train_data, train_lens) _, train_preds = scores.data.cpu().max(1) print("training set: %f" % (float(sum(train_preds.numpy() == train_labels.cpu().numpy())) / len(train_preds.numpy()))) print(eval_preds.numpy())''' print( float(sum(eval_preds == test_labels.cpu().numpy())) / len(eval_preds)) print(precison, recall, fscore, support)
model.cuda() model_1.cuda() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda() model_1 = torch.nn.DataParallel(model_1, device_ids=[0, 1, 2, 3]).cuda() model.to(device) model_1.to(device) model_dict = torch.load("./model/supreme/l{}/bert_supreme_{}.pth".format( LABEL_ID, EVAL_EPOCH)).module.state_dict() model.module.load_state_dict(model_dict) # %% with torch.no_grad(): model.eval() model_1.eval() correct_dict = [] err_dict = [] eval_correct_num = 0 eval_list_iter = tqdm(eval_list) for idx, item in enumerate(eval_list_iter): cur_eval_result_scores = torch.tensor([]) eval_list_iter.set_description('{}/{}'.format(idx + 1, len(eval_list))) eval_list_iter.set_postfix(correct_num=eval_correct_num, eval_acc=eval_correct_num / (idx + 1)) T = tokenizer(item[3], add_special_tokens=True, max_length=100, padding='max_length',
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default='/hdd/lujunyu/dataset/multi_turn_corpus/ubuntu/', type=str, required=False, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--task_name", default='ubuntu', type=str, required=False, help="The name of the task to train.") parser.add_argument("--output_dir", default='/hdd/lujunyu/model/chatbert/ubuntu_without_pretraining/', type=str, required=False, help="The output directory where the model checkpoints will be written.") ## Other parameters parser.add_argument("--init_model_name", default='bert-base-uncased', type=str, help="Initial checkpoint (usually from a pre-trained BERT model).") parser.add_argument("--do_lower_case", default=True, action='store_true', help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument("--data_augmentation", default=False, action='store_true', help="Whether to use augmentation") parser.add_argument("--max_seq_length", default=256, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=True, action='store_true', help="Whether to run training.") parser.add_argument("--do_test", default=True, action='store_true', help="Whether to run eval on the test set.") parser.add_argument("--train_batch_size", default=500, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=500, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=3e-3, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=10.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_steps", default=0.0, type=float, help="Proportion of training to perform linear learning rate warmup for. 
" "E.g., 0.1 = 10%% of training.") parser.add_argument("--weight_decay", default=1e-3, type=float, help="weight_decay") parser.add_argument("--save_checkpoints_steps", default=8000, type=int, help="How often to save the model checkpoint.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=20, help="Number of updates steps to accumualte before performing a backward/update pass.") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = BertConfig.from_pretrained(args.init_model_name, num_labels=2) if args.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format( args.max_seq_length, bert_config.max_position_embeddings)) if os.path.exists(args.output_dir) and os.listdir(args.output_dir): if args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) else: os.makedirs(args.output_dir, exist_ok=True) tokenizer = BertTokenizer.from_pretrained(args.init_model_name, do_lower_case=args.do_lower_case) if args.data_augmentation: train_dataset = UbuntuDatasetForSP( file_path=os.path.join(args.data_dir, "train_augment_3.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer ) else: train_dataset = UbuntuDatasetForSP( file_path=os.path.join(args.data_dir, "train.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer ) eval_dataset = UbuntuDatasetForSP( file_path=os.path.join(args.data_dir, "valid.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer ) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, sampler=RandomSampler(train_dataset), num_workers=4) eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.eval_batch_size, sampler=SequentialSampler(eval_dataset), num_workers=4) model = BertForSequenceClassification(config=bert_config) model.to(device) num_train_steps = None if args.do_train: num_train_steps = int( len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare optimizer param_optimizer = list(model.named_parameters()) # remove pooler, which is not used thus it produce None grad that break apex 
param_optimizer = [n for n in param_optimizer] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=num_train_steps) else: optimizer = None scheduler = None if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank) elif n_gpu > 1: model = torch.nn.DataParallel(model) global_step = 0 best_metric = 0.0 if args.do_train: logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch loss, _ = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() # We have accumulated enought gradients scheduler.step() model.zero_grad() global_step += 1 if step % args.save_checkpoints_steps == 0: model.eval() f = open(os.path.join(args.output_dir, 'logits_dev.txt'), 'w') eval_loss = 0 nb_eval_steps, nb_eval_examples = 0, 0 logits_all = [] for input_ids, input_mask, segment_ids, label_ids in eval_dataloader: input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): tmp_eval_loss, logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids) logits = logits.detach().cpu().numpy() logits_all.append(logits) label_ids = label_ids.cpu().numpy() for logit, label in zip(logits, label_ids): logit = '{},{}'.format(logit[0], logit[1]) f.write('_\t{}\t{}\n'.format(logit, label)) eval_loss += tmp_eval_loss.mean().item() nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 f.close() logits_all = np.concatenate(logits_all,axis=0) eval_loss = eval_loss / nb_eval_steps result = evaluate(os.path.join(args.output_dir, 'logits_dev.txt')) result.update({'eval_loss': eval_loss}) output_eval_file = os.path.join(args.output_dir, "eval_results_dev.txt") with open(output_eval_file, "a") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) ### Save the best checkpoint if best_metric < result['R10@1'] + result['R10@2']: try: ### Remove 'module' prefix when using DataParallel state_dict = model.module.state_dict() except AttributeError: state_dict = model.state_dict() torch.save(state_dict, os.path.join(args.output_dir, "model.pt")) best_metric = result['R10@1'] + result['R10@2'] logger.info('Saving the best 
model in {}'.format(os.path.join(args.output_dir, "model.pt"))) ### visualize bad cases of the best model # logger.info('Saving Bad cases...') # visualize_bad_cases( # logits=logits_all, # input_file_path=os.path.join(args.data_dir, 'valid.txt'), # output_file_path=os.path.join(args.output_dir, 'valid_bad_cases.txt') # ) model.train()
class bert_classifier(object):
    def __init__(self):
        self.config = Config()
        self.device_setup()
        self.model_setup()

    def device_setup(self):
        """
        Configure the device and load the fine-tuned BERT model.
        :return:
        """
        # Use the GPU when available, via model.to(device)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model_save_path = self.config.get("result", "model_save_path")
        config_save_path = self.config.get("result", "config_save_path")
        vocab_save_path = self.config.get("result", "vocab_save_path")

        self.model_config = BertConfig.from_json_file(config_save_path)
        self.model = BertForSequenceClassification(self.model_config)
        self.state_dict = torch.load(model_save_path)
        self.model.load_state_dict(self.state_dict)
        self.tokenizer = transformers.BertTokenizer(vocab_save_path)
        self.model.to(self.device)
        self.model.eval()

    def model_setup(self):
        weight_decay = self.config.get("training_rule", "weight_decay")
        learning_rate = self.config.get("training_rule", "learning_rate")

        # Define the optimizer and the loss function
        # Prepare optimizer and schedule (linear warmup and decay)
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in self.model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay': weight_decay
        }, {
            'params': [
                p for n, p in self.model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            'weight_decay': 0.0
        }]
        self.optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
        self.criterion = nn.CrossEntropyLoss()

    def predict(self, sentence):
        input_ids, token_type_ids = convert_text_to_ids(self.tokenizer, sentence)
        input_ids = seq_padding(self.tokenizer, [input_ids])
        token_type_ids = seq_padding(self.tokenizer, [token_type_ids])
        # The model expects LongTensor inputs
        input_ids, token_type_ids = input_ids.long(), token_type_ids.long()
        # Zero out gradients
        self.optimizer.zero_grad()
        # Move tensors to the configured device
        input_ids, token_type_ids = input_ids.to(self.device), token_type_ids.to(self.device)
        output = self.model(input_ids=input_ids, token_type_ids=token_type_ids)
        y_pred_prob = output[0]
        y_pred_label = y_pred_prob.argmax(dim=1)
        print(y_pred_label)
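A brief usage sketch for bert_classifier, assuming Config points at a saved checkpoint, config file, and vocab, and that convert_text_to_ids and seq_padding are the project's own helpers; note that predict currently prints the predicted label index rather than returning it:

clf = bert_classifier()  # loads config, weights, and vocab, and moves the model to the device
clf.predict("This sentence will be classified by the fine-tuned model.")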
class init_class:
    def __init__(self):
        set_seed()
        self.sess = []
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.tokenizer = BertTokenizer.from_pretrained('../user_data/vocab')
        for model_name in ['bert', 'rbtl']:
            model_config = BertConfig.from_pretrained(
                pretrained_model_name_or_path="../user_data/bert_source/{}_config.json".format(model_name))
            model_config.vocab_size = len(pd.read_csv('../user_data/vocab', names=["score"]))
            self.model = BertForSequenceClassification(config=model_config)
            checkpoint = torch.load(
                '../user_data/save_model/{}_checkpoint.pth.tar'.format(model_name),
                map_location='cpu')
            self.model.load_state_dict(checkpoint['status'])

            # Export the PyTorch model to ONNX
            MODEL_ONNX_PATH = "./torch_{}_dynamic.onnx".format(model_name)
            OPERATOR_EXPORT_TYPE = torch._C._onnx.OperatorExportTypes.ONNX
            self.model.eval()
            org_dummy_input = make_train_dummy_input()
            inf_dummy_input = make_inference_dummy_input()
            dynamic_axes = {
                'input_ids': [1],
                'token_type_ids': [1],
                'attention_mask': [1]
            }
            torch.onnx.export(
                self.model,
                org_dummy_input,
                MODEL_ONNX_PATH,
                verbose=False,
                operator_export_type=OPERATOR_EXPORT_TYPE,
                opset_version=10,
                input_names=['input_ids', 'token_type_ids', 'attention_mask'],
                output_names=['output'],
                dynamic_axes=dynamic_axes)
            self.sess.append(onnxruntime.InferenceSession(MODEL_ONNX_PATH))

    def __getitem__(self, text):
        inputs = self.tokenizer(text, return_tensors="pt")
        result = []
        for sess in self.sess:
            pred_onnx = sess.run(
                None, {
                    'input_ids': inputs['input_ids'].numpy(),
                    'token_type_ids': inputs['token_type_ids'].numpy(),
                    'attention_mask': inputs['attention_mask'].numpy()
                })
            pred_pob = torch.nn.functional.softmax(torch.tensor(pred_onnx[0]), dim=1)[:, 1]
            result.append(pred_pob[0].cpu().item())
        return np.mean(result)
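A short usage sketch for init_class, assuming the '../user_data' checkpoints and the set_seed / make_train_dummy_input / make_inference_dummy_input helpers exist; __getitem__ averages the positive-class probability over both exported ONNX models:

scorer = init_class()  # exports both models to ONNX once, at construction time
score = scorer["Text to be scored by the ensemble."]
print(score)           # mean positive-class probability across the 'bert' and 'rbtl' models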
def test_model(test_data_dir): """ Use trained models to get the final prediction """ pretrained_models = ['bert-base-uncased', 'xlnet-base-cased', 'roberta-base'] # load testing data into pandas DataFrame with open(test_data_dir) as f: test_lines = [line.rstrip('\n')[line.rstrip('\n').find(',') + 1:] for line in f] test_df = pd.DataFrame(test_lines, columns=['text']) # because the model input required some label we won't use this though test_df['label'] = 1 for pretrained_model in pretrained_models: # load model if pretrained_model == 'bert-base-uncased': from transformers import BertForSequenceClassification as SequenceClassificationModel selected_epochs = bert_picks elif pretrained_model == 'xlnet-base-cased': from transformers import XLNetForSequenceClassification as SequenceClassificationModel selected_epochs = xlnet_picks elif pretrained_model == 'roberta-base': from transformers import RobertaForSequenceClassification as SequenceClassificationModel selected_epochs = roberta_picks config = AutoConfig.from_pretrained(pretrained_model) model = SequenceClassificationModel(config) # load tokenizer tokenizer = AutoTokenizer.from_pretrained(pretrained_model) init_token_idx = tokenizer.cls_token_id eos_token_idx = tokenizer.sep_token_id pad_token_idx = tokenizer.pad_token_id unk_token_idx = tokenizer.unk_token_id max_input_length = tokenizer.max_model_input_sizes[pretrained_model] def tokenize_and_cut(sentence): """ Tokenize the sentence and cut it if it's too long """ tokens = tokenizer.tokenize(sentence) # - 2 is for cls and sep tokens tokens = tokens[:max_input_length - 2] return tokens # xlnet model has no max_model_input_sizes field but it acutally has a limit # so we manually set it if max_input_length == None: max_input_length = 512 # Field handles the conversion to Tensor (tokenizing) TEXT = data.Field( batch_first=True, use_vocab=False, tokenize=tokenize_and_cut, preprocessing=tokenizer.convert_tokens_to_ids, init_token=init_token_idx, eos_token=eos_token_idx, pad_token=pad_token_idx, unk_token=unk_token_idx ) LABEL = data.LabelField(dtype=torch.long, use_vocab=False) # transform DataFrame into torchtext Dataset print('Transforming testing data for', pretrained_model, 'model') test_data = DataFrameDataset.splits(text_field=TEXT, label_field=LABEL, test_df=test_df) BATCH_SIZE = 32 # get gpu if possible device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') test_iterator = data.Iterator(test_data, batch_size=BATCH_SIZE, device=device, shuffle=False, sort=False, train=False) for selected_epoch in selected_epochs: # load trained model model.load_state_dict( torch.load(os.path.join( 'models', f'{pretrained_model}-e{selected_epoch:02}-model.pt' ), map_location=device) ) model = model.eval() # get predictions of test data print(f'Testing for {pretrained_model} epoch {selected_epoch}') predictions = test(model, test_iterator) # map predictions to match the original label_map = {0: -1, 1: 1} corrected_predictions = list(map(lambda x: label_map[x], predictions)) # load data into dataframe submission = pd.read_csv('predictions_test/sample_submission.csv') submission.Prediction = corrected_predictions submission.to_csv(os.path.join('predictions_test', f'{pretrained_model}-e{selected_epoch:02}.csv'), index=False) test_predictions('predictions_test')
def test_model(settings: Settings, device: torch.device, model: BertForSequenceClassification, evaluation_dataloader: DataLoader, dataset_type: str): # Test model on the given input dataset. # We can also (???) examine the BERT feature values for sentences which are # associated with hatespeech, versus sentences which are not associated # with hatespeech. settings.write_debug('Starting evaluation: {0} data'.format(dataset_type)) t0 = time.time() # Put the model in evaluation mode--the dropout layers behave differently # during evaluation. model.eval() # Tracking variables eval_loss, eval_accuracy, eval_f1 = 0, 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 # Evaluate data for one epoch for batch in evaluation_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up validation with torch.no_grad(): # Forward pass, calculate logit predictions. # This will return the logits rather than the loss because we have # not provided labels. # token_type_ids is the same as the "segment ids", which # differentiates sentence 1 and 2 in 2-sentence tasks. # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) # Get the "logits" output by the model. The "logits" are the output # values prior to applying an activation function like the softmax. logits = outputs[0] # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Calculate the accuracy for this batch of test sentences. tmp_eval_accuracy = flat_accuracy(logits, label_ids) tmp_f1 = flat_f1(settings, logits, label_ids) # Accumulate the total accuracy. eval_accuracy += tmp_eval_accuracy eval_f1 += tmp_f1 # Track the number of batches nb_eval_steps += 1 settings.write_debug(" Accuracy: {0:.2f}".format(eval_accuracy / nb_eval_steps)) settings.write_debug(" F1: {0:.2f}".format(eval_f1 / nb_eval_steps)) settings.write_debug(" Validation took: {:}".format( format_time(time.time() - t0))) settings.write_debug('Finished evaluation: {0} data'.format(dataset_type))
def train_classifier(model: BertForSequenceClassification, dataset: TensorDataset, validation_ratio: float, batch_size: int, freeze_embeddings_layer: bool, freeze_encoder_layers: int, epochs: int) -> (BertForSequenceClassification, list): device = select_device() train_size = int(validation_ratio * len(dataset)) val_size = len(dataset) - train_size train_dataset, val_dataset = random_split(dataset, [train_size, val_size]) train_dataloader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=batch_size) validation_dataloader = DataLoader(val_dataset, sampler=SequentialSampler(val_dataset), batch_size=batch_size) modules = [] if freeze_embeddings_layer: modules.append(model.bert.embeddings) for i in range(freeze_encoder_layers): modules.append(model.bert.encoder.layer[i]) for module in modules: for param in module.parameters(): param.requires_grad = False model.to(device) optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=5e-5) total_steps = len(train_dataloader) * epochs scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps) training_stats = [] total_t0 = time.time() for epoch_i in range(0, epochs): print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') t0 = time.time() total_train_loss = 0 model.train() for step, batch in enumerate(train_dataloader): if step % 40 == 0 and not step == 0: elapsed = format_time(time.time() - t0) print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format( step, len(train_dataloader), elapsed)) b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) model.zero_grad() outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) loss = outputs.loss logits = outputs.logits total_train_loss += loss.item() loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() scheduler.step() avg_train_loss = total_train_loss / len(train_dataloader) training_time = format_time(time.time() - t0) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(training_time)) print("") print("Running Validation...") t0 = time.time() model.eval() total_eval_accuracy = 0 total_eval_loss = 0 nb_eval_steps = 0 for batch in validation_dataloader: b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) with torch.no_grad(): outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) loss = outputs.loss logits = outputs.logits total_eval_loss += loss.item() logits = logits.detach().cpu().numpy() label_ids = b_labels.cpu().numpy() total_eval_accuracy += flat_accuracy(logits, label_ids) avg_val_accuracy = total_eval_accuracy / len(validation_dataloader) print(" Accuracy: {0:.2f}".format(avg_val_accuracy)) avg_val_loss = total_eval_loss / len(validation_dataloader) validation_time = format_time(time.time() - t0) print(" Validation Loss: {0:.2f}".format(avg_val_loss)) print(" Validation took: {:}".format(validation_time)) training_stats.append({ 'epoch': epoch_i + 1, 'Training Loss': avg_train_loss, 'Valid. Loss': avg_val_loss, 'Valid. 
Accur.': avg_val_accuracy, 'Training Time': training_time, 'Validation Time': validation_time }) print("") print("Training complete!") print("Total training took {:} (h:mm:ss)".format( format_time(time.time() - total_t0))) return model, training_stats
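A hedged end-to-end sketch tying train_classifier and test_classifier together, assuming a TensorDataset built as (input_ids, attention_mask, labels), which matches how both functions index their batches, and assuming the select_device, format_time, and flat_accuracy helpers are defined elsewhere in the project; the dataset contents here are dummies:

import torch
from torch.utils.data import TensorDataset
from transformers import BertForSequenceClassification

model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)

# Dummy pre-tokenized data: replace with real encoded sentences and labels
input_ids = torch.randint(0, 30000, (64, 128))
attention_mask = torch.ones_like(input_ids)
labels = torch.randint(0, 2, (64,))
dataset = TensorDataset(input_ids, attention_mask, labels)

model, stats = train_classifier(model, dataset,
                                validation_ratio=0.9,  # note: used as the *training* fraction in train_classifier
                                batch_size=16,
                                freeze_embeddings_layer=True,
                                freeze_encoder_layers=6,
                                epochs=2)
predictions, true_labels = test_classifier(model, dataset, batch_size=16)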
class AdapterCompositionTest(unittest.TestCase): def setUp(self): self.model = BertForSequenceClassification(BertConfig()) self.model.add_adapter("a") self.model.add_adapter("b") self.model.add_adapter("c") self.model.add_adapter("d") self.model.to(torch_device) self.model.train() def training_pass(self): inputs = {} inputs["input_ids"] = ids_tensor((1, 128), 1000) inputs["labels"] = torch.ones(1, dtype=torch.long) loss = self.model(**inputs).loss loss.backward() def batched_training_pass(self): inputs = {"input_ids": ids_tensor((4, 128), 1000), "labels": torch.ones(4, dtype=torch.long)} loss = self.model(**inputs).loss loss.backward() def test_simple_split(self): # pass over split setup self.model.set_active_adapters(Split("a", "b", 64)) self.training_pass() def test_stacked_split(self): # split into two stacks self.model.set_active_adapters(Split(Stack("a", "b"), Stack("c", "d"), split_index=64)) self.training_pass() def test_stacked_fusion(self): self.model.add_adapter_fusion(Fuse("b", "d")) # fuse two stacks self.model.set_active_adapters(Fuse(Stack("a", "b"), Stack("c", "d"))) self.training_pass() def test_mixed_stack(self): self.model.add_adapter_fusion(Fuse("a", "b")) self.model.set_active_adapters(Stack("a", Split("c", "d", split_index=64), Fuse("a", "b"))) self.training_pass() def test_nested_split(self): # split into two stacks self.model.set_active_adapters(Split(Split("a", "b", split_index=32), "c", split_index=64)) self.training_pass() def test_parallel(self): self.model.set_active_adapters(Parallel("a", "b", "c", "d")) inputs = {} inputs["input_ids"] = ids_tensor((1, 128), 1000) logits = self.model(**inputs).logits self.assertEqual(logits.shape, (4, 2)) def test_nested_parallel(self): self.model.set_active_adapters(Stack("a", Parallel(Stack("b", "c"), "d"))) inputs = {} inputs["input_ids"] = ids_tensor((1, 128), 1000) logits = self.model(**inputs).logits self.assertEqual(logits.shape, (2, 2)) def test_batch_split(self): self.model.set_active_adapters(BatchSplit("a", "b", "c", batch_sizes=[1, 1, 2])) self.batched_training_pass() def test_batch_split_int(self): self.model.set_active_adapters(BatchSplit("a", "b", batch_sizes=2)) self.batched_training_pass() def test_nested_batch_split(self): self.model.set_active_adapters(Stack("a", BatchSplit("b", "c", batch_sizes=[2, 2]))) self.batched_training_pass() def test_batch_split_invalid(self): self.model.set_active_adapters(BatchSplit("a", "b", batch_sizes=[3, 4])) with self.assertRaises(IndexError): self.batched_training_pass() def test_batch_split_equivalent(self): self.model.set_active_adapters("a") self.model.eval() input_ids = ids_tensor((2, 128), 1000) output_a = self.model(input_ids[:1]) self.model.set_active_adapters("b") output_b = self.model(input_ids[1:2]) self.model.set_active_adapters(BatchSplit("a", "b", batch_sizes=[1, 1])) output = self.model(input_ids) self.assertTrue(torch.allclose(output_a[0], output[0][0], atol=1e-6)) self.assertTrue(torch.allclose(output_b[0], output[0][1], atol=1e-6))
from soynlp.normalizer import repeat_normalize finetune_ckpt = './your_local_path/BaekBERT.ckpt' test_path = '../data/testset/inferset.csv' device = 'cuda' if torch.cuda.is_available() else 'cpu' args = Arg() ckp = torch.load(finetune_ckpt, map_location=torch.device('cpu')) pretrained_model_config = BertConfig.from_pretrained( args.pretrained_model, num_labels=ckp['state_dict']['bert.classifier.bias'].shape.numel(), ) model = BertForSequenceClassification(pretrained_model_config) model.load_state_dict({k[5:]: v for k, v in ckp['state_dict'].items()}) model.to(device) model.eval() def read_data(path): if path.endswith('xlsx'): return pd.read_excel(path) elif path.endswith('csv'): return pd.read_csv(path) elif path.endswith('tsv') or path.endswith('txt'): return pd.read_csv(path, sep='\t') else: raise NotImplementedError( 'Only Excel(xlsx)/Csv/Tsv(txt) are Supported') def preprocess_dataframe(df):
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default='/hdd/lujunyu/dataset/multi_turn_corpus/ubuntu/', type=str, required=False, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--task_name", default='ubuntu', type=str, required=False, help="The name of the task to train.") parser.add_argument( "--output_dir", default='/hdd/lujunyu/model/chatbert/ubuntu_base_si/', type=str, required=False, help="The output directory where the model checkpoints will be written." ) parser.add_argument( "--init_checkpoint", default='/hdd/lujunyu/model/chatbert/ubuntu_base_si_aug/model.pt', type=str, help="Initial checkpoint (usually from a pre-trained BERT model).") ## Other parameters parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help= "Whether to lower case the input text. True for uncased models, False for cased models." ) parser.add_argument( "--max_seq_length", default=256, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--eval_batch_size", default=2000, type=int, help="Total batch size for eval.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') bert_config = BertConfig.from_pretrained('bert-base-uncased') tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=args.do_lower_case) if args.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}" .format(args.max_seq_length, bert_config.max_position_embeddings)) test_dataset = UbuntuDataset(file_path=os.path.join( args.data_dir, "test.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer) test_dataloader = torch.utils.data.DataLoader( test_dataset, batch_size=args.eval_batch_size, sampler=SequentialSampler(test_dataset), num_workers=4) model = BertForSequenceClassification(bert_config).from_pretrained( args.init_checkpoint, config=bert_config) model.to(device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank) elif n_gpu > 1: model = torch.nn.DataParallel(model) logger.info("***** Running testing *****") logger.info(" Num examples = %d", len(test_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) f = open(os.path.join(args.output_dir, 'logits_test.txt'), 'w') model.eval() test_loss = 0 nb_test_steps, nb_test_examples = 0, 0 for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader, desc="Step"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) with 
torch.no_grad(): tmp_test_loss, logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() for logit, label in zip(logits, label_ids): logit = '{},{}'.format(logit[0], logit[1]) f.write('_\t{}\t{}\n'.format(logit, label)) test_loss += tmp_test_loss.mean().item() nb_test_examples += input_ids.size(0) nb_test_steps += 1 f.close() test_loss = test_loss / nb_test_steps result = evaluate(os.path.join(args.output_dir, 'logits_test.txt')) result.update({'test_loss': test_loss}) output_eval_file = os.path.join(args.output_dir, "results_test.txt") with open(output_eval_file, "w") as writer: logger.info("***** Test results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key])))