def create_examples(self, lines, example_type, cached_examples_file):
    '''Creates examples for data.'''
    pbar = ProgressBar(n_total=len(lines), desc='create examples')
    if cached_examples_file.exists():
        logger.info("Loading examples from cached file %s", cached_examples_file)
        examples = torch.load(cached_examples_file)
    else:
        examples = []
        for i, line in enumerate(lines):
            guid = '%s-%d' % (example_type, i)
            text_a = line[0]
            text_b = line[1]
            label = int(line[2])
            example = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
            examples.append(example)
            pbar(step=i)
        logger.info("Saving examples into cached file %s", cached_examples_file)
        torch.save(examples, cached_examples_file)
    return examples
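# A minimal usage sketch for create_examples above, assuming each row of `lines`
# is a (text_a, text_b, label) triple as the indexing implies; the `processor`
# instance and the cache path below are hypothetical.
from pathlib import Path

lines = [
    ("The cat sat on the mat.", "A cat is sitting.", "1"),
    ("It rained all day.", "The weather was sunny.", "0"),
]
examples = processor.create_examples(lines=lines,
                                     example_type='train',
                                     cached_examples_file=Path('cached_train_examples'))
print(examples[0].guid)  # -> 'train-0'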
def evaluate(args, model, tokenizer, prefix=""):
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, data_type='dev')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,
                                     collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        pbar = ProgressBar(n_total=len(eval_dataloader), desc="Evaluating")
        for step, batch in enumerate(eval_dataloader):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'labels': batch[3]}
                if args.model_type != 'distilbert':
                    # XLM, DistilBERT and RoBERTa don't use segment_ids
                    inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert', 'roberta'] else None
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
            pbar(step)
        print(' ')
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info("  dev: %s = %s", key, str(result[key]))
    return results
def load_and_cache_examples(args, task, tokenizer, data_type='train'):
    if args.local_rank not in [-1, 0] and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        'cached_{}_{}_{}_{}'.format(
            data_type,
            list(filter(None, args.model_name_or_path.split('/'))).pop(),
            str(args.max_seq_length),
            str(task)))
    if os.path.exists(cached_features_file):
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        # os.makedirs(cached_features_file, exist_ok=True, mode=0o777)
        if task == "ner":
            label_list = processor.get_labels_ner(args.data_dir, args.label_with_bi)
        else:
            label_list = processor.get_labels()
        if task in ['mnli', 'mnli-mm'] and 'roberta' in args.model_type:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        if data_type == 'train':
            examples = processor.get_train_examples(args.data_dir)
        elif data_type == 'dev':
            examples = processor.get_dev_examples(args.data_dir)
        else:
            examples = processor.get_test_examples(args.data_dir)
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_seq_length=args.max_seq_length,
            output_mode=output_mode)
    if args.local_rank == 0 and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_lens = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels)
    return dataset
def load_and_cache_examples(args, task, tokenizer, data_type='train'):
    if args.local_rank not in [-1, 0] and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    processor = processors[task]()
    # Load data features from dataset file
    logger.info("Creating features from dataset file at %s", args.data_dir)
    label_list = processor.get_labels()
    if data_type == 'train':
        examples = processor.get_train_examples(args.data_dir)
    elif data_type == 'dev':
        examples = processor.get_dev_examples(args.data_dir)
    else:
        examples = processor.get_test_examples(args.data_dir)
    if "bert" in args.model_type:
        # Convert the data into the format BERT expects
        features = convert_examples_to_features_for_bert(
            examples=examples,
            tokenizer=tokenizer,
            label_list=label_list,
            max_seq_length=args.train_max_seq_length if data_type == 'train' else args.eval_max_seq_length,
            cls_token_at_end=bool(args.model_type in ["xlnet"]),
            pad_on_left=bool(args.model_type in ['xlnet']),  # pad on the left for xlnet
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
    else:
        # Generic (non-BERT) format
        features = convert_examples_to_features_for_lstm(
            examples=examples,
            label_list=label_list,
            max_seq_length=args.train_max_seq_length if data_type == 'train' else args.eval_max_seq_length,
            tokenizer=tokenizer)
    if args.local_rank == 0 and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    all_lens = torch.tensor([f.input_len for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_lens, all_label_ids)
    return dataset
def load_and_cache_examples(args, task, tokenizer, data_type='train'):
    if args.local_rank not in [-1, 0] and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    processor = processors[task]()
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        'cached_crf-{}_{}_{}_{}'.format(
            data_type,
            list(filter(None, args.model_name_or_path.split('/'))).pop(),
            str(args.train_max_seq_length if data_type == 'train' else args.eval_max_seq_length),
            str(task)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        label_list = processor.get_labels()
        if data_type == 'train':
            logger.info("Creating features from dataset file at %s", args.train_data_path)
            examples = processor.get_train_examples(args.train_data_path)
        elif data_type == 'dev':
            logger.info("Creating features from dataset file at %s", args.dev_data_path)
            examples = processor.get_dev_examples(args.dev_data_path)
        else:
            logger.info("Creating features from dataset file at %s", args.test_data_path)
            examples = processor.get_test_examples(args.test_data_path)
        features = convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            label_list=label_list,
            max_seq_length=args.train_max_seq_length if data_type == 'train' else args.eval_max_seq_length,
            max_attr_length=args.max_attr_length)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    # Convert to Tensors and build dataset
    t_all_input_ids = torch.tensor([f.t_input_ids for f in features], dtype=torch.long)
    t_all_input_mask = torch.tensor([f.t_input_mask for f in features], dtype=torch.long)
    t_all_segment_ids = torch.tensor([f.t_segment_ids for f in features], dtype=torch.long)
    t_all_lens = torch.tensor([f.t_input_len for f in features], dtype=torch.long)
    t_all_orig_to_tok_index = torch.tensor([f.t_orig_to_tok_index for f in features], dtype=torch.long)
    t_all_word_lens = torch.tensor([f.t_word_len for f in features], dtype=torch.long)
    a_all_input_ids = torch.tensor([f.a_input_ids for f in features], dtype=torch.long)
    a_all_input_mask = torch.tensor([f.a_input_mask for f in features], dtype=torch.long)
    a_all_segment_ids = torch.tensor([f.a_segment_ids for f in features], dtype=torch.long)
    a_all_lens = torch.tensor([f.a_input_len for f in features], dtype=torch.long)
    a_all_orig_to_tok_index = torch.tensor([f.a_orig_to_tok_index for f in features], dtype=torch.long)
    a_all_word_lens = torch.tensor([f.a_word_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(t_all_input_ids, t_all_input_mask, t_all_segment_ids, t_all_lens,
                            t_all_orig_to_tok_index, t_all_word_lens,
                            a_all_input_ids, a_all_input_mask, a_all_segment_ids, a_all_lens,
                            a_all_orig_to_tok_index, a_all_word_lens,
                            all_label_ids)
    return dataset
def evaluate(args, model, tokenizer, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, data_type='dev')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)  # if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
                                     batch_size=args.eval_batch_size, collate_fn=collate_fn)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        pbar = ProgressBar(n_total=len(eval_dataloader), desc="Evaluating")
        for step, batch in enumerate(eval_dataloader):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'labels': batch[3]}
                inputs['token_type_ids'] = batch[2]
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
            pbar(step)
        print(' ')
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
        eval_loss = eval_loss / nb_eval_steps
        preds = np.argmax(preds, axis=1)
        result = metrics(eval_task, preds, out_label_ids)
        results.update(result)
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
    return results
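# Neither `metrics` (used here) nor `compute_metrics` (used in the earlier
# evaluate variant) is shown in this file. A minimal accuracy-only sketch of
# what such a helper might look like for classification, assuming `preds` and
# `labels` are aligned 1-D arrays; the function name is hypothetical.
import numpy as np

def simple_accuracy_metrics(task_name, preds, labels):
    # Return a dict of named scores, mirroring how evaluate() consumes the result.
    assert len(preds) == len(labels)
    return {"acc": float((np.asarray(preds) == np.asarray(labels)).mean())}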
def is_valid(self, proxy):
    proxies = {
        "http": "http://" + proxy,
        "https": "http://" + proxy,
    }
    try:
        requests.get(self.TEST_URL, proxies=proxies, timeout=self.TIMEOUT)
    except Exception as e:
        logger.error(repr(e))
        return False
    else:
        logger.info("it works: {0}".format(proxy))
        return True
def refresh_train_data(big_file):
    cmd = f"python prepare_lm_data_ngram.py --do_split --big_file={big_file}"
    logger.info(f"command:{cmd}")
    os.system(cmd)
    cmd = "python prepare_lm_data_ngram.py --do_mdata"
    logger.info(f"command:{cmd}")
    os.system(cmd)
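# os.system above interpolates big_file straight into a shell string; a sketch
# of the same two steps via subprocess.run, as a safer variant (script name
# unchanged, function name hypothetical).
import subprocess
import sys

def refresh_train_data_subprocess(big_file):
    # Run the same two preparation steps without going through a shell string.
    subprocess.run([sys.executable, "prepare_lm_data_ngram.py", "--do_split",
                    f"--big_file={big_file}"], check=True)
    subprocess.run([sys.executable, "prepare_lm_data_ngram.py", "--do_mdata"],
                   check=True)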
def is_valid_batch(self, proxys):
    """Batch-test whether the proxies are usable."""
    i = 1
    for proxy in proxys:
        logger.info('Sending request {0}.\n\r'.format(i))
        i += 1
        self.is_valid(proxy)  # test a single proxy
    logger.info('InvalidProxiesList: {0}'.format(len(self.InvalidProxiesList)))
    logger.info('*** {0} proxies have been discarded ***'.format(len(self.InvalidProxiesList)))
    logger.info('UseProxiesList: {0}'.format(self.UseProxiesList))
    logger.info('Number of usable proxies: {0}'.format(len(self.UseProxiesList)))
def __init__(self, training_path, file_id, tokenizer, data_name, reduce_memory=False):
    self.tokenizer = tokenizer
    self.file_id = file_id
    data_file = training_path / f"{data_name}_file_{self.file_id}.json"
    metrics_file = training_path / f"{data_name}_file_{self.file_id}_metrics.json"
    assert data_file.is_file() and metrics_file.is_file()
    metrics = json.loads(metrics_file.read_text())
    num_samples = metrics['num_training_examples']
    seq_len = metrics['max_seq_len']
    self.temp_dir = None
    self.working_dir = None
    if reduce_memory:
        self.temp_dir = TemporaryDirectory()
        self.working_dir = Path(self.temp_dir.name)
        input_ids = np.memmap(filename=self.working_dir / 'input_ids.memmap',
                              mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
        input_masks = np.memmap(filename=self.working_dir / 'input_masks.memmap',
                                shape=(num_samples, seq_len), mode='w+', dtype=bool)
        segment_ids = np.memmap(filename=self.working_dir / 'segment_ids.memmap',
                                shape=(num_samples, seq_len), mode='w+', dtype=bool)
        lm_label_ids = np.memmap(filename=self.working_dir / 'lm_label_ids.memmap',
                                 shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
        lm_label_ids[:] = -1
        is_nexts = np.memmap(filename=self.working_dir / 'is_nexts.memmap',
                             shape=(num_samples,), mode='w+', dtype=bool)
    else:
        input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
        input_masks = np.zeros(shape=(num_samples, seq_len), dtype=bool)
        segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=bool)
        lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
        is_nexts = np.zeros(shape=(num_samples,), dtype=bool)
    logger.info(f"Loading training examples for {str(data_file)}")
    with data_file.open() as f:
        for i, line in enumerate(f):
            line = line.strip()
            example = json.loads(line)
            features = convert_example_to_features(example, tokenizer, seq_len)
            input_ids[i] = features.input_ids
            segment_ids[i] = features.segment_ids
            input_masks[i] = features.input_mask
            lm_label_ids[i] = features.lm_label_ids
            is_nexts[i] = features.is_next
    assert i == num_samples - 1  # Assert that the sample count metric was true
    logger.info("Loading complete!")
    self.num_samples = num_samples
    self.seq_len = seq_len
    self.input_ids = input_ids
    self.input_masks = input_masks
    self.segment_ids = segment_ids
    self.lm_label_ids = lm_label_ids
    self.is_nexts = is_nexts
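# This constructor belongs to a PyTorch Dataset over pregenerated BERT examples;
# the rest of the class is not shown. A sketch of the __len__/__getitem__ pair
# such a class typically needs, assuming the array layout built above.
def __len__(self):
    return self.num_samples

def __getitem__(self, item):
    # Cast the (possibly memmapped) numpy rows to the integer dtypes the model expects.
    return (torch.tensor(self.input_ids[item].astype(np.int64)),
            torch.tensor(self.input_masks[item].astype(np.int64)),
            torch.tensor(self.segment_ids[item].astype(np.int64)),
            torch.tensor(self.lm_label_ids[item].astype(np.int64)),
            torch.tensor(int(self.is_nexts[item])))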
def predict(args, model, tokenizer, prefix=""):
    '''Model prediction.'''
    pred_output_dir = args.output_dir
    if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(pred_output_dir)
    test_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='test')
    # Note that DistributedSampler samples randomly
    test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=1, collate_fn=collate_fn)
    # Eval!
    logger.info("***** Running prediction %s *****", prefix)
    logger.info("  Num examples = %d", len(test_dataset))
    logger.info("  Batch size = %d", 1)
    results = []
    output_submit_file = os.path.join(pred_output_dir, prefix, "test_prediction.json")
    pbar = ProgressBar(n_total=len(test_dataloader), desc="Predicting")
    for step, batch in enumerate(test_dataloader):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": None
            }
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None)
            outputs = model(**inputs)
            logits = outputs[0]
            preds = logits.detach().cpu().numpy()
            preds = np.argmax(preds, axis=2).tolist()
            preds = preds[0][1:-1]  # strip [CLS] and [SEP]: [CLS]XXXX[SEP]
        tags = [args.id2label[x] for x in preds]
        label_entities = get_entities(preds, args.id2label, args.markup)  # extract entities
        json_d = {}
        json_d['id'] = step
        json_d['tag_seq'] = " ".join(tags)
        json_d['entities'] = label_entities
        results.append(json_d)
        pbar(step)
    logger.info("\n")
    with open(output_submit_file, "w") as writer:
        for record in results:
            writer.write(json.dumps(record) + '\n')
def create_training_instances(input_file, tokenizer, max_seq_len, short_seq_prob):
    # def create_training_instances(input_file, tokenizer, max_seq_len, short_seq_prob,
    #                               max_ngram, masked_lm_prob, max_predictions_per_seq):
    """Create `TrainingInstance`s from raw text."""
    all_documents = [[]]
    # Input file format:
    # (1) One sentence per line. These should ideally be actual sentences, not
    # entire paragraphs or arbitrary spans of text. (Because we use the
    # sentence boundaries for the "next sentence prediction" task).
    # (2) Blank lines between documents. Document boundaries are needed so
    # that the "next sentence prediction" task doesn't span between documents.
    with open(input_file, 'r') as f:
        lines = f.readlines()
        pbar = ProgressBar(n_total=len(lines), desc='read data')
        for line_cnt, line in enumerate(lines):
            line = line.strip()
            # Empty lines are used as document delimiters
            if not line:
                all_documents.append([])
            tokens = tokenizer.tokenize(line)
            if tokens:
                all_documents[-1].append(tokens)
            pbar(step=line_cnt)
    print(' ')
    # Remove empty documents
    all_documents = [x for x in all_documents if x]
    random.shuffle(all_documents)
    # vocab_words = list(tokenizer.vocab.keys())
    instances = []
    pbar = ProgressBar(n_total=len(all_documents), desc='create instances')
    for document_index in range(len(all_documents)):
        instances.extend(
            create_instances_from_document(all_documents, document_index,
                                           max_seq_len, short_seq_prob))
        pbar(step=document_index)
    # def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob):
    print(' ')
    # Log the first two instances as examples
    ex_idx = 0
    while ex_idx < 2:
        instance = instances[ex_idx]
        logger.info("-------------------------Example-----------------------")
        logger.info(f"id: {ex_idx}")
        logger.info(f"tokens: {' '.join([str(x) for x in instance['tokens']])}")
        # logger.info(f"masked_lm_labels: {' '.join([str(x) for x in instance['masked_lm_labels']])}")
        logger.info(f"segment_ids: {' '.join([str(x) for x in instance['segment_ids']])}")
        # logger.info(f"masked_lm_positions: {' '.join([str(x) for x in instance['masked_lm_positions']])}")
        # logger.info(f"is_random_next: {instance['is_random_next']}")
        ex_idx += 1
    random.shuffle(instances)
    return instances
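# A hedged consumption sketch: the logging above relies on each instance being a
# dict with at least 'tokens' and 'segment_ids'. The tokenizer below is assumed
# to be a pre-loaded BertTokenizer, and the corpus path is hypothetical.
instances = create_training_instances("corpus/train.txt", tokenizer,
                                      max_seq_len=128, short_seq_prob=0.1)
for instance in instances[:2]:
    # Length of the token sequence and number of segment-B positions.
    print(len(instance['tokens']), sum(instance['segment_ids']))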
def main():
    parser = ArgumentParser()
    ## Required parameters
    parser.add_argument("--do_data", default=True, action='store_true')
    parser.add_argument('--data_name', default='albert', type=str)
    parser.add_argument('--max_ngram', default=3, type=int)
    parser.add_argument("--do_lower_case", default=False, action='store_true')
    parser.add_argument('--seed', default=42, type=int)
    # parser.add_argument("--file_num", type=int, default=10,
    #                     help="Number of dynamic masking to pregenerate (with different masks)")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--short_seq_prob", type=float, default=0.1,
                        help="Probability of making a short sentence as a training example")
    parser.add_argument("--masked_lm_prob", type=float, default=0.15,
                        help="Probability of masking each token for the LM task")  # 128 * 0.15
    parser.add_argument("--max_predictions_per_seq", type=int, default=20,
                        help="Maximum number of tokens to mask in each sequence")
    args = parser.parse_args()
    seed_everything(args.seed)
    from configs.base import config
    args.vocab_path = config['albert_vocab_path']
    args.data_dir = config['data_dir']
    logger.info("pregenerate training data parameters:\n %s", args)
    tokenizer = BertTokenizer(vocab_file=args.vocab_path, do_lower_case=args.do_lower_case)
    small_path = config['data_dir'] / "corpus/small"
    files = sorted([f for f in small_path.iterdir() if f.exists() and '.txt' in str(f)])
    file_path = files[0].absolute()
    max_seq_len = args.max_seq_len
    train(file_path, tokenizer, max_seq_len)
    print(" dataloader ok! ")
    sys.exit(0)
def load_and_cache_examples(args, task, tokenizer, data_type='train'):
    '''Load the data.'''
    if args.local_rank not in [-1, 0] and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    processor = processors[task]()
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        'cached_soft-{}_{}_{}_{}'.format(
            data_type,
            list(filter(None, args.model_name_or_path.split('/'))).pop(),
            str(args.train_max_seq_length if data_type == 'train' else args.eval_max_seq_length),
            str(task)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if data_type == 'train':
            examples = processor.get_train_examples(args.data_dir)
        elif data_type == 'dev':
            examples = processor.get_dev_examples(args.data_dir)
        else:
            examples = processor.get_test_examples(args.data_dir)
        features = convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            label_list=label_list,
            max_seq_length=args.train_max_seq_length if data_type == 'train' else args.eval_max_seq_length,
            cls_token_at_end=bool(args.model_type in ["xlnet"]),
            pad_on_left=bool(args.model_type in ['xlnet']),  # pad on the left for xlnet
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    all_lens = torch.tensor([f.input_len for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_lens, all_label_ids)
    return dataset
def apicall():
    logger.info('<<albert_cls')
    try:
        start = time.time()
        req_data = request.get_data(as_text=True)
        logger.info(req_data)
        if req_data:
            req_data = json.loads(req_data)
            text = req_data['text']
            task = req_data['task']
            dict_res = for_server(text=text, task=task)
            logger.info("elapsed time: {}".format(time.time() - start))
            dict_ = {}
            dict_['status'] = 'success'
            dict_['results'] = dict_res
            return jsonify(dict_)
        else:
            res = {'status': 'failed', 'results': 'no request payload received'}
            return jsonify(res)
    except BadRequestKeyError as e:
        logger.error(e)
        res = {'status': 'failed', 'results': str(e)}
        return jsonify(res)
    except FileNotFoundError as e:
        logger.error(e)
        res = {'status': 'failed', 'results': e.strerror}
        return jsonify(res)
    except Exception as e:
        logger.error(e)
        res = {'status': 'failed', 'results': str(e)}
        return jsonify(res)
def predict(args, model, tokenizer, prefix=""):
    pred_output_dir = args.output_dir
    if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(pred_output_dir)
    test_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='test')
    print(len(test_dataset))
    # Note that DistributedSampler samples randomly
    test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=1, collate_fn=collate_fn)
    # Eval!
    logger.info("***** Running prediction %s *****", prefix)
    logger.info("  Num examples = %d", len(test_dataset))
    logger.info("  Batch size = %d", 1)
    results = []
    output_submit_file = os.path.join(pred_output_dir, prefix, "test_prediction.json")
    pbar = ProgressBar(n_total=len(test_dataloader), desc="Predicting")
    for step, batch in enumerate(test_dataloader):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "start_positions": None,
                "end_positions": None
            }
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None)
            outputs = model(**inputs)
        start_logits, end_logits = outputs[:2]
        R = bert_extract_item(start_logits, end_logits)
        if R:
            label_entities = [[args.id2label[x[0]], x[1], x[2]] for x in R]
        else:
            label_entities = []
        json_d = {}
        json_d['id'] = step
        json_d['entities'] = label_entities
        results.append(json_d)
        pbar(step)
    print(" ")
    with open(output_submit_file, "w") as writer:
        for record in results:
            writer.write(json.dumps(record) + '\n')
def load_and_cache_examples(args, task, tokenizer, data_type='train'):
    if args.local_rank not in [-1, 0] and data_type == 'train':
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
    processor = processors[task]()
    logger.info("Creating features from dataset file at %s", args.data_dir)
    label_list = processor.get_labels()  # [B I O]
    if data_type == 'train':
        examples = processor.get_train_examples(args.data_dir)
    elif data_type == 'dev':
        examples = processor.get_dev_examples(args.data_dir)
    else:
        examples = processor.get_test_examples(args.data_dir)
    features = convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        label_list=label_list,
        max_seq_length=args.max_seq_length,  # same length for train and eval
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
    )
    return features
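# Unlike the sibling loaders, this variant returns the raw features list. A
# sketch of turning it into a TensorDataset the same way those loaders do,
# assuming the same InputFeatures attribute names; the helper name is hypothetical.
import torch
from torch.utils.data import TensorDataset

def features_to_dataset(features):
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_lens = torch.tensor([f.input_len for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    return TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                         all_lens, all_label_ids)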
def predict(args, model, tokenizer, lines, prefix=""):
    pred_output_dir = args.output_dir
    if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(pred_output_dir)
    test_dataset = load_and_cache_examples(args, args.task_name, tokenizer, lines, data_type='test')
    # Note that DistributedSampler samples randomly
    test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=1, collate_fn=collate_fn)
    # Eval!
    logger.info("***** Running prediction %s *****", prefix)
    logger.info("  Num examples = %d", len(test_dataset))
    logger.info("  Batch size = %d", 1)
    results = []
    pbar = ProgressBar(n_total=len(test_dataloader), desc="Predicting")
    if isinstance(model, nn.DataParallel):
        model = model.module
    for step, batch in enumerate(test_dataloader):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": None,
                'input_lens': batch[4]
            }
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None)
            outputs = model(**inputs)
            logits = outputs[0]
            preds, _ = model.crf._obtain_labels(logits, args.id2label, inputs['input_lens'])
        preds = preds[0][1:-1]  # strip [CLS] and [SEP]: [CLS]XXXX[SEP]
        label_entities = get_entities(preds, args.id2label, args.markup)
        json_d = {}
        json_d['id'] = step
        json_d['tag_seq'] = " ".join(preds)
        json_d['entities'] = label_entities
        results.append(json_d)
        pbar(step)
    print(results[:3])
def predict(args, model, tokenizer, prefix=""):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    pred_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    pred_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for pred_task, pred_output_dir in zip(pred_task_names, pred_outputs_dirs):
        pred_dataset = load_and_cache_examples(args, pred_task, tokenizer, data_type='test')
        if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(pred_output_dir)
        args.pred_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        pred_sampler = SequentialSampler(pred_dataset) if args.local_rank == -1 else DistributedSampler(pred_dataset)
        pred_dataloader = DataLoader(pred_dataset, sampler=pred_sampler,
                                     batch_size=args.pred_batch_size, collate_fn=collate_fn)
        logger.info("***** Running prediction {} *****".format(prefix))
        logger.info("  Num examples = %d", len(pred_dataset))
        logger.info("  Batch size = %d", args.pred_batch_size)
        nb_pred_steps = 0
        preds = None
        pbar = ProgressBar(n_total=len(pred_dataloader), desc="Predicting")
        for step, batch in enumerate(pred_dataloader):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'labels': batch[3]}
                if args.model_type != 'distilbert':
                    # XLM, DistilBERT and RoBERTa don't use segment_ids
                    inputs['token_type_ids'] = batch[2] if ('bert' in args.model_type or 'xlnet' in args.model_type) else None
                outputs = model(**inputs)
                _, logits = outputs[:2]
            nb_pred_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            pbar(step)
        print(' ')
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        output_pred_file = os.path.join(pred_output_dir, prefix, "test_prediction.txt")
        with open(output_pred_file, "w") as writer:
            for pred in preds:
                writer.write(str(pred) + '\n')
    return results
def train(args, train_dataset, model, tokenizer, config):
    """ Train the model """
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                  batch_size=args.train_batch_size, collate_fn=collate_fn)
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    bert_param_optimizer = list(model.bert.named_parameters())
    crf_param_optimizer = list(model.crf.named_parameters())
    linear_param_optimizer = list(model.classifier.named_parameters())
    optimizer_grouped_parameters = [
        {'params': [p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay, 'lr': args.learning_rate},
        {'params': [p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0, 'lr': args.learning_rate},
        {'params': [p for n, p in crf_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate},
        {'params': [p for n, p in crf_param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0, 'lr': args.crf_learning_rate},
        # The linear classifier head reuses the CRF learning rate
        {'params': [p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate},
        {'params': [p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0, 'lr': args.crf_learning_rate},
    ]
    args.warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
                                                num_training_steps=t_total)
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
            os.path.join(args.model_name_or_path, "scheduler.pt")):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank],
            output_device=args.local_rank, find_unused_parameters=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path) and "checkpoint" in args.model_name_or_path:
        # set global_step to global_step of last saved checkpoint from model path
        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info("  Continuing training from checkpoint, will skip to saved global_step")
        logger.info("  Continuing training from epoch %d", epochs_trained)
        logger.info("  Continuing training from global step %d", global_step)
        logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    seed_everything(args.seed)  # Added here for reproducibility (even between python 2 and 3)
    # train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    best_eval_p = 0.0
    best_eval_r = 0.0
    best_eval_f1 = 0.0
    for _ in range(int(args.num_train_epochs)):
        # pbar = ProgressBar(n_total=len(train_dataloader), desc='Training')
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0], "attention_mask": batch[1],
                      "labels": batch[3], 'input_lens': batch[4]}
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None)
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            # pbar(step, {'loss': loss.item()})
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule (after the optimizer step, as PyTorch >= 1.1 expects)
                model.zero_grad()
                global_step += 1
                # if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                #     # Log metrics
                #     print(" ")
                #     if args.local_rank == -1:
                #         # Only evaluate when single GPU otherwise metrics may not average well
                #         evaluate(args, model, tokenizer)
                # if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                #     # Save model checkpoint
                #     output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                #     if not os.path.exists(output_dir):
                #         os.makedirs(output_dir)
                #     model_to_save = (model.module if hasattr(model, "module") else model)  # Take care of distributed/parallel training
                #     model_to_save.save_pretrained(output_dir)
                #     torch.save(args, os.path.join(output_dir, "training_args.bin"))
                #     logger.info("Saving model checkpoint to %s", output_dir)
                #     tokenizer.save_vocabulary(output_dir)
                #     torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                #     torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                #     logger.info("Saving optimizer and scheduler states to %s", output_dir)
        # best eval
        results = evaluate(args, model, tokenizer)
        if results["f1"] > best_eval_f1:
            best_eval_p = results['acc']
            best_eval_r = results['recall']
            best_eval_f1 = results["f1"]
            # Save the best model
            args.best_eval_output_dir = os.path.join(args.output_dir)
            if not os.path.exists(args.best_eval_output_dir):
                os.makedirs(args.best_eval_output_dir)
            model_to_save = (model.module if hasattr(model, "module") else model)  # Take care of distributed/parallel training
            model_to_save.save_pretrained(args.best_eval_output_dir)
            torch.save(args, os.path.join(args.best_eval_output_dir, "training_args.bin"))
            logger.info("eval results: p:{:.4f} r:{:.4f} f1:{:.4f}".format(best_eval_p, best_eval_r, best_eval_f1))
            logger.info("Saving step:{} as best model to {}".format(global_step, args.best_eval_output_dir))
            tokenizer.save_vocabulary(args.best_eval_output_dir)
            torch.save(optimizer.state_dict(), os.path.join(args.best_eval_output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(), os.path.join(args.best_eval_output_dir, "scheduler.pt"))
            # logger.info("Saving optimizer and scheduler states to %s", args.best_eval_output_dir)
            # config_file = os.path.join(args.best_eval_output_dir, "best_config.json")
            # json.dump(config, config_file)
        logger.info("\n")
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
    if args.do_predict:
        config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
        model = model_class.from_pretrained(args.best_eval_output_dir, config=config)
        model.to(args.device)
        predict(args, model, tokenizer)
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
    metric = SeqEntityScore(args.id2label, markup=args.markup)
    eval_output_dir = args.output_dir
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    eval_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='dev')
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
                                 batch_size=args.eval_batch_size, collate_fn=collate_fn)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    # pbar = ProgressBar(n_total=len(eval_dataloader), desc="Evaluating")
    eval_iterator = tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    if isinstance(model, nn.DataParallel):
        model = model.module
    for step, batch in enumerate(eval_iterator):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0], "attention_mask": batch[1],
                      "labels": batch[3], 'input_lens': batch[4]}
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None)
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            tags = model.crf.decode(logits, inputs['attention_mask'])
        if args.n_gpu > 1:
            tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu parallel evaluating
        eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        out_label_ids = inputs['labels'].cpu().numpy().tolist()
        input_lens = inputs['input_lens'].cpu().numpy().tolist()
        tags = tags.squeeze(0).cpu().numpy().tolist()
        for i, label in enumerate(out_label_ids):
            temp_1 = []
            temp_2 = []
            for j, m in enumerate(label):
                if j == 0:  # skip [CLS]
                    continue
                elif j == input_lens[i] - 1:  # stop at [SEP]
                    metric.update(pred_paths=[temp_2], label_paths=[temp_1])
                    break
                else:
                    temp_1.append(args.id2label[out_label_ids[i][j]])
                    temp_2.append(args.id2label[tags[i][j]])
        # pbar(step)
    logger.info("\n")
    eval_loss = eval_loss / nb_eval_steps
    eval_info, entity_info = metric.result()
    results = {f'{key}': value for key, value in eval_info.items()}
    results['loss'] = eval_loss
    logger.info("***** Eval results %s *****", prefix)
    info = "-".join([f' {key}: {value:.4f} ' for key, value in results.items()])
    logger.info(info)
    logger.info("***** Entity results %s *****", prefix)
    for key in sorted(entity_info.keys()):
        logger.info("******* %s results ********" % key)
        info = "-".join([f' {key}: {value:.4f} ' for key, value in entity_info[key].items()])
        logger.info(info)
    return results
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    # Other parameters
    parser.add_argument('--markup', default='bios', type=str, choices=['bios', 'bio'])
    parser.add_argument('--loss_type', default='ce', type=str, choices=['lsr', 'focal', 'ce'])
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--train_max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--eval_max_seq_length", default=512, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument("--evaluate_during_training", action="store_true",
                        help="Whether to run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action="store_true",
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--do_adv", action="store_true", help="Whether to use adversarial training.")
    parser.add_argument('--adv_epsilon', default=1.0, type=float, help="Epsilon for adversarial training.")
    parser.add_argument('--adv_name', default='word_embeddings', type=str, help="Name of the adversarial layer.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for, e.g., 0.1 = 10% of training.")
    parser.add_argument("--logging_steps", type=int, default=50, help="Log every X update steps.")
    parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X update steps.")
    parser.add_argument("--eval_all_checkpoints", action="store_true",
                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
    parser.add_argument('--predict_checkpoints', action="store_true",
                        help="Predict checkpoints starting with the same prefix as model_name and ending with a step number")
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument("--overwrite_output_dir", action="store_true",
                        help="Overwrite the content of the output directory")
    parser.add_argument("--overwrite_cache", action="store_true",
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--fp16", action="store_true",
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1",
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    args.output_dir = args.output_dir + '{}'.format(args.model_type)
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
    init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log')
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome."
            .format(args.output_dir))
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    seed_everything(args.seed)
    # Prepare NER task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    label_list = processor.get_labels()
    args.id2label = {i: label for i, label in enumerate(label_list)}
    args.label2id = {label: i for i, label in enumerate(label_list)}
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels, loss_type=args.loss_type,
        cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None)
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None)
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train')
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (model.module if hasattr(model, "module") else model)  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_vocabulary(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.predict_checkpoints > 0:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
            checkpoints = [x for x in checkpoints if x.split('-')[-1] == str(args.predict_checkpoints)]
        logger.info("Predict the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            predict(args, model, tokenizer, prefix=prefix)
def main(): args = deal_parser() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed seed_everything(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab config = BertConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) # albert model # tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case,spm_model_file=args.spm_model_file) # model = AlbertForSequenceClassification.from_pretrained(args.model_name_or_path,from_tf=bool('.ckpt' in args.model_name_or_path),config=config) # bert model tokenizer = tokenization_bert.BertTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) model = BertForSequenceClassification.from_pretrained( args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if 
args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr( model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Evaluation results = [] if args.do_eval and args.local_rank in [-1, 0]: # albert model #tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,do_lower_case=args.do_lower_case,spm_model_file=args.spm_model_file) # bert model tokenizer = tokenization_bert.BertTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) checkpoints = [(0, args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) for _, checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" # albert model # model = AlbertForSequenceClassification.from_pretrained(checkpoint) # bert model model = BertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key, value in results: writer.write("%s = %s\n" % (key, str(value))) # Test results = [] if args.do_predict and args.local_rank in [-1, 0]: # albert model # tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,do_lower_case=args.do_lower_case,spm_model_file=args.spm_model_file) # bert model tokenizer = tokenization_bert.BertTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) checkpoints = [(0, args.output_dir)] if args.predict_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Test the following checkpoints: %s", checkpoints) for _, checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" # albert model # model = AlbertForSequenceClassification.from_pretrained(checkpoint) # bert model model = BertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = test(args, model, tokenizer, prefix=prefix) results.extend([(k + 
'_{}'.format(global_step), v) for k, v in result.items()]) output_test_file = os.path.join(args.output_dir, "checkpoint_test_results.txt") with open(output_test_file, "w") as writer: for key, value in results: writer.write("%s = %s\n" % (key, str(value)))
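# A minimal, self-contained sketch (not taken from this repo) of the checkpoint-discovery
# pattern used above: glob for WEIGHTS_NAME under the output dir, keep only the
# "checkpoint-<step>" directories, and sort them numerically by step so evaluation runs
# in training order. The WEIGHTS_NAME value below is an assumption for illustration.
import glob
import os

WEIGHTS_NAME = "pytorch_model.bin"  # assumed file name; transformers saves weights under this name

def sorted_checkpoints(output_dir):
    # Find every saved weights file, then reduce each path to its containing directory.
    dirs = {os.path.dirname(p) for p in glob.glob(os.path.join(output_dir, "**", WEIGHTS_NAME), recursive=True)}
    # Keep "checkpoint-<step>" dirs and sort by the integer step suffix, mirroring the
    # (int(path.split('-')[-1]), path) tuples built in the eval/predict loops above.
    steps = [(int(d.split("-")[-1]), d) for d in dirs if "checkpoint" in d]
    return sorted(steps, key=lambda x: x[0])

# Numeric sorting matters: "checkpoint-2000" must follow "checkpoint-1000", whereas a
# plain lexicographic sort would place "checkpoint-10000" before "checkpoint-2000".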
def train(args, train_dataset, model, tokenizer): """ Train the model """ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler( train_dataset) if args.local_rank == -1 else DistributedSampler( train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) if args.max_steps > 0: num_training_steps = args.max_steps args.num_train_epochs = args.max_steps // ( len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: num_training_steps = len( train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs args.warmup_steps = int(num_training_steps * args.warmup_proportion) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay }, { 'params': [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0 }] # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_training_steps) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", num_training_steps) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything( args.seed ) # Added here for reproducibility (even between python 2 and 3) for _ in range(int(args.num_train_epochs)): pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') for step, batch in enumerate(train_dataloader): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3], 'token_type_ids': batch[2] } outputs = model(**inputs) loss = outputs[ 0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean( ) # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1: # Only evaluate when single GPU otherwise metrics may not average well evaluate(args, model, tokenizer) if args.local_rank in [ -1, 0 ] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr( model, 'module' ) else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) pbar(step, {'loss': loss.item()}) print(" ") if 'cuda' in str(args.device): torch.cuda.empty_cache() return global_step, tr_loss / global_step
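# A small sketch of the step arithmetic train() performs, with made-up numbers, to make
# the relationship between dataloader length, gradient accumulation, and optimizer
# updates explicit. The values here are hypothetical, not repo defaults.
def training_steps(num_batches, grad_accum_steps, num_epochs, max_steps=-1):
    # One optimizer update happens every `grad_accum_steps` micro-batches.
    if max_steps > 0:
        # Fixed step budget: the epoch count is derived from it, as in train() above.
        return max_steps, max_steps // (num_batches // grad_accum_steps) + 1
    return (num_batches // grad_accum_steps) * num_epochs, num_epochs

# e.g. 1000 batches, accumulation of 4, 3 epochs -> 750 optimizer updates, and
# warmup_steps = int(750 * warmup_proportion) with the default 0.1 -> 75 steps.
assert training_steps(1000, 4, 3) == (750, 3)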
def test(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) test_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else ( args.task_name, ) test_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else ( args.output_dir, ) results = {} for test_task, test_output_dir in zip(test_task_names, test_outputs_dirs): test_dataset = load_and_cache_examples(args, test_task, tokenizer, data_type='test') if not os.path.exists(test_output_dir) and args.local_rank in [-1, 0]: os.makedirs(test_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max( 1, args.n_gpu) # Note that DistributedSampler samples randomly test_sampler = SequentialSampler( test_dataset) if args.local_rank == -1 else DistributedSampler( test_dataset) test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn) # Test! logger.info("***** Running test {} *****".format(prefix)) logger.info(" Num examples = %d", len(test_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None pbar = ProgressBar(n_total=len(test_dataloader), desc="Testing") for step, batch in enumerate(test_dataloader): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3], 'token_type_ids': batch[2] } outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs['labels'].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) pbar(step) print(' ') if 'cuda' in str(args.device): torch.cuda.empty_cache() eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(test_task, preds, out_label_ids) results.update(result) logger.info("***** Test results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) classreport = ClassReport([ 'Joint', 'Sequence', 'Progression', "Contrast", "Supplement", "Cause-Result", "Result-Cause", "Background", "Behavior-Purpose", "Purpose-Behavior", "Elaboration", "Summary", "Evaluation", "Statement-Illustration", "Illustration-Statement" ]) classreport(preds, out_label_ids) logger.info("%s : %s", classreport.name(), classreport.value()) return results
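# compute_metrics() is defined elsewhere in this repo; as a rough illustration of the
# classification path above (argmax over accumulated logits, then a metric against the
# gold label ids), here is a minimal accuracy computation under that assumption.
import numpy as np

def simple_accuracy(logits, label_ids):
    preds = np.argmax(logits, axis=1)  # same reduction test() applies for classification
    return {"acc": (preds == label_ids).mean()}

print(simple_accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 1])))  # {'acc': 0.5}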
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: ") parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--vocab_file", default='', type=str) parser.add_argument("--spm_model_file", default='', type=str) ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=512, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--output_eval", action='store_true', help="Whether to write output result.") parser.add_argument("--do_predict", action='store_true', help="Whether to run the model in inference mode on the test set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.") parser.add_argument('--logging_steps', type=int, default=10, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=1000, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") parser.add_argument("--label_with_bi", action='store_true', help="Label with B/I") args = parser.parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed seed_everything(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name != "ner": raise ValueError("Task error: %s, must be ner" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels_ner(args.data_dir, args.label_with_bi) num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config = AlbertConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) model = AlbertFocalLossForNer.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, label_list, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Evaluation results = [] if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) checkpoints = [(0, args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) for _, checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = AlbertFocalLossForNer.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, label_list, prefix=prefix) results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key, value in results: writer.write("%s = %s\n" % (key, str(value))) if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) result = evaluate(args, model, tokenizer, label_list, prefix="") output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key, value in result.items(): writer.write("%s = %s\n" % (key, str(value)))
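# Hypothetical invocation of this ALBERT NER script (the file name and paths below are
# assumptions for illustration, not taken from the repo):
#
#   python run_albert_ner.py \
#     --data_dir dataset/ner \
#     --model_type albert \
#     --model_name_or_path prev_trained_model/albert_base \
#     --vocab_file prev_trained_model/albert_base/vocab.txt \
#     --task_name ner --output_dir outputs/ \
#     --do_train --do_eval --do_lower_case
#
# Note that the script appends args.model_type to output_dir, so checkpoints and the
# "{model_type}-{task_name}.log" file land under outputs/albert.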
def main(): parser = ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default="dataset", type=str, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--config_path", default="prev_trained_model/electra_small/config.json", type=str) parser.add_argument("--vocab_path", default="prev_trained_model/electra_small/vocab.txt", type=str) parser.add_argument( "--output_dir", default="outputs", type=str, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--model_path", default='prev_trained_model/electra_small', type=str) parser.add_argument('--data_name', default='electra', type=str) parser.add_argument( "--file_num", type=int, default=10, help="Number of dynamically masked data files to pregenerate (with different masks)") parser.add_argument( "--reduce_memory", action="store_true", help= "Store training data as on-disc memmaps to massively reduce memory usage" ) parser.add_argument("--epochs", type=int, default=4, help="Number of epochs to train for") parser.add_argument( "--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--num_eval_steps', type=int, default=100) parser.add_argument('--num_save_steps', type=int, default=2000) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight decay if we apply some.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument("--train_batch_size", default=128, type=int, help="Total batch size for training.") parser.add_argument("--gen_weight", default=1.0, type=float, help='masked language modeling / generator loss') parser.add_argument("--disc_weight", default=50, type=float, help='discriminator loss') parser.add_argument('--untied_generator', action='store_true', help='tie all generator/discriminator weights?') parser.add_argument('--temperature', default=0, type=float, help='temperature for sampling from generator') parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Linear warmup over warmup_steps.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument('--max_grad_norm', default=1.0, type=float) parser.add_argument("--learning_rate", default=0.000176, type=float, help="The initial learning rate for Adam.") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--fp16_opt_level', type=str, default='O2', help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html") parser.add_argument( '--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--continue_train', default='', help="continue train path") args = parser.parse_args() args.data_dir = Path(args.data_dir) args.output_dir = Path(args.output_dir) pregenerated_data = args.data_dir / "corpus/train" init_logger(log_file=str(args.output_dir / "train_albert_model.log")) assert pregenerated_data.is_dir(), \ "--pregenerated_data should point to the folder of files made by prepare_lm_data_mask.py!" samples_per_epoch = 0 for i in range(args.file_num): data_file = pregenerated_data / f"{args.data_name}_file_{i}.json" metrics_file = pregenerated_data / f"{args.data_name}_file_{i}_metrics.json" if data_file.is_file() and metrics_file.is_file(): metrics = json.loads(metrics_file.read_text()) samples_per_epoch += metrics['num_training_examples'] else: if i == 0: exit("No training data was found!") print( f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs})." ) print( "This script will loop over the available data, but training diversity may be negatively impacted." ) break logger.info(f"samples_per_epoch: {samples_per_epoch}") if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) args.n_gpu = 1 # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( f"device: {device} , distributed training: {bool(args.local_rank != -1)}, 16-bits training: {args.fp16}" ) if args.gradient_accumulation_steps < 1: raise ValueError( f"Invalid gradient_accumulation_steps parameter: {args.gradient_accumulation_steps}, should be >= 1" ) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps seed_everything(args.seed) tokenizer = BertTokenizer.from_pretrained(args.vocab_path, do_lower_case=args.do_lower_case) total_train_examples = samples_per_epoch * args.epochs num_train_optimization_steps = int(total_train_examples / args.train_batch_size / args.gradient_accumulation_steps) if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size( ) args.warmup_steps = int(num_train_optimization_steps * args.warmup_proportion) bert_config = ElectraConfig.from_pretrained(args.config_path, gen_weight=args.gen_weight, temperature=args.temperature, disc_weight=args.disc_weight) model = ElectraForPreTraining(config=bert_config) if args.continue_train: print(f"Continue train from {args.continue_train}") model = model.from_pretrained(args.continue_train) elif args.model_path: print("Loading pretrained model") model.generator = AutoModel.from_pretrained(args.model_path + "/G") model.electra = AutoModel.from_pretrained(args.model_path + "/D") # print(model) model.to(device) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] optimizer = AdamW(params=optimizer_grouped_parameters, 
lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps) # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) # if args.model_path: # optimizer.load_state_dict(torch.load(args.model_path + "/optimizer.bin")) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) if args.n_gpu > 1: # model = BalancedDataParallel(gpu0_bsz=32,dim=0,model).to(device) model = torch.nn.DataParallel(model) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank) global_step = 0 g_metric = LMAccuracy() d_metric = AccuracyThresh() tr_g_acc = AverageMeter() tr_d_acc = AverageMeter() tr_loss = AverageMeter() tr_g_loss = AverageMeter() tr_d_loss = AverageMeter() train_logs = {} logger.info("***** Running training *****") logger.info(f"  Num examples = {total_train_examples}") logger.info(f"  Batch size = {args.train_batch_size}") logger.info(f"  Num steps = {num_train_optimization_steps}") logger.info(f"  warmup_steps = {args.warmup_steps}") logger.info(f"  Num available GPUs = {args.n_gpu}") start_time = time.time() seed_everything(args.seed) # Added here for reproducibility for epoch in range(args.epochs): for idx in range(args.file_num): epoch_dataset = PregeneratedDataset( file_id=idx, training_path=pregenerated_data, tokenizer=tokenizer, reduce_memory=args.reduce_memory, data_name=args.data_name) if args.local_rank == -1: train_sampler = RandomSampler(epoch_dataset) else: train_sampler = DistributedSampler(epoch_dataset) train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size) model.train() nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(train_dataloader): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, lm_label_ids = batch outputs = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=lm_label_ids) loss, g_loss, d_loss, d_logits, g_logits, is_replaced_label = outputs active_indices = input_mask.view(-1) == 1 active_logits = d_logits.view(-1)[active_indices] active_labels = is_replaced_label.view(-1)[active_indices] g_metric(logits=g_logits.view(-1, bert_config.vocab_size), target=lm_label_ids.view(-1)) d_metric(logits=active_logits.view(-1, 1), target=active_labels) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
g_loss = g_loss.mean() d_loss = d_loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() nb_tr_steps += 1 tr_g_acc.update(g_metric.value(), n=input_ids.size(0)) tr_d_acc.update(d_metric.value(), n=input_ids.size(0)) tr_loss.update(loss.item(), n=1) tr_g_loss.update(g_loss.item(), n=1) tr_d_loss.update(d_loss.item(), n=1) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Step the optimizer before the LR scheduler (required order since PyTorch 1.1) optimizer.zero_grad() global_step += 1 if global_step % args.num_eval_steps == 0: now = time.time() eta = now - start_time if eta > 3600: eta_format = ('%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: eta_format = '%ds' % eta train_logs['loss'] = tr_loss.avg train_logs['g_acc'] = tr_g_acc.avg train_logs['d_acc'] = tr_d_acc.avg train_logs['g_loss'] = tr_g_loss.avg train_logs['d_loss'] = tr_d_loss.avg show_info = f'[Training]:[{epoch}/{args.epochs}]{global_step}/{num_train_optimization_steps} ' \ f'- ETA: {eta_format}' + "-".join( [f' {key}: {value:.4f} ' for key, value in train_logs.items()]) logger.info(show_info) tr_g_acc.reset() tr_d_acc.reset() tr_loss.reset() tr_g_loss.reset() tr_d_loss.reset() start_time = now if global_step % args.num_save_steps == 0: if args.local_rank in [-1, 0] and args.num_save_steps > 0: # Save model checkpoint output_dir = args.output_dir / f'lm-checkpoint-{global_step}' if not output_dir.exists(): output_dir.mkdir() # save model model_to_save = model.module if hasattr( model, 'module' ) else model # Take care of distributed/parallel training model_to_save.save_pretrained(str(output_dir)) torch.save(args, str(output_dir / 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) model.module.generator.save_pretrained( str(output_dir / "G")) logger.info("Saving generator model checkpoint to %s", output_dir / "G") model.module.electra.save_pretrained( str(output_dir / "D")) logger.info("Saving electra model checkpoint to %s", output_dir / "D") torch.save(optimizer.state_dict(), str(output_dir / "optimizer.bin")) # save config output_config_file = output_dir / CONFIG_NAME output_config_file_D = output_dir / "D" / CONFIG_NAME output_config_file_G = output_dir / "G" / CONFIG_NAME with open(str(output_config_file), 'w') as f: f.write(model_to_save.config.to_json_string()) with open(str(output_config_file_D), 'w') as f: f.write( model.module.electra.config.to_json_string()) with open(str(output_config_file_G), 'w') as f: f.write( model.module.generator.config.to_json_string()) # save vocab tokenizer.save_vocabulary(output_dir)
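# ELECTRA's objective as configured above combines the generator's MLM loss with the
# discriminator's replaced-token-detection loss. The actual combination lives inside
# ElectraForPreTraining; this is a minimal sketch of the weighting implied by
# gen_weight / disc_weight (an assumption about its shape, using the paper's convention
# of a much larger discriminator weight, e.g. 50).
import torch

def combined_loss(g_loss: torch.Tensor, d_loss: torch.Tensor,
                  gen_weight: float = 1.0, disc_weight: float = 50.0) -> torch.Tensor:
    # The discriminator loss is per-token binary classification and much smaller in
    # magnitude than the MLM loss, hence the large disc_weight.
    return gen_weight * g_loss + disc_weight * d_loss

loss = combined_loss(torch.tensor(7.1), torch.tensor(0.12))  # -> tensor(13.1)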
def predict(args, model, tokenizer, prefix=""): metric = SeqEntityScore(args.id2label, markup=args.markup) pred_output_dir = args.output_dir if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]: os.makedirs(pred_output_dir) test_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='test') # Note that DistributedSampler samples randomly test_sampler = SequentialSampler( test_dataset) if args.local_rank == -1 else DistributedSampler( test_dataset) test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=1, collate_fn=collate_fn) # Eval! logger.info("***** Running prediction %s *****", prefix) logger.info(" Num examples = %d", len(test_dataset)) logger.info(" Batch size = %d", 1) f_results = [] output_predict_file = os.path.join(pred_output_dir, prefix, "crf_test_prediction.json") # pbar = ProgressBar(n_total=len(test_dataloader), desc="Predicting") test_iterator = tqdm(test_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) if isinstance(model, nn.DataParallel): model = model.module test_loss = 0.0 nb_test_steps = 0 for step, batch in enumerate(test_iterator): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], 'input_lens': batch[4] } if args.model_type != "distilbert": # XLM and RoBERTa don't use segment_ids inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None) outputs = model(**inputs) tmp_test_loss, logits = outputs[:2] if args.n_gpu > 1: tmp_test_loss = tmp_test_loss.mean() # print(logits.shape) # print(logits) tags = model.crf.decode(logits, inputs['attention_mask']) tags = tags.squeeze(0).cpu().numpy().tolist() ### Save test-set predictions to file preds = tags[0][1:-1] # [CLS]XXXX[SEP] json_d = {} json_d['id'] = step json_d['tag_seq'] = " ".join([args.id2label[x] for x in preds]) json_d['entities'] = get_entities(preds, args.id2label, args.markup) f_results.append(json_d) test_loss += tmp_test_loss.item() nb_test_steps += 1 out_label_ids = inputs['labels'].cpu().numpy().tolist() input_lens = inputs['input_lens'].cpu().numpy().tolist() for i, label in enumerate(out_label_ids): temp_1 = [] temp_2 = [] for j, m in enumerate(label): if j == 0: continue elif j == input_lens[i] - 1: metric.update(pred_paths=[temp_2], label_paths=[temp_1]) break else: temp_1.append(args.id2label[out_label_ids[i][j]]) temp_2.append(args.id2label[tags[i][j]]) ### Print final test-set results ### logger.info("\n") test_loss = test_loss / nb_test_steps test_info, entity_info = metric.result() results = {f'{key}': value for key, value in test_info.items()} results['loss'] = test_loss logger.info("***** Test results %s *****", prefix) info = "-".join( [f' {key}: {value:.4f} ' for key, value in results.items()]) logger.info(info) logger.info("\n") logger.info("***** Entity results %s *****", prefix) for key in sorted(entity_info.keys()): logger.info("******* %s results ********" % key) info = "-".join([ f' {key}: {value:.4f} ' for key, value in entity_info[key].items() ]) logger.info(info) # pbar(step) logger.info("\n") with open(output_predict_file, "w") as writer: for record in f_results: writer.write(json.dumps(record) + '\n') if args.task_name == 'cluener': output_submit_file = os.path.join(pred_output_dir, prefix, "test_submit.json") test_text = [] with open(os.path.join(args.data_dir, "test.json"), 'r') as fr: for line in fr: test_text.append(json.loads(line)) test_submit = [] for x, y in 
zip(test_text, f_results): json_d = {} json_d['id'] = x['id'] json_d['label'] = {} entities = y['entities'] words = list(x['text']) if len(entities) != 0: for subject in entities: tag = subject[0] start = subject[1] end = subject[2] word = "".join(words[start:end + 1]) if tag in json_d['label']: if word in json_d['label'][tag]: json_d['label'][tag][word].append([start, end]) else: json_d['label'][tag][word] = [[start, end]] else: json_d['label'][tag] = {} json_d['label'][tag][word] = [[start, end]] test_submit.append(json_d) json_to_text(output_submit_file, test_submit)
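# A tiny worked example of the span-to-submission conversion above, with made-up text
# and entities. Each entity triple is (tag, start, end) with an inclusive end index,
# and the submission nests tag -> surface form -> list of spans.
text = "小明在北京工作"
entities = [("name", 0, 1), ("address", 3, 4)]
label = {}
for tag, start, end in entities:
    word = text[start:end + 1]  # inclusive slice, matching words[start:end + 1] above
    label.setdefault(tag, {}).setdefault(word, []).append([start, end])
# -> {'name': {'小明': [[0, 1]]}, 'address': {'北京': [[3, 4]]}}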
def main(): args = get_argparse().parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed seed_everything(args.seed) # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer, config) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
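# A quick illustration of the label maps built above: id2label and label2id are plain
# index dicts over the processor's label list (the labels here are made up).
label_list = ["O", "B-PER", "I-PER"]
id2label = {i: label for i, label in enumerate(label_list)}
label2id = {label: i for i, label in enumerate(label_list)}
assert id2label[label2id["B-PER"]] == "B-PER"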
def predict(args, model, tokenizer, label_list, prefix=""): pred_task_names = (args.task_name, ) pred_outputs_dirs = (args.output_dir, ) label_map = {i: label for i, label in enumerate(label_list)} for pred_task, pred_output_dir in zip(pred_task_names, pred_outputs_dirs): pred_dataset = load_and_cache_examples(args, pred_task, tokenizer, data_type='test') if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]: os.makedirs(pred_output_dir) args.pred_batch_size = args.per_gpu_eval_batch_size * max( 1, args.n_gpu) # Note that DistributedSampler samples randomly pred_sampler = SequentialSampler( pred_dataset) if args.local_rank == -1 else DistributedSampler( pred_dataset) pred_dataloader = DataLoader( pred_dataset, sampler=pred_sampler, batch_size=args.pred_batch_size, collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn) logger.info("******** Running prediction {} ********".format(prefix)) logger.info(" Num examples = %d", len(pred_dataset)) logger.info(" Batch size = %d", args.pred_batch_size) nb_pred_steps = 0 preds = None pbar = ProgressBar(n_total=len(pred_dataloader), desc="Predicting") for step, batch in enumerate(pred_dataloader): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3] } if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if ( 'bert' in args.model_type or 'xlnet' in args.model_type ) else None # XLM, DistilBERT and RoBERTa don't use segment_ids outputs = model(**inputs) _, logits = outputs[:2] nb_pred_steps += 1 if preds is None: if pred_task == 'copa': preds = logits.softmax(-1).detach().cpu().numpy() else: preds = logits.detach().cpu().numpy() else: if pred_task == 'copa': preds = np.append( preds, logits.softmax(-1).detach().cpu().numpy(), axis=0) else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) pbar(step) print(' ') if args.output_mode == "classification": predict_label = np.argmax(preds, axis=1) elif args.output_mode == "regression": predict_label = np.squeeze(preds) if pred_task == 'copa': predict_label = [] pred_logits = preds[:, 1] i = 0 while (i < len(pred_logits) - 1): if pred_logits[i] >= pred_logits[i + 1]: predict_label.append(0) else: predict_label.append(1) i += 2 output_submit_file = os.path.join(pred_output_dir, prefix, "test_prediction.json") output_logits_file = os.path.join(pred_output_dir, prefix, "test_logits") # Save predicted labels with open(output_submit_file, "w") as writer: for i, pred in enumerate(predict_label): json_d = {} json_d['id'] = i json_d['label'] = str(label_map[pred]) writer.write(json.dumps(json_d) + '\n') # Save intermediate prediction logits save_numpy(file_path=output_logits_file, data=preds)
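# The COPA branch above scores the two candidate answers of each question as consecutive
# examples and picks whichever has the higher positive-class probability. A minimal
# sketch of that pairing rule over the column of positive scores:
import numpy as np

def copa_decisions(pos_scores):
    # pos_scores holds one score per candidate; candidates come in pairs
    # (choice 0, choice 1) per question, exactly as the while-loop above iterates.
    return [0 if pos_scores[i] >= pos_scores[i + 1] else 1
            for i in range(0, len(pos_scores) - 1, 2)]

print(copa_decisions(np.array([0.9, 0.2, 0.1, 0.7])))  # -> [0, 1]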