def main():
    parser = ArgumentParser()
    parser.add_argument("--arch", default='bert', type=str)
    parser.add_argument("--do_data", action='store_true')
    parser.add_argument("--do_train", action='store_true')
    parser.add_argument("--do_test", action='store_true')
    parser.add_argument("--save_best", action='store_true')
    parser.add_argument("--do_lower_case", action='store_true')
    parser.add_argument('--data_name', default='train', type=str)
    parser.add_argument("--epochs", default=4, type=int)
    parser.add_argument("--resume_path", default='', type=str)
    parser.add_argument("--mode", default='max', type=str)
    parser.add_argument("--monitor", default='valid_f1', type=str)
    parser.add_argument("--valid_size", default=0.2, type=float)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--sorted", default=1, type=int, help='1: True, 0: False')
    parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument('--eval_batch_size', default=8, type=int)
    parser.add_argument("--train_max_seq_len", default=256, type=int)
    parser.add_argument("--eval_max_seq_len", default=256, type=int)
    parser.add_argument('--loss_scale', type=float, default=0)
    # warmup_proportion is a fraction of the total optimizer steps, so it must
    # be a float (the original declared type=int, which would truncate any
    # value passed on the command line to 0).
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--weight_decay", default=0.01, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--grad_clip", default=1.0, type=float)
    parser.add_argument("--learning_rate", default=2e-5, type=float)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--fp16_opt_level', type=str, default='O1')
    args = parser.parse_args()

    config['checkpoint_dir'] = config['checkpoint_dir'] / args.arch
    config['checkpoint_dir'].mkdir(exist_ok=True)
    # Good practice: save your training arguments together with the trained model
    torch.save(args, config['checkpoint_dir'] / 'training_args.bin')
    seed_everything(args.seed)
    init_logger(log_file=config['log_dir'] / f"{args.arch}.log")
    logger.info("Training/evaluation parameters %s", args)

    if args.do_data:
        from pybert.io.task_data import TaskData
        processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                                  do_lower_case=args.do_lower_case)
        label_list = processor.get_labels()
        label2id = {label: i for i, label in enumerate(label_list)}
        data = TaskData()
        targets, sentences = data.read_data(raw_data_path=config['raw_data_path'],
                                            preprocessor=None,
                                            is_train=True,
                                            label2id=label2id)
        data.train_val_split(X=sentences, y=targets, shuffle=True, stratify=targets,
                             valid_size=args.valid_size,
                             data_dir=config['data_dir'],
                             data_name=args.data_name)
    if args.do_train:
        run_train(args)
    if args.do_test:
        run_test(args)
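# The args namespace saved above can be reloaded later to inspect or reproduce
# a run. A minimal sketch; the checkpoint path is illustrative. Note that on
# PyTorch >= 2.6 torch.load defaults to weights_only=True, so unpickling an
# argparse.Namespace requires weights_only=False.
import torch

saved_args = torch.load('pybert/output/checkpoints/bert/training_args.bin',
                        weights_only=False)  # hypothetical path
print(saved_args.learning_rate, saved_args.epochs)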
def main():
    parser = ArgumentParser()
    parser.add_argument("--arch", default='bert', type=str)
    parser.add_argument("--do_data", action='store_true')
    parser.add_argument("--train", action='store_true')
    parser.add_argument("--test", action='store_true')
    parser.add_argument("--save_best", action='store_true')
    parser.add_argument("--do_lower_case", action='store_true')
    parser.add_argument('--data_name', default='job_dataset', type=str)
    parser.add_argument("--epochs", default=10, type=int)
    parser.add_argument("--resume_path", default='', type=str)
    parser.add_argument("--test_path", default='', type=str)
    parser.add_argument("--mode", default='min', type=str)
    parser.add_argument("--monitor", default='valid_loss', type=str)
    parser.add_argument("--valid_size", default=0.05, type=float)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--sorted", default=1, type=int, help='1: True, 0: False')
    parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument("--train_batch_size", default=4, type=int)
    parser.add_argument('--eval_batch_size', default=4, type=int)
    parser.add_argument("--train_max_seq_len", default=256, type=int)
    parser.add_argument("--eval_max_seq_len", default=256, type=int)
    parser.add_argument('--loss_scale', type=float, default=0)
    # warmup_proportion is a fraction of total steps, so it must be a float
    # (type=int would truncate any value passed on the command line to 0).
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--weight_decay", default=0.01, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--grad_clip", default=1.0, type=float)
    parser.add_argument("--learning_rate", default=1.0e-4, type=float)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--fp16_opt_level', type=str, default='O1')
    # NOTE: argparse's type=bool converts any non-empty string (including
    # "False") to True; prefer action='store_true' for new boolean flags.
    parser.add_argument('--predict_labels', type=bool, default=False)
    parser.add_argument('--predict_idx', type=str, default="0",
                        help=' "idx" or "start-end" or "all" ')
    args = parser.parse_args()

    config['checkpoint_dir'] = config['checkpoint_dir'] / args.arch
    config['checkpoint_dir'].mkdir(exist_ok=True)
    torch.save(args, config['checkpoint_dir'] / 'training_args.bin')
    seed_everything(args.seed)
    init_logger(log_file=config['log_dir'] / f"{args.arch}.log")
    logger.info("Training/evaluation parameters %s", args)

    if args.do_data:
        from pybert.io.task_data import TaskData
        data = TaskData()
        targets, sentences = data.read_data(raw_data_path=config['raw_data_path'],
                                            preprocessor=EnglishPreProcessor(),
                                            is_train=True)
        data.train_val_split(X=sentences, y=targets, shuffle=False, stratify=False,
                             valid_size=args.valid_size,
                             data_dir=config['data_dir'],
                             data_name=args.data_name)
    if args.train:
        run_train(args)
    if args.test:
        run_test(args)
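# Why --warmup_proportion must be a float: warmup schedulers such as
# WarmupLinearSchedule take an absolute number of warmup steps, which is
# derived from the proportion of total optimizer steps. A minimal sketch;
# num_train_examples is a hypothetical stand-in for the dataset size.
def warmup_steps_from_proportion(num_train_examples, train_batch_size,
                                 gradient_accumulation_steps, epochs,
                                 warmup_proportion=0.1):
    # Total optimizer steps: batches per epoch, divided by the accumulation
    # factor, times the number of epochs.
    steps_per_epoch = num_train_examples // (train_batch_size * gradient_accumulation_steps)
    t_total = steps_per_epoch * epochs
    return int(t_total * warmup_proportion)

# e.g. 100k examples, batch 4, no accumulation, 10 epochs -> 25,000 warmup steps
assert warmup_steps_from_proportion(100_000, 4, 1, 10) == 25_000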
def main():
    parser = ArgumentParser()
    parser.add_argument("--arch", default='bert', type=str)
    parser.add_argument("--do_data", action='store_true')
    parser.add_argument("--do_train", action='store_true')
    parser.add_argument("--do_test", action='store_true')
    parser.add_argument("--save_best", action='store_true')
    parser.add_argument("--do_lower_case", action='store_true')
    parser.add_argument('--data_name', default='kaggle', type=str)
    parser.add_argument("--mode", default='min', type=str)
    parser.add_argument("--monitor", default='valid_loss', type=str)
    parser.add_argument("--epochs", default=20, type=int)
    parser.add_argument("--resume_path", default='', type=str)
    parser.add_argument("--predict_checkpoints", type=int, default=0)
    parser.add_argument("--valid_size", default=0.2, type=float)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--sorted", default=1, type=int, help='1: True, 0: False')
    parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument('--eval_batch_size', default=8, type=int)
    parser.add_argument("--train_max_seq_len", default=256, type=int)
    parser.add_argument("--eval_max_seq_len", default=256, type=int)
    parser.add_argument('--loss_scale', type=float, default=0)
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--weight_decay", default=0.01, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--grad_clip", default=1.0, type=float)
    parser.add_argument("--learning_rate", default=2e-5, type=float)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--fp16_opt_level', type=str, default='O1')
    args = parser.parse_args()

    init_logger(log_file=config['log_dir'] /
                f'{args.arch}-{time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())}.log')
    config['checkpoint_dir'] = config['checkpoint_dir'] / args.arch
    config['checkpoint_dir'].mkdir(exist_ok=True)
    # Good practice: save your training arguments together with the trained model
    torch.save(args, config['checkpoint_dir'] / 'training_args.bin')
    seed_everything(args.seed)
    logger.info("Training/evaluation parameters %s", args)

    # Hardcoded overrides for this run; they take precedence over the CLI
    # flags parsed above. Note that args was already saved to
    # training_args.bin before this point, so the saved file does not
    # reflect these overrides.
    args.save_best = False
    args.do_train = True
    args.resume_path = 'pybert/output/checkpoints/bert/checkpoint-epoch-3'
    args.do_lower_case = True

    if args.do_data:
        from pybert.io.task_data import TaskData
        data = TaskData()
        targets, sentences = data.read_data(raw_data_path=config['raw_data_path'],
                                            preprocessor=EnglishPreProcessor(),
                                            is_train=True)
        data.train_val_split(X=sentences, y=targets, shuffle=True, stratify=False,
                             valid_size=args.valid_size,
                             data_dir=config['data_dir'],
                             data_name=args.data_name)
    if args.do_train:
        run_train(args)
    if args.do_test:
        run_test(args)
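# The --fp16 / --fp16_opt_level flags correspond to NVIDIA apex mixed
# precision. A minimal sketch of how run_train presumably wires them up;
# it assumes apex is installed, and model/optimizer are placeholders.
def maybe_enable_fp16(model, optimizer, args):
    if args.fp16:
        from apex import amp  # requires https://github.com/NVIDIA/apex
        # 'O1' patches whitelisted ops to fp16 while keeping master weights in fp32.
        model, optimizer = amp.initialize(model, optimizer,
                                          opt_level=args.fp16_opt_level)
    return model, optimizer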
import os
import random
import json
import collections
import numpy as np
from argparse import ArgumentParser
from pybert.common.tools import save_json, logger, init_logger, seed_everything
from pybert.configs.base import config
from pybert.configs.bert_config import bert_base_config
from pybert.io.vocabulary import Vocabulary

MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
init_logger(log_file=config['log_dir'] / "pregenerate_training_data.log")

def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    index = 0
    with open(vocab_file, "r", encoding="utf-8") as reader:
        while True:
            token = reader.readline()
            if not token:
                break
            token = token.strip()
            vocab[token] = index
            index += 1
    return list(vocab.keys())

def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
    """Creates the predictions for the masked LM objective. This is mostly
    copied from the Google BERT repo, but refactored to remove unnecessary
    variables."""
    # The body below follows the standard BERT pregeneration masking logic:
    # select up to max_predictions_per_seq positions (about masked_lm_prob of
    # the sequence); of those, 80% become [MASK], 10% keep the original
    # token, and 10% become a random vocabulary word.
    cand_indices = [i for i, token in enumerate(tokens)
                    if token not in ("[CLS]", "[SEP]")]
    num_to_mask = min(max_predictions_per_seq,
                      max(1, int(round(len(tokens) * masked_lm_prob))),
                      len(cand_indices))
    mask_indices = sorted(random.sample(cand_indices, num_to_mask))
    masked_token_labels = []
    for index in mask_indices:
        if random.random() < 0.8:
            masked_token = "[MASK]"
        elif random.random() < 0.5:
            masked_token = tokens[index]  # keep the original token
        else:
            masked_token = random.choice(vocab_list)  # random replacement
        masked_token_labels.append(tokens[index])
        tokens[index] = masked_token
    return tokens, mask_indices, masked_token_labels
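# Illustrative only: run the masking on a tiny, already-tokenized sequence.
# The toy vocabulary below is a stand-in for a real BERT vocab loaded with
# load_vocab.
def _demo_masking():
    random.seed(42)
    vocab_list = ["the", "cat", "sat", "on", "mat",
                  "[MASK]", "[CLS]", "[SEP]", "[UNK]"]
    tokens = ["[CLS]", "the", "cat", "sat", "on", "the", "mat", "[SEP]"]
    masked, indices, labels = create_masked_lm_predictions(
        tokens, masked_lm_prob=0.15, max_predictions_per_seq=2,
        vocab_list=vocab_list)
    # masked has one position replaced; indices/labels record where and what.
    print(masked, indices, labels)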
def main():
    parser = ArgumentParser()
    parser.add_argument("--arch", default='bert', type=str)
    parser.add_argument("--do_data", action='store_true')
    parser.add_argument("--do_train", action='store_true')
    parser.add_argument("--do_test", action='store_true')
    parser.add_argument("--save_best", action='store_true')
    parser.add_argument("--do_lower_case", action='store_true')
    parser.add_argument('--data_name', default='kaggle', type=str)
    parser.add_argument("--epochs", default=6, type=int)
    parser.add_argument("--resume_path", default='', type=str)
    parser.add_argument("--mode", default='min', type=str)
    parser.add_argument("--monitor", default='valid_loss', type=str)
    parser.add_argument("--valid_size", default=0.2, type=float)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--sorted", default=1, type=int, help='1: True, 0: False')
    parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument('--eval_batch_size', default=8, type=int)
    parser.add_argument("--train_max_seq_len", default=256, type=int)
    parser.add_argument("--eval_max_seq_len", default=256, type=int)
    parser.add_argument('--loss_scale', type=float, default=0)
    # warmup_proportion is a fraction of total steps, so it must be a float
    # (type=int would truncate any value passed on the command line to 0).
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--weight_decay", default=0.01, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--grad_clip", default=1.0, type=float)
    parser.add_argument("--learning_rate", default=2e-5, type=float)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--fp16_opt_level', type=str, default='O1')
    parser.add_argument("--prob_thresh", default=0.5, type=float)
    args = parser.parse_args()

    config['checkpoint_dir'] = config['checkpoint_dir'] / args.arch
    config['checkpoint_dir'].mkdir(exist_ok=True)
    # Good practice: save your training arguments together with the trained model
    torch.save(args, config['checkpoint_dir'] / 'training_args.bin')
    seed_everything(args.seed)
    init_logger(log_file=config['log_dir'] / f"{args.arch}.log")
    logger.info("Training/evaluation parameters %s", args)

    if args.do_data:
        from pybert.io.task_data_label import TaskData
        data = TaskData()
        print("Train data path:")
        print(config['raw_data_path'])
        targets, sentences_char = data.read_data(raw_data_path=config['raw_data_path'],
                                                 preprocessor=EnglishPreProcessor(),
                                                 is_train=True)
        print("Target:")
        print(targets)
        print(" ")
        print("Sentence:")
        print(sentences_char)
        print(" ")
        data.train_val_split(X=sentences_char, y=targets,
                             valid_size=args.valid_size,
                             data_dir=config['data_dir'],
                             data_name=args.data_name)
        # Get the test data
        targets_test, sentences_char_test = data.read_data(raw_data_path=config['test_path'],
                                                           preprocessor=EnglishPreProcessor(),
                                                           is_train=True)
        print(targets_test)
        data.save_test_data(X=sentences_char_test, y=targets_test,
                            data_dir=config['data_dir'],
                            data_name=args.data_name)
    if args.do_train:
        run_train(args)
    if args.do_test:
        run_test(args)
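# What --prob_thresh is presumably for: turning per-label sigmoid
# probabilities into binary predictions in a multi-label setup. A hedged
# sketch, not the repo's actual inference code.
import numpy as np

def binarize(probs, prob_thresh=0.5):
    """probs: array of shape (n_examples, n_labels) with values in [0, 1]."""
    return (np.asarray(probs) >= prob_thresh).astype(int)

# e.g. binarize([[0.9, 0.2], [0.4, 0.7]], 0.5) -> [[1, 0], [0, 1]]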
def main():
    parser = ArgumentParser()
    parser.add_argument("--arch", default='bert', type=str)
    parser.add_argument("--do_data", action='store_true')
    parser.add_argument("--do_train", action='store_true')
    parser.add_argument("--do_test", action='store_true')
    parser.add_argument("--save_best", action='store_true')
    parser.add_argument("--do_lower_case", action='store_true')
    # parser.add_argument('--data_name', default='HPC', type=str)
    parser.add_argument("--mode", default='min', type=str)
    parser.add_argument("--monitor", default='valid_loss', type=str)
    parser.add_argument("--epochs", default=10, type=int)
    parser.add_argument("--resume_path", default='', type=str)
    parser.add_argument("--predict_checkpoints", type=int, default=0)
    parser.add_argument("--valid_size", default=0.2, type=float)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--sorted", default=1, type=int, help='1: True, 0: False')
    parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument('--eval_batch_size', default=8, type=int)
    parser.add_argument("--train_max_seq_len", default=256, type=int)
    parser.add_argument("--eval_max_seq_len", default=256, type=int)
    parser.add_argument('--loss_scale', type=float, default=0)
    parser.add_argument("--warmup_proportion", default=0.1, type=float)
    parser.add_argument("--weight_decay", default=0.01, type=float)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)
    parser.add_argument("--grad_clip", default=1.0, type=float)
    parser.add_argument("--learning_rate", default=2e-5, type=float)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--fp16_opt_level', type=str, default='O1')
    args = parser.parse_args()

    init_logger(log_file=config['log_dir'] /
                f'{args.arch}-{time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())}.log')
    config['checkpoint_dir'] = config['checkpoint_dir'] / args.arch
    config['checkpoint_dir'].mkdir(exist_ok=True)
    # Good practice: save your training arguments together with the trained model
    torch.save(args, config['checkpoint_dir'] / 'training_args.bin')
    seed_everything(args.seed)
    logger.info("Training/evaluation parameters %s", args)

    if args.do_data:
        data_names = []
        train_sentences_all = []
        train_target_all = []
        from pybert.io.task_data import TaskData
        data = TaskData()
        total_valid = 0
        for filename in os.listdir(config['summary_path']):
            if filename == ".DS_Store" or filename == "summary":
                continue
            filename_int = int(filename.split('.')[0].split('_')[-1])
            if filename_int > 3500:
                try:
                    raw_data_path = os.path.join(config['summary_path'], filename)
                    # train_targets, train_sentences, val_targets, val_sentences = data.read_data(
                    #     config, raw_data_path=raw_data_path, preprocessor=EnglishPreProcessor())
                    train_targets, train_sentences, val_targets, val_sentences = data.read_data(
                        config, raw_data_path=raw_data_path)
                    train_sentences_all = train_sentences_all + train_sentences
                    train_target_all = train_target_all + train_targets
                    total_valid = len(train_target_all)
                    print("valid number: ", total_valid)
                    # data.save_pickle(train_sentences, train_targets, data_dir=config['data_dir'],
                    #                  data_name=filename.split('.')[0].split('_')[-1], is_train=True)
                    # data.save_pickle(val_sentences, val_targets, data_dir=config['data_dir'],
                    #                  data_name=filename.split('.')[0].split('_')[-1], is_train=False)
                    data_names.append(filename.split('.')[0].split('_')[-1])
                except Exception:
                    # Skip summary files that cannot be parsed.
                    pass
        total_valid = len(train_target_all)
        print("valid number: ", total_valid)
        data.save_pickle(train_sentences_all, train_target_all,
                         data_dir=config['data_dir'],
                         data_name="all_valid", is_train=False)

    # with open(config['data_name'], 'w') as f:
    #     json.dump(data_names, f)
    with open(config['data_name'], 'r') as f:
        data_names = json.load(f)
    if args.do_train:
        run_train(args, data_names)
    if args.do_test:
        run_test(args)
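# How --gradient_accumulation_steps typically enters the training loop: scale
# each mini-batch loss down and only step the optimizer every N batches, so N
# small batches behave like one large one. A sketch under assumed names
# (model, optimizer, data_loader, loss_fn are placeholders).
def train_epoch(model, optimizer, data_loader, loss_fn, accumulation_steps):
    model.train()
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(data_loader):
        loss = loss_fn(model(inputs), targets)
        # Average the loss over the virtual (accumulated) batch.
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()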
def main():
    parser = ArgumentParser()
    parser.add_argument("--arch", default='bert', type=str)  # which pretrained language model to use
    parser.add_argument("--do_data", action='store_true')  # run the data split step
    parser.add_argument("--do_train", action='store_true')  # run model training
    parser.add_argument("--do_test", action='store_true')  # run model inference
    parser.add_argument("--save_best", action='store_true')  # keep only the best model
    parser.add_argument("--do_lower_case", action='store_true')
    parser.add_argument('--data_name', default='ccks', type=str)  # dataset name
    parser.add_argument("--mode", default='min', type=str)  # whether the monitored metric should be minimized or maximized
    parser.add_argument("--monitor", default='valid_loss', type=str)
    parser.add_argument("--task_type", default='base', type=str)
    parser.add_argument("--epochs", default=4, type=int)
    parser.add_argument("--resume_path", default='', type=str)  # resume path: load the model from a pretrained checkpoint
    parser.add_argument("--predict_checkpoints", type=int, default=0)
    parser.add_argument("--valid_size", default=0.2, type=float)  # size of the validation split
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--sorted", default=1, type=int, help='1: True, 0: False')  # whether to sort examples by sequence length
    parser.add_argument("--n_gpu", type=str, default='0', help='"0,1,.." or "0" or "" ')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)  # number of gradient accumulation steps; works around memory too small for a large batch_size
    parser.add_argument("--train_batch_size", default=8, type=int)  # training batch size
    parser.add_argument('--eval_batch_size', default=8, type=int)  # evaluation batch size
    parser.add_argument("--train_max_seq_len", default=256, type=int)  # maximum sequence length for training
    parser.add_argument("--eval_max_seq_len", default=256, type=int)  # maximum sequence length for evaluation
    parser.add_argument('--loss_scale', type=float, default=0)  # TODO: understand what loss scale does
    parser.add_argument("--warmup_proportion", default=0.1, type=float)  # warmup proportion for the learning rate schedule
    parser.add_argument("--weight_decay", default=0.01, type=float)  # TODO: understand what weight decay means
    parser.add_argument("--adam_epsilon", default=1e-8, type=float)  # epsilon parameter of the Adam optimizer
    parser.add_argument("--grad_clip", default=1.0, type=float)  # TODO: understand what grad clip means
    parser.add_argument("--learning_rate", default=2e-5, type=float)  # learning rate
    parser.add_argument('--seed', type=int, default=42)  # random seed
    parser.add_argument('--fp16', action='store_true')  # TODO: understand what fp16 is
    parser.add_argument('--fp16_opt_level', type=str, default='O1')
    args = parser.parse_args()

    # Initialize the logger.
    config['log_dir'].mkdir(exist_ok=True)  # not present in the original source
    init_logger(log_file=config['log_dir'] /
                f'{args.arch}-{time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())}.log')
    config['checkpoint_dir'] = config['checkpoint_dir'] / args.arch / args.task_type  # adjust where the output goes
    config['checkpoint_dir'].mkdir(exist_ok=True)
    BASE_DIR = Path('pybert')
    config['raw_data_path'] = BASE_DIR / f'dataset/train_{args.task_type}_sample.csv'
    config['test_path'] = BASE_DIR / f'dataset/test_{args.task_type}.csv'
    config['figure_dir'] = config['figure_dir'] / f'{args.task_type}'
    config['figure_dir'].mkdir(exist_ok=True)
    # Adjust the file paths dynamically:
    # BASE_DIR = Path('pybert')
    # if args.task_type == 'trans':
    #     config['raw_data_path'] = BASE_DIR / 'dataset/train_trans_sample.csv'
    #     config['test_path'] = BASE_DIR / 'dataset/test_trans.csv'
    #     config['figure_dir'] = config['figure_dir'] / f'{args.task_type}'
    #     config['figure_dir'].mkdir(exist_ok=True)
    # elif args.task_type == 'base':
    #     config['raw_data_path'] = BASE_DIR / 'dataset/train_base_sample.csv'
    #     config['test_path'] = BASE_DIR / 'dataset/test_base.csv'
    #     config['figure_dir'] = config['figure_dir'] / f'{args.task_type}'
    #     config['figure_dir'].mkdir(exist_ok=True)
    # else:
    #     raise ValueError(f"Invalid task_type {args.task_type}")

    # Good practice: save your training arguments together with the trained model
    torch.save(args, config['checkpoint_dir'] / 'training_args.bin')
    seed_everything(args.seed)  # one call that sets every random seed
    logger.info("Training/evaluation parameters %s", args)

    if args.do_data:
        from pybert.io.task_data import TaskData
        data = TaskData()
        ids, targets, sentences = data.read_data(raw_data_path=config['raw_data_path'],
                                                 preprocessor=ChinesePreProcessor(),
                                                 is_train=True)
        data.train_val_split(X=sentences, y=targets, shuffle=True, stratify=False,
                             valid_size=args.valid_size,
                             data_dir=config['data_dir'],
                             data_name=args.data_name,
                             task_type=args.task_type)  # added the task_type parameter
    if args.do_train:
        run_train(args)
    if args.do_test:
        run_test(args)
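# What train_val_split boils down to with shuffle=True, stratify=False: a
# plain random holdout of valid_size. A sketch using scikit-learn rather than
# the repo's own helper, for comparison.
from sklearn.model_selection import train_test_split

def simple_split(sentences, targets, valid_size=0.2, seed=42):
    # Returns (train_X, valid_X, train_y, valid_y).
    return train_test_split(sentences, targets,
                            test_size=valid_size,
                            shuffle=True,
                            random_state=seed)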
from collections import namedtuple
from tempfile import TemporaryDirectory
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tools import AverageMeter
from pybert.common.tools import logger, init_logger, seed_everything
from pybert.configs.base import config
from pybert.train.metrics import LMAccuracy
from pybert.model.bert.modeling_bert import BertForMaskedLM, BertConfig, CONFIG_NAME, WEIGHTS_NAME
from pybert.model.bert.tokenization_bert import BertTokenizer
from pybert.model.bert.optimization import AdamW, WarmupLinearSchedule

InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids")
init_logger(log_file=config['log_dir'] / "train_bert_model.log")

class CustomTokenizer(BertTokenizer):
    def __init__(self, vocab_file, do_lower_case=True):
        super().__init__(vocab_file=str(vocab_file), do_lower_case=do_lower_case)
        self.vocab_file = str(vocab_file)
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        # Character-level tokenization: emit each in-vocabulary character as
        # its own token and map everything else to [UNK] (the out-of-vocab
        # fallback completes the truncated original).
        _tokens = []
        for c in text:
            if self.do_lower_case:
                c = c.lower()
            if c in self.vocab:
                _tokens.append(c)
            else:
                _tokens.append('[UNK]')
        return _tokens
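# Usage sketch for the character-level tokenizer above; the vocab path is
# hypothetical and must point at a real BERT vocab.txt.
def _demo_tokenizer(vocab_file='pybert/pretrain/bert/base-uncased/vocab.txt'):
    tokenizer = CustomTokenizer(vocab_file=vocab_file)
    # Returns one token per character; out-of-vocabulary characters -> [UNK].
    return tokenizer.tokenize("Hello!")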