def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] / f"{args.data_name}.train.pkl")
    train_examples = processor.create_examples(
        lines=train_data,
        example_type='train',
        cached_examples_file=config['data_dir'] / f"cached_train_examples_{args.arch}")
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_train_features_{}_{}".format(
            args.train_max_seq_len, args.arch))
    train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    valid_data = processor.get_dev(config['data_dir'] / f"{args.data_name}.valid.pkl")
    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type='valid',
        cached_examples_file=config['data_dir'] / f"cached_valid_examples_{args.arch}")
    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_valid_features_{}_{}".format(
            args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiClass.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        model = BertForMultiClass.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))

    t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    lr_scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'], mode=args.mode,
                                       monitor=args.monitor, arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps *
                (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(n_gpu=args.n_gpu,
                      model=model,
                      epochs=args.epochs,
                      logger=logger,
                      criterion=CrossEntropy(),
                      optimizer=optimizer,
                      lr_scheduler=lr_scheduler,
                      early_stopping=None,
                      training_monitor=train_monitor,
                      fp16=args.fp16,
                      resume_path=args.resume_path,
                      grad_clip=args.grad_clip,
                      model_checkpoint=model_checkpoint,
                      gradient_accumulation_steps=args.gradient_accumulation_steps,
                      evaluate=F1Score(),
                      class_report=ClassReport(target_names=[id2label[x] for x in range(len(label2id))]))
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader, seed=args.seed)
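# --- Illustrative aside (not part of the original script): the no_decay grouping used
# above, shown on a toy module so the split is easy to inspect. The toy class, names,
# and hyperparameters are assumptions for the example only; torch.optim.AdamW is used
# here, while the script above relies on the transformers AdamW, which accepts the
# same parameter groups.
import torch
from torch import nn

class _ToyEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)   # named like the BERT submodule
        self.classifier = nn.Linear(8, 2)

_toy = _ToyEncoder()
_no_decay = ['bias', 'LayerNorm.weight']
_grouped = [
    {'params': [p for n, p in _toy.named_parameters() if not any(nd in n for nd in _no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in _toy.named_parameters() if any(nd in n for nd in _no_decay)],
     'weight_decay': 0.0},
]
_opt = torch.optim.AdamW(_grouped, lr=2e-5, eps=1e-8)
# dense.weight and classifier.weight get weight decay; every bias and both
# LayerNorm parameters land in the second, decay-free group.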
def main():
    # **************************** Log initial data ***********************
    logger = init_logger(log_name=config['model']['arch'], log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = f"cuda:{config['train']['n_gpu'][0]}" if len(config['train']['n_gpu']) else 'cpu'
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}

    DT = DataTransformer(logger=logger, seed=config['train']['seed'])
    targets, sentences = DT.read_data(raw_data_path=config['data']['raw_data_path'],
                                      preprocessor=EnglishPreProcessor(),
                                      is_train=True)
    train, valid = DT.train_val_split(X=sentences, y=targets,
                                      save=True, shuffle=True, stratify=False,
                                      valid_size=config['train']['valid_size'],
                                      train_path=config['data']['train_file_path'],
                                      valid_path=config['data']['valid_file_path'])

    tokenizer = BertTokenizer(vocab_file=config['pretrained']['bert']['vocab_path'],
                              do_lower_case=config['train']['do_lower_case'])
    # train
    train_dataset = CreateDataset(data=train,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data=valid,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='valid')
    # train loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # validation set loader
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** initialize model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(config['pretrained']['bert']['bert_model_dir'],
                                     cache_dir=config['output']['cache_dir'],
                                     num_classes=len(id2label))

    # ************************** set params *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    num_train_steps = int(len(train_dataset.examples) / config['train']['batch_size']
                          / config['train']['gradient_accumulation_steps']
                          * config['train']['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total spent warming up
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['train']['learning_rate'],
                         warmup=config['train']['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # model checkpoint
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['output']['checkpoint_dir'],
                                       mode=config['callbacks']['mode'],
                                       monitor=config['callbacks']['monitor'],
                                       save_best_only=config['callbacks']['save_best_only'],
                                       arch=config['model']['arch'],
                                       logger=logger)
    # monitor
    train_monitor = TrainingMonitor(file_dir=config['output']['figure_dir'],
                                    arch=config['model']['arch'])
    # learning rate scheduler
    lr_scheduler = BertLR(optimizer=optimizer,
                          learning_rate=config['train']['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['train']['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    train_configs = {
        'model': model,
        'logger': logger,
        'optimizer': optimizer,
        'resume': config['train']['resume'],
        'epochs': config['train']['epochs'],
        'n_gpu': config['train']['n_gpu'],
        'gradient_accumulation_steps': config['train']['gradient_accumulation_steps'],
        'epoch_metrics': [F1Score(average='micro', task_type='binary')],
        'batch_metrics': [AccuracyThresh(thresh=0.5)],
        'criterion': BCEWithLogLoss(),
        'model_checkpoint': model_checkpoint,
        'training_monitor': train_monitor,
        'lr_scheduler': lr_scheduler,
        'early_stopping': None,
        'verbose': 1
    }
    trainer = Trainer(train_configs=train_configs)
    trainer.train(train_data=train_loader, valid_data=valid_loader)
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
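# --- Worked example (illustrative numbers, not taken from the config above) of the
# num_train_steps arithmetic used here: with 10,000 training examples, batch_size=32,
# gradient_accumulation_steps=4 and 3 epochs, the optimizer steps once per 4 batches,
# so BertAdam's schedule sees roughly 234 updates in total.
_n_examples, _bsz, _accum, _n_epochs = 10_000, 32, 4, 3
_steps = int(_n_examples / _bsz / _accum * _n_epochs)
print(_steps)  # 234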
def run_train(args, data_names):
    # --------- data
    # The per-chunk data pipeline that the other run_train variants build inline here
    # (BertProcessor caching, samplers, DataLoaders) has been moved into the
    # get_dataloader()/get_valid_dataloader() helpers called in the loop below.
    processor = BertProcessor()
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        # model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
        model = BertForMultiLable.from_pretrained("bert-base-multilingual-cased", num_labels=len(label_list))

    # The dataloaders are built lazily per data chunk, so the usual
    # len(train_dataloader) / gradient_accumulation_steps * epochs estimate is not
    # available here; t_total is fixed instead.
    t_total = 200000
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'], mode=args.mode,
                                       monitor=args.monitor, arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps *
                (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args=args, model=model, logger=logger, criterion=BCEWithLogLoss(),
                      optimizer=optimizer, scheduler=scheduler, early_stopping=None,
                      training_monitor=train_monitor, model_checkpoint=model_checkpoint,
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      epoch_metrics=[AUC(average='micro', task_type='binary'),
                                     MultiLabelReport(id2label=id2label),
                                     F1Score(average='micro', task_type='binary')])

    trainer.model.zero_grad()
    seed_everything(trainer.args.seed)  # added here for reproducibility
    iter_num = 0
    valid_dataloader = get_valid_dataloader(args)
    for epoch in range(trainer.start_epoch, trainer.start_epoch + trainer.args.epochs):
        trainer.logger.info(f"Epoch {epoch}/{trainer.args.epochs}")
        update_epoch = True
        for i, data_name in enumerate(data_names):
            # skip data chunks beyond 3500
            filename_int = int(data_name)
            if filename_int > 3500:
                continue
            trainer.logger.info(f"Epoch {epoch} - summary {i + 1}/{len(data_names)}: summary_{data_name}")
            train_dataloader = get_dataloader(args, data_name)
            train_log = trainer.train(train_data=train_dataloader, epoch=update_epoch)
            update_epoch = False
            iter_num += 1
            # validate, log and checkpoint every 50 chunks
            if iter_num % 50 == 0:
                valid_log = trainer.valid_epoch(valid_dataloader)
                logs = dict(valid_log)
                show_info = f'\nEpoch: {epoch} - ' + "-".join(
                    [f' {key}: {value:.4f} ' for key, value in logs.items()])
                trainer.logger.info(show_info)
                # save training curves
                if trainer.training_monitor:
                    trainer.training_monitor.epoch_step(logs)
                # save model
                if trainer.model_checkpoint:
                    state = trainer.save_info(iter_num, best=logs[trainer.model_checkpoint.monitor])
                    trainer.model_checkpoint.bert_epoch_step(
                        current=logs[trainer.model_checkpoint.monitor], state=state)
                # early stopping
                if trainer.early_stopping:
                    trainer.early_stopping.epoch_step(
                        epoch=epoch, current=logs[trainer.early_stopping.monitor])
                    if trainer.early_stopping.stop_training:
                        break
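# --- Hypothetical sketch of the get_dataloader helper referenced above (its definition
# is not included in this file). It assumes the same BertProcessor caching pattern and
# module-level imports (config, SequentialSampler, RandomSampler, DataLoader, collate_fn)
# used by the other run_train variants; the file-naming convention is illustrative only.
def get_dataloader(args, data_name):
    processor = BertProcessor()
    data = processor.get_train(config['data_dir'] / f"{data_name}.train.pkl")
    examples = processor.create_examples(
        lines=data,
        example_type='train',
        cached_examples_file=config['data_dir'] / f"cached_train_examples_{data_name}_{args.arch}")
    features = processor.create_features(
        examples=examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_train_features_{}_{}_{}".format(
            data_name, args.train_max_seq_len, args.arch))
    dataset = processor.create_dataset(features, is_sorted=args.sorted)
    sampler = SequentialSampler(dataset) if args.sorted else RandomSampler(dataset)
    return DataLoader(dataset, sampler=sampler, batch_size=args.train_batch_size,
                      collate_fn=collate_fn)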
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    label_list = processor.get_labels(args.task_type)
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] / f"{args.data_name}.train.{args.task_type}.pkl")
    train_examples = processor.create_examples(
        lines=train_data,
        example_type=f'train_{args.task_type}',
        cached_examples_file=config['data_dir'] / f"cached_train_{args.task_type}_examples_{args.arch}")
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_train_{}_features_{}_{}".format(
            args.task_type, args.train_max_seq_len, args.arch))
    train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                  batch_size=args.train_batch_size, collate_fn=collate_fn)

    valid_data = processor.get_dev(config['data_dir'] / f"{args.data_name}.valid.{args.task_type}.pkl")
    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type=f'valid_{args.task_type}',
        cached_examples_file=config['data_dir'] / f"cached_valid_{args.task_type}_examples_{args.arch}")
    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_valid_{}_features_{}_{}".format(
            args.task_type, args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler,
                                  batch_size=args.eval_batch_size, collate_fn=collate_fn)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        if args.task_type == 'trans':
            model = BertForMultiLable_Fewshot.from_pretrained(
                Path('pybert/output/checkpoints/bert/base'), num_labels=len(label_list))
            # model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
        else:
            model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))

    t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    # optimizer and scheduler setup
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    # TODO: understand TrainingMonitor; it appears to be a plotting utility that records per-epoch results
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'], mode=args.mode,
                                       monitor=args.monitor, arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps *
                (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args=args,
                      model=model,
                      logger=logger,
                      criterion=BCEWithLogLoss(),
                      optimizer=optimizer,
                      scheduler=scheduler,
                      early_stopping=None,
                      training_monitor=train_monitor,
                      model_checkpoint=model_checkpoint,
                      # batch-level metrics, computed after every loss.backward(); distinct from the loss itself
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      # epoch-level metrics
                      epoch_metrics=[AUC(average='micro', task_type='binary'),
                                     MultiLabelReport(id2label=id2label),
                                     F1Score(task_type='binary', average='micro', search_thresh=True)])
    # TODO: consider whether F1-score should replace the monitored metric
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader)
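# --- Illustrative aside (not part of the original script): how the warmup/linear-decay
# schedule built above evolves over training. The toy optimizer, learning rate, and step
# counts are assumptions for the example; only the public transformers/PyTorch APIs are used.
import torch
from transformers import get_linear_schedule_with_warmup

_params = [torch.nn.Parameter(torch.zeros(1))]
_opt = torch.optim.AdamW(_params, lr=2e-5)
_sched = get_linear_schedule_with_warmup(_opt, num_warmup_steps=10, num_training_steps=100)
for _step in range(100):
    _opt.step()
    _sched.step()
    if _step in (0, 9, 50, 99):
        # ramps up over the first 10 steps, then decays linearly to 0
        print(_step, _sched.get_last_lr())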
def run_test(args):
    # TODO: also evaluate the training set with micro F1-score
    from pybert.io.task_data import TaskData
    from pybert.test.predictor import Predictor

    data = TaskData()
    ids, targets, sentences = data.read_data(raw_data_path=config['test_path'],
                                             preprocessor=ChinesePreProcessor(),
                                             is_train=True)  # is_train=True so targets are returned
    lines = list(zip(sentences, targets))

    processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    label_list = processor.get_labels(args.task_type)
    id2label = {i: label for i, label in enumerate(label_list)}

    test_data = processor.get_test(lines=lines)
    test_examples = processor.create_examples(
        lines=test_data,
        example_type=f'test_{args.task_type}',
        cached_examples_file=config['data_dir'] / f"cached_test_{args.task_type}_examples_{args.arch}")
    test_features = processor.create_features(
        examples=test_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] / "cached_test_{}_features_{}_{}".format(
            args.task_type, args.eval_max_seq_len, args.arch))
    test_dataset = processor.create_dataset(test_features)
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
                                 batch_size=args.train_batch_size, collate_fn=collate_fn)

    if args.task_type == 'base':
        model = BertForMultiLable.from_pretrained(config['checkpoint_dir'], num_labels=len(label_list))
    else:
        model = BertForMultiLable_Fewshot.from_pretrained(config['checkpoint_dir'], num_labels=len(label_list))

    # ----------- predicting
    logger.info('model predicting....')
    predictor = Predictor(model=model, logger=logger, n_gpu=args.n_gpu)
    result = predictor.predict(data=test_dataloader)  # arguably better named all_logits

    # TODO: compute the F1-score; this block still needs to be tested
    f1_metric = F1Score(task_type='binary', average='micro', search_thresh=True)
    all_logits = torch.tensor(result, dtype=torch.float)   # convert to tensor
    all_labels = torch.tensor(targets, dtype=torch.long)   # convert to tensor
    f1_metric(all_logits, all_labels)                       # also prints the result
    print(f1_metric.value())

    # write the score to a file
    with open('test_output/test.log', 'a+') as f:
        f.write(str(f1_metric.value()) + "\n")

    thresh = f1_metric.thresh
    ids = np.array(ids)
    df1 = pd.DataFrame(ids, index=None)
    df2 = pd.DataFrame(result, index=None)
    all_df = pd.concat([df1, df2], axis=1)
    if args.task_type == 'base':
        all_df.columns = ['id', 'zy', 'gfgqzr', 'qs', 'tz', 'ggjc']
    else:
        all_df.columns = ['id', 'sg', 'pj', 'zb', 'qsht', 'db']
    # binarize every label column with the threshold found by F1Score
    for column in all_df.columns[1:]:
        all_df[column] = all_df[column].apply(lambda x: 1 if x > thresh else 0)
    all_df.to_csv(f"test_output/{args.task_type}/cls_out.csv", index=False)
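# --- Illustrative cross-check (an assumption, not the repo's code): the thresholded
# micro-F1 computed in run_test can be reproduced with scikit-learn, shown here on
# dummy logits and labels. This assumes F1Score applies a sigmoid and a single global
# threshold, which is how its thresh attribute is used above.
import numpy as np
from sklearn.metrics import f1_score

_rng = np.random.default_rng(0)
_dummy_logits = _rng.normal(size=(16, 5))            # 16 samples, 5 labels
_dummy_labels = _rng.integers(0, 2, size=(16, 5))    # binary multi-label targets
_thresh = 0.5                                        # stand-in for f1_metric.thresh
_probs = 1.0 / (1.0 + np.exp(-_dummy_logits))        # sigmoid over raw logits
_preds = (_probs > _thresh).astype(int)
print(f1_score(_dummy_labels, _preds, average='micro'))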
def main():
    # **************************** Basic Info ***********************
    logger = init_logger(log_name=config['arch'], log_dir=config['log_dir'])
    logger.info("seed is %d" % config['seed'])
    device = 'cuda:%d' % config['n_gpus'][0] if len(config['n_gpus']) else 'cpu'
    seed_everything(seed=config['seed'], device=device)
    logger.info('starting load data from disk')

    # split the reports
    if config['resume']:
        split_reports = SplitReports(raw_reports_dir=config['raw_reports_dir'],
                                     raw_data_path=config['raw_data_path'])
        split_reports.split()
    df = pd.read_csv(config['raw_data_path'])
    label_list = df.columns.values[2:].tolist()
    config['label_to_id'] = {label: i for i, label in enumerate(label_list)}
    config['id_to_label'] = {i: label for i, label in enumerate(label_list)}
    config['vocab_path'] = path.sep.join([config['bert_model_dir'], 'vocab.txt'])

    # **************************** Data ***********************
    data_transformer = DataTransformer(logger=logger,
                                       raw_data_path=config['raw_data_path'],
                                       label_to_id=config['label_to_id'],
                                       train_file=config['train_file_path'],
                                       valid_file=config['valid_file_path'],
                                       valid_size=config['valid_size'],
                                       seed=config['seed'],
                                       preprocess=Preprocessor(),
                                       shuffle=config['shuffle'],
                                       skip_header=True,
                                       stratify=False)
    # dataloader and pre-processing
    data_transformer.read_data()

    tokenizer = BertTokenizer(vocab_file=config['vocab_path'],
                              do_lower_case=config['do_lower_case'])
    # train
    train_dataset = CreateDataset(data_path=config['train_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data_path=config['valid_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='valid')
    # when resuming the best model, skip building the train loader
    if config['resume']:
        train_loader = [0]
    else:
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=config['batch_size'],
                                  num_workers=config['num_workers'],
                                  shuffle=True,
                                  drop_last=False,
                                  pin_memory=False)
    # valid
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model ***********************
    logger.info("initializing model")
    if config['resume']:
        with open(config['lab_dir'], 'r') as f:
            config['label_to_id'] = load(f)
    model = BertFine.from_pretrained(config['bert_model_dir'],
                                     cache_dir=config['cache_dir'],
                                     num_classes=len(config['label_to_id']))

    # ************************** Optimizer *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    num_train_steps = int(len(train_dataset.examples) / config['batch_size']
                          / config['gradient_accumulation_steps'] * config['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total spent warming up
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['learning_rate'],
                         warmup=config['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # save model
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=config['mode'],
                                       monitor=config['monitor'],
                                       save_best_only=config['save_best_only'],
                                       best_model_name=config['best_model_name'],
                                       epoch_model_name=config['epoch_model_name'],
                                       arch=config['arch'],
                                       logger=logger)
    # monitor
    train_monitor = TrainingMonitor(fig_dir=config['figure_dir'],
                                    json_dir=config['log_dir'],
                                    arch=config['arch'])
    # TensorBoard
    start_time = datetime.datetime.now().strftime('%m%d_%H%M%S')
    writer_dir = os.path.join(config['writer_dir'], config['feature-based'], start_time)
    TSBoard = WriterTensorboardX(writer_dir=writer_dir, logger=logger, enable=True)
    # learning rate scheduler
    lr_scheduler = BertLr(optimizer=optimizer,
                          lr=config['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    trainer = Trainer(model=model,
                      train_data=train_loader,
                      val_data=valid_loader,
                      optimizer=optimizer,
                      epochs=config['epochs'],
                      criterion=BCEWithLogLoss(),
                      logger=logger,
                      model_checkpoint=model_checkpoint,
                      training_monitor=train_monitor,
                      TSBoard=TSBoard,
                      resume=config['resume'],
                      lr_scheduler=lr_scheduler,
                      n_gpu=config['n_gpus'],
                      label_to_id=config['label_to_id'],
                      evaluate_auc=AUC(sigmoid=True),
                      evaluate_f1=F1Score(sigmoid=True),
                      incorrect=Incorrect(sigmoid=True))
    trainer.summary()
    trainer.train()
    # release cache
    if len(config['n_gpus']) > 0:
        torch.cuda.empty_cache()
def main():
    # **************************** Basic Info ***********************
    logger = init_logger(log_name=config['arch'], log_dir=config['log_dir'])
    logger.info("seed is %d" % config['seed'])
    device = 'cuda:%d' % config['n_gpus'][0] if len(config['n_gpus']) else 'cpu'
    seed_everything(seed=config['seed'], device=device)
    logger.info('starting load data from disk')
    config['id_to_label'] = {v: k for k, v in config['label_to_id'].items()}

    # **************************** Data ***********************
    data_transformer = DataTransformer(logger=logger,
                                       label_to_id=config['label_to_id'],
                                       train_file=config['train_file_path'],
                                       valid_file=config['valid_file_path'],
                                       valid_size=config['valid_size'],
                                       seed=config['seed'],
                                       shuffle=True,
                                       skip_header=False,
                                       preprocess=None,
                                       raw_data_path=config['raw_data_path'])
    # read the dataset and split it into train/valid
    data_transformer.read_data()
    # train
    train_dataset = CreateDataset(data_path=config['train_file_path'],
                                  vocab_path=config['vocab_path'],
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data_path=config['valid_file_path'],
                                  vocab_path=config['vocab_path'],
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='valid')
    # training set loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # validation set loader
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(config['bert_model_dir'],
                                     cache_dir=config['cache_dir'],
                                     num_classes=len(config['label_to_id']))

    # ************************** Optimizer *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    num_train_steps = int(len(train_dataset.examples) / config['batch_size']
                          / config['gradient_accumulation_steps'] * config['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total spent warming up
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['learning_rate'],
                         warmup=config['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # model checkpointing
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=config['mode'],
                                       monitor=config['monitor'],
                                       save_best_only=config['save_best_only'],
                                       best_model_name=config['best_model_name'],
                                       epoch_model_name=config['epoch_model_name'],
                                       arch=config['arch'],
                                       logger=logger)
    # monitor the training process
    train_monitor = TrainingMonitor(fig_dir=config['figure_dir'],
                                    json_dir=config['log_dir'],
                                    arch=config['arch'])
    # learning rate schedule
    lr_scheduler = BertLr(optimizer=optimizer,
                          lr=config['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    trainer = Trainer(model=model,
                      train_data=train_loader,
                      val_data=valid_loader,
                      optimizer=optimizer,
                      epochs=config['epochs'],
                      criterion=CrossEntropy(),
                      logger=logger,
                      model_checkpoint=model_checkpoint,
                      training_monitor=train_monitor,
                      resume=config['resume'],
                      lr_scheduler=lr_scheduler,
                      n_gpu=config['n_gpus'],
                      evaluate=F1Score(),
                      class_report=ClassReport(target_names=[config['id_to_label'][x]
                                                             for x in range(len(config['label_to_id']))]))
    # print the model structure
    trainer.summary()
    # fit the model
    trainer.train()
    # release GPU memory
    if len(config['n_gpus']) > 0:
        torch.cuda.empty_cache()
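# --- Hypothetical sketch of the seed_everything helper these scripts call (its
# definition is not included here). This is a common reproducibility pattern, not
# necessarily the repo's exact implementation.
import os
import random

import numpy as np
import torch

def seed_everything(seed=42, device='cpu'):
    """Seed Python, NumPy and PyTorch RNGs; also seed CUDA when a GPU device is used."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if str(device).startswith('cuda'):
        torch.cuda.manual_seed_all(seed)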