Example #1
def main():
    # **************************** Basic setup ***********************
    logger = init_logger(log_name=config['model']['arch'],
                         log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = f"cuda: {config['train']['n_gpu'][0] if len(config['train']['n_gpu']) else 'cpu'}"
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting to load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}

    # **************************** Data preparation ***********************
    DT = DataTransformer(logger=logger, seed=config['train']['seed'])
    # Read the dataset and split it into train/validation sets
    targets, sentences = DT.read_data(
        raw_data_path=config['data']['raw_data_path'],
        preprocessor=EnglishPreProcessor(),
        is_train=True)

    train, valid = DT.train_val_split(
        X=sentences,
        y=targets,
        save=True,
        shuffle=True,
        stratify=False,
        valid_size=config['train']['valid_size'],
        train_path=config['data']['train_file_path'],
        valid_path=config['data']['valid_file_path'])

    tokenizer = BertTokenizer(
        vocab_file=config['pretrained']['bert']['vocab_path'],
        do_lower_case=config['train']['do_lower_case'])

    # train
    train_dataset = CreateDataset(data=train,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='train')

    valid_dataset = CreateDataset(data=valid,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='valid')
    # Training data loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # Validation data loader
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))

    # ************************** Optimizer *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
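    # Biases and LayerNorm parameters are conventionally excluded from weight decay;
    # only the remaining weight matrices get the 0.01 decay below.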
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]

    num_train_steps = int(
        len(train_dataset.examples) / config['train']['batch_size'] /
        config['train']['gradient_accumulation_steps'] *
        config['train']['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['train']['learning_rate'],
                         warmup=config['train']['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # Model checkpointing
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['output']['checkpoint_dir'],
        mode=config['callbacks']['mode'],
        monitor=config['callbacks']['monitor'],
        save_best_only=config['callbacks']['save_best_only'],
        arch=config['model']['arch'],
        logger=logger)
    # Monitor the training process
    train_monitor = TrainingMonitor(file_dir=config['output']['figure_dir'],
                                    arch=config['model']['arch'])
    # Learning rate schedule
    lr_scheduler = BertLR(optimizer=optimizer,
                          learning_rate=config['train']['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['train']['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')

    train_configs = {
        'model': model,
        'logger': logger,
        'optimizer': optimizer,
        'resume': config['train']['resume'],
        'epochs': config['train']['epochs'],
        'n_gpu': config['train']['n_gpu'],
        'gradient_accumulation_steps': config['train']['gradient_accumulation_steps'],
        'epoch_metrics': [
            F1Score(average='micro', task_type='binary'),
            MultiLabelReport(id2label=id2label)
        ],
        'batch_metrics': [AccuracyThresh(thresh=0.5)],
        'criterion': BCEWithLogLoss(),
        'model_checkpoint': model_checkpoint,
        'training_monitor': train_monitor,
        'lr_scheduler': lr_scheduler,
        'early_stopping': None,
        'verbose': 1
    }

    trainer = Trainer(train_configs=train_configs)
    # Fit the model
    trainer.train(train_data=train_loader, valid_data=valid_loader)
    # Free GPU memory
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
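
The script above reads every path and hyperparameter from a nested `config` mapping defined elsewhere in the project. As a reading aid, here is a minimal sketch of the keys it accesses; the layout is inferred from the code above and all values are placeholder assumptions, not the project's real settings.

# Hypothetical layout of the `config` dict consumed by main(); keys mirror the
# accesses above, values are illustrative placeholders only.
config = {
    'model': {'arch': 'bert_fine'},
    'output': {'log_dir': 'output/log', 'cache_dir': 'output/cache',
               'checkpoint_dir': 'output/checkpoints', 'figure_dir': 'output/figure'},
    'data': {'raw_data_path': 'dataset/raw/train.csv',
             'train_file_path': 'dataset/processed/train.tsv',
             'valid_file_path': 'dataset/processed/valid.tsv'},
    'pretrained': {'bert': {'vocab_path': 'pretrain/bert/vocab.txt',
                            'bert_model_dir': 'pretrain/bert'}},
    'train': {'seed': 42, 'n_gpu': [0], 'valid_size': 0.2, 'do_lower_case': True,
              'max_seq_len': 256, 'batch_size': 32, 'num_workers': 2,
              'learning_rate': 2e-5, 'warmup_proportion': 0.1,
              'gradient_accumulation_steps': 1, 'epochs': 6, 'resume': False},
    'callbacks': {'mode': 'min', 'monitor': 'valid_loss', 'save_best_only': True},
    'label2id': {'label_0': 0, 'label_1': 1},
}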
Example #2
def run_train(args):
    # --------- data
    model_to_use = "roberta-large"
    processor = RobertaProcessor(model_type=model_to_use) #vocab_path=config['roberta_vocab_path'], merge_path=config['roberta_merge_path'])
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] / f"{args.data_name}.train.pkl")
    
    train_examples = processor.create_examples(lines=train_data,
                                               example_type='train',
                                               cached_examples_file=config[
                                                    'data_dir'] / f"cached_train_examples_{args.arch}")
    train_features = processor.create_features(examples=train_examples,
                                               max_seq_len=args.train_max_seq_len,
                                               cached_features_file=config[
                                                    'data_dir'] / "cached_train_features_{}_{}".format(
                                                   args.train_max_seq_len, args.arch
                                               ))
    train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,
                                  collate_fn=collate_fn)

    valid_data = processor.get_dev(config['data_dir'] / f"{args.data_name}.valid.pkl")
    valid_examples = processor.create_examples(lines=valid_data,
                                               example_type='valid',
                                               cached_examples_file=config[
                                                'data_dir'] / f"cached_valid_examples_{args.arch}")

    valid_features = processor.create_features(examples=valid_examples,
                                               max_seq_len=args.eval_max_seq_len,
                                               cached_features_file=config[
                                                'data_dir'] / "cached_valid_features_{}_{}".format(
                                                   args.eval_max_seq_len, args.arch
                                               ))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size,
                                  collate_fn=collate_fn)
    
    
    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = RobertaForMultiLable.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        model = RobertaForMultiLable.from_pretrained(model_to_use, num_labels=len(label_list))
    #config['roberta_model_dir']
    print("""\n\nname            module\n----------------------""")
    for name, module in model.named_children():
        if name == "roberta":
            for n, _ in module.named_children():
                print(f"{name}:{n}")
        else:
            print("{:15} {}".format(name, module))
    print()
    t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    
    no_decay = ['bias', 'LayerNorm.weight']
    
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay) and 'classifier.weight' not in n],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
        {'params': model.classifier.weight, 'lr': 5e-4}  # best: 5e-4
        # {'params': model.classifier.bias, 'lr': 5e-4, 'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=args.mode,
                                       monitor=args.monitor,
                                       arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args=args, model=model, logger=logger, criterion=BCEWithLogLoss(), optimizer=optimizer,
                      scheduler=scheduler, early_stopping=None, training_monitor=train_monitor, 
                      model_checkpoint=model_checkpoint,
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      epoch_metrics=[AUC(average='weighted', task_type='binary'),  # average='micro'
                                     MultiLabelReport(id2label=id2label)])
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader)
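
Example #2 exempts biases and LayerNorm weights from weight decay and gives the randomly initialized classification head its own, larger learning rate (5e-4, noted as the best value in the comment) while the pre-trained encoder stays at the base rate. Below is a self-contained sketch of that optimizer/scheduler setup on a toy model; the module names, sizes, and step counts are assumptions for illustration only.

import torch
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

class ToyClassifier(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = torch.nn.Linear(16, 16)    # stand-in for the pre-trained body
        self.norm = torch.nn.LayerNorm(16)
        self.classifier = torch.nn.Linear(16, 4)  # randomly initialized head

    def forward(self, x):
        return self.classifier(self.norm(self.encoder(x)))

model = ToyClassifier()
no_decay = ['bias', 'norm.weight']  # parameters exempted from weight decay
optimizer_grouped_parameters = [
    # regular weights: decayed, base learning rate
    {'params': [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay) and 'classifier.weight' not in n],
     'weight_decay': 0.01},
    # biases / LayerNorm weights: no decay
    {'params': [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
    # classification head: separate, larger learning rate
    {'params': model.classifier.weight, 'lr': 5e-4},
]
t_total = 1000                       # total optimization steps (placeholder)
warmup_steps = int(t_total * 0.1)    # warmup_proportion = 0.1 (placeholder)
optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                            num_training_steps=t_total)

Letting the freshly initialized head learn faster than the pre-trained encoder is a common fine-tuning heuristic; the original author's comment suggests 5e-4 worked best here.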
Example #3
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] /
                                     f"{args.data_name}.train.pkl")
    train_examples = processor.create_examples(
        lines=train_data,
        example_type='train',
        cached_examples_file=config['data_dir'] /
        f"cached_train_examples_{args.arch}")
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_train_features_{}_{}".format(args.train_max_seq_len,
                                             args.arch))
    train_dataset = processor.create_dataset(train_features,
                                             is_sorted=args.sorted)
    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size)

    valid_data = processor.get_dev(config['data_dir'] /
                                   f"{args.data_name}.valid.pkl")
    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type='valid',
        cached_examples_file=config['data_dir'] /
        f"cached_valid_examples_{args.arch}")

    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_valid_features_{}_{}".format(args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  batch_size=args.eval_batch_size)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path,
                                                  num_labels=len(label_list))
    else:
        model = BertForMultiLable.from_pretrained(config['bert_model_dir'],
                                                  num_labels=len(label_list))
    t_total = int(
        len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)
    lr_scheduler = WarmupLinearSchedule(optimizer,
                                        warmup_steps=warmup_steps,
                                        t_total=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'],
                                    arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=args.mode,
                                       monitor=args.monitor,
                                       arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(
        n_gpu=args.n_gpu,
        model=model,
        epochs=args.epochs,
        logger=logger,
        criterion=BCEWithLogLoss(),
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        early_stopping=None,
        training_monitor=train_monitor,
        fp16=args.fp16,
        resume_path=args.resume_path,
        grad_clip=args.grad_clip,
        model_checkpoint=model_checkpoint,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        batch_metrics=[AccuracyThresh(thresh=0.5)],
        epoch_metrics=[
            AUC(average='micro', task_type='binary'),
            MultiLabelReport(id2label=id2label)
        ])
    trainer.train(train_data=train_dataloader,
                  valid_data=valid_dataloader,
                  seed=args.seed)
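
All three examples optimize a multi-label objective with `BCEWithLogLoss()` as the criterion and `AccuracyThresh(thresh=0.5)` as a batch metric. Assuming the former wraps `torch.nn.BCEWithLogitsLoss` and the latter thresholds sigmoid probabilities at 0.5 (a plausible reading of the names, not confirmed by the snippets), the pair reduces to roughly:

import torch

# Hypothetical batch: 2 examples, 4 labels, multi-label targets in {0, 1}.
logits = torch.tensor([[ 1.2, -0.8,  0.3, -2.0],
                       [-0.5,  2.1, -1.7,  0.9]])
targets = torch.tensor([[1., 0., 1., 0.],
                        [0., 1., 0., 1.]])

# Per-label sigmoid + binary cross-entropy (what BCEWithLogLoss presumably computes).
loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, targets)

# AccuracyThresh(thresh=0.5): threshold the sigmoid outputs and count
# per-label agreement with the targets.
preds = (torch.sigmoid(logits) > 0.5).float()
accuracy = (preds == targets).float().mean()
print(loss.item(), accuracy.item())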