Code Example #1
def main():
    # **************************** Log initial data ***********************
    logger = init_logger(log_name=config['model']['arch'],
                         log_dir=config['output']['log_dir'])
    logger.info(f"seed is {config['train']['seed']}")
    device = f"cuda: {config['train']['n_gpu'][0] if len(config['train']['n_gpu']) else 'cpu'}"
    seed_everything(seed=config['train']['seed'], device=device)
    logger.info('starting load data from disk')
    id2label = {value: key for key, value in config['label2id'].items()}

    DT = DataTransformer(logger=logger, seed=config['train']['seed'])

    targets, sentences = DT.read_data(
        raw_data_path=config['data']['raw_data_path'],
        preprocessor=EnglishPreProcessor(),
        is_train=True)

    train, valid = DT.train_val_split(
        X=sentences,
        y=targets,
        save=True,
        shuffle=True,
        stratify=False,
        valid_size=config['train']['valid_size'],
        train_path=config['data']['train_file_path'],
        valid_path=config['data']['valid_file_path'])

    tokenizer = BertTokenizer(
        vocab_file=config['pretrained']['bert']['vocab_path'],
        do_lower_case=config['train']['do_lower_case'])

    # train
    train_dataset = CreateDataset(data=train,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data=valid,
                                  tokenizer=tokenizer,
                                  max_seq_len=config['train']['max_seq_len'],
                                  seed=config['train']['seed'],
                                  example_type='valid')
    # train loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # validation set loader
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** initialize model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(
        config['pretrained']['bert']['bert_model_dir'],
        cache_dir=config['output']['cache_dir'],
        num_classes=len(id2label))

    # ************************** set params *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
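    # bias and LayerNorm parameters are excluded from weight decay, as is standard practice for BERT fine-tuning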
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    num_train_steps = int(
        len(train_dataset.examples) / config['train']['batch_size'] /
        config['train']['gradient_accumulation_steps'] *
        config['train']['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['train']['learning_rate'],
                         warmup=config['train']['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # model checkpoint
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['output']['checkpoint_dir'],
        mode=config['callbacks']['mode'],
        monitor=config['callbacks']['monitor'],
        save_best_only=config['callbacks']['save_best_only'],
        arch=config['model']['arch'],
        logger=logger)
    # monitor
    train_monitor = TrainingMonitor(file_dir=config['output']['figure_dir'],
                                    arch=config['model']['arch'])
    # learning rate scheduler
    lr_scheduler = BertLR(optimizer=optimizer,
                          learning_rate=config['train']['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['train']['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')

    train_configs = {
        'model': model,
        'logger': logger,
        'optimizer': optimizer,
        'resume': config['train']['resume'],
        'epochs': config['train']['epochs'],
        'n_gpu': config['train']['n_gpu'],
        'gradient_accumulation_steps': config['train']['gradient_accumulation_steps'],
        'epoch_metrics': [F1Score(average='micro', task_type='binary')],
        'batch_metrics': [AccuracyThresh(thresh=0.5)],
        'criterion': BCEWithLogLoss(),
        'model_checkpoint': model_checkpoint,
        'training_monitor': train_monitor,
        'lr_scheduler': lr_scheduler,
        'early_stopping': None,
        'verbose': 1
    }

    trainer = Trainer(train_configs=train_configs)
    trainer.train(train_data=train_loader, valid_data=valid_loader)
    if len(config['train']['n_gpu']) > 0:
        torch.cuda.empty_cache()
Code Example #2
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)

    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] /
                                     f"{args.data_name}.label_train.pkl")

    print("Train data is:")
    print(train_data)

    train_examples = processor.create_examples(
        lines=train_data,
        example_type='train',
        cached_examples_file=config['data_cache'] /
        f"cached_train_label_examples_finetune{args.arch}")

    # print ("Training examples are:")
    # print (train_examples)
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_cache'] /
        "cached_train_label_features_finetune{}_{}".format(
            args.train_max_seq_len, args.arch))

    train_dataset = processor.create_dataset(train_features,
                                             is_sorted=args.sorted)

    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size)

    valid_data = processor.get_dev(config['data_dir'] /
                                   f"{args.data_name}.label_valid.pkl")

    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type='valid',
        cached_examples_file=config['data_cache'] /
        f"cached_valid_examples_label_finetune{args.arch}")

    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_cache'] /
        "cached_valid_features_label_finetune{}_{}".format(
            args.eval_max_seq_len, args.arch))

    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)

    valid_dataloader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  batch_size=args.eval_batch_size)

    # ------- model
    logger.info("initializing model")

    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path,
                                                  num_labels=len(label_list))

    else:
        print("Labels are:")
        print(label_list)
        # model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
        model = BertForMultiLable.from_pretrained("bert-base-uncased",
                                                  num_labels=len(label_list))

    t_total = int(
        len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)
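    # WarmupLinearSchedule ramps the learning rate up linearly for warmup_steps, then decays it linearly to zero over t_total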
    lr_scheduler = WarmupLinearSchedule(optimizer,
                                        warmup_steps=warmup_steps,
                                        t_total=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'],
                                    arch=args.arch)

    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=args.mode,
                                       monitor=args.monitor,
                                       arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(
        n_gpu=args.n_gpu,
        model=model,
        epochs=args.epochs,
        logger=logger,
        criterion=BCEWithLogLoss(),
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        early_stopping=None,
        training_monitor=train_monitor,
        fp16=args.fp16,
        resume_path=args.resume_path,
        grad_clip=args.grad_clip,
        model_checkpoint=model_checkpoint,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        batch_metrics=[AccuracyThresh(thresh=0.5)],
        epoch_metrics=[
            AUC(average='micro', task_type='binary'),
            MultiLabelReport(id2label=id2label)
        ])

    # embeddings_dict = pickle.load(open("/home/rgaonkar/context_home/rgaonkar/label_embeddings/code/Bert_Masked_LM/label_embeddings_dict.p", "rb"))

    # label_similarity_matrix = get_label_similarity_matrix(embeddings_dict, label_list)

    trainer.train(train_data=train_dataloader,
                  valid_data=valid_dataloader,
                  seed=args.seed)
Code Example #3
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)

    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    ##Get the data for the soft training task
    train_data = processor.get_train(config['data_dir'] /
                                     f"{args.data_name}.label_train.pkl")

    print("Train data is:")
    print(train_data)

    train_examples = processor.create_examples(
        lines=train_data,
        example_type='train',
        cached_examples_file=config['data_cache'] /
        f"cached_train_label_examples_finetune{args.arch}")

    # print ("Training examples are:")
    # print (train_examples)
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_cache'] /
        "cached_train_label_features_finetune{}_{}".format(
            args.train_max_seq_len, args.arch))

    train_dataset = processor.create_dataset(train_features,
                                             is_sorted=args.sorted)

    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size)

    ###########################################################################
    ##Get data for the semi-supervised task

    # processor_semi = BertProcessor_semi(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)

    # label_list = processor_semi.get_labels()
    # label2id = {label: i for i, label in enumerate(label_list)}
    # id2label = {i: label for i, label in enumerate(label_list)}

    train_data_semi = processor.get_train_semi(config['unlabel_data_path'])

    print("Train data is:")
    print(train_data)

    train_examples_semi = processor.create_examples_semi(
        lines=train_data_semi,
        example_type='train',
        cached_examples_file=config['data_cache'] /
        f"cached_train_unlabel_examples_finetune{args.arch}")

    # print ("Training examples are:")
    # print (train_examples)
    train_features_semi = processor.create_features_semi(
        examples=train_examples_semi,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_cache'] /
        "cached_train_unlabel_features_finetune{}_{}".format(
            args.train_max_seq_len, args.arch))

    train_dataset_semi = processor.create_dataset_semi(train_features_semi,
                                                       is_sorted=args.sorted)

    if args.sorted:
        train_sampler_semi = SequentialSampler(train_dataset_semi)
    else:
        train_sampler_semi = RandomSampler(train_dataset_semi)

    train_dataloader_semi = DataLoader(train_dataset_semi,
                                       sampler=train_sampler_semi,
                                       batch_size=args.train_batch_size)

    valid_data = processor.get_dev(config['data_dir'] /
                                   f"{args.data_name}.label_valid.pkl")

    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type='valid',
        cached_examples_file=config['data_cache'] /
        f"cached_valid_examples_label_finetune{args.arch}")

    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_cache'] /
        "cached_valid_features_label_finetune{}_{}".format(
            args.eval_max_seq_len, args.arch))

    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)

    valid_dataloader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  batch_size=args.eval_batch_size)

    # ------- model
    logger.info("initializing model")

    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path,
                                                  num_labels=len(label_list))

    else:
        print("Labels are:")
        print(label_list)

        # model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))

        #model = BertForMultiLable.from_pretrained("pybert/output/checkpoints_label_finetune_soft_joint_corr_emotion/bert", num_labels=len(label_list))
        model = BertForMultiLable.from_pretrained("bert-base-uncased")

        # model = BertForMultiLable.from_pretrained("bert-base-uncased", num_labels=len(label_list))

    t_total = int(
        len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    # param_optimizer = list(filter(lambda named_param: named_param[1].requires_grad, model.named_parameters()))

    no_decay = ['bias', 'LayerNorm.weight']

    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    warmup_steps = int(t_total * args.warmup_proportion)

    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)

    lr_scheduler = WarmupLinearSchedule(optimizer,
                                        warmup_steps=warmup_steps,
                                        t_total=t_total)

    ##For semi-supervision
    t_total_semi = int(
        len(train_dataloader_semi) / args.gradient_accumulation_steps *
        args.epochs)

    ##params for the semi-supervised optimizer contain only the label graph weights
    # param_optimizer_semi = [(name, param) for (name, param) in list(model.named_parameters()) if "label_graph" not in name]

    param_optimizer_semi = [(name, param)
                            for name, param in model.named_parameters()
                            if name == 'label_graph.weight']
    # param_optimizer = list(filter(lambda named_param: named_param[1].requires_grad, model.named_parameters()))

    no_decay = ['bias', 'LayerNorm.weight']

    optimizer_grouped_parameters_semi = [
        {'params': [p for n, p in param_optimizer_semi if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer_semi if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    warmup_steps_semi = int(t_total_semi * args.warmup_proportion)

    optimizer_semi = AdamW(optimizer_grouped_parameters_semi,
                           lr=args.learning_rate,
                           eps=args.adam_epsilon)

    lr_scheduler_semi = WarmupLinearSchedule(optimizer_semi,
                                             warmup_steps=warmup_steps_semi,
                                             t_total=t_total_semi)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'],
                                    arch=args.arch)

    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=args.mode,
                                       monitor=args.monitor,
                                       arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(
        n_gpu=args.n_gpu,
        model=model,
        epochs=args.epochs,
        logger=logger,
        # criterion_hard=BCEWithLogLoss(),
        criterion=ContinuousBCEWithLogLoss(),
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        optimizer_semi=optimizer_semi,
        lr_scheduler_semi=lr_scheduler_semi,
        early_stopping=None,
        training_monitor=train_monitor,
        fp16=args.fp16,
        resume_path=args.resume_path,
        grad_clip=args.grad_clip,
        model_checkpoint=model_checkpoint,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        batch_metrics=[AccuracyThresh(thresh=0.5)],
        ##Only look at the f1 score
        epoch_metrics=[MultiLabelReport(id2label=id2label)])

    # embeddings_dict = pickle.load(open("/home/rgaonkar/context_home/rgaonkar/label_embeddings/code/Bert_Masked_LM/label_embeddings_dict.p", "rb"))

    # label_similarity_matrix = get_label_similarity_matrix(embeddings_dict, label_list)

    # ------- label correlations
    logger.info("computing label correlations on the training set")

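    # sample[-1] is assumed to hold the multi-hot label tensor of each training example;
    # these vectors are used to estimate pairwise label correlations on the training set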
    true_labels_matrix = [sample[-1].tolist() for sample in train_dataset]
    print("True train labels:")
    print(true_labels_matrix)

    train_label_corr = get_label_corr(true_labels_matrix)

    print("True train label correlations:")
    print(train_label_corr)

    #Save the correlation matrix of the true labels in the data cache folder
    pickle.dump(train_label_corr,
                open(config['data_cache'] / "train_label_corr.p", "wb"))

    trainer.train(train_data=train_dataloader,
                  train_data_semi=train_dataloader_semi,
                  valid_data=valid_dataloader,
                  seed=args.seed,
                  prob_thresh=args.prob_thresh,
                  true_label_corr=train_label_corr,
                  tokenizer=processor.tokenizer,
                  args=args)
Code Example #4
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] / f"{args.data_name}.train.pkl")
    train_examples = processor.create_examples(lines=train_data,
                                               example_type='train',
                                               cached_examples_file=config['data_dir'] / f"cached_train_examples_{args.arch}")
    train_features = processor.create_features(examples=train_examples,
                                               max_seq_len=args.train_max_seq_len,
                                               cached_features_file=config['data_dir'] / "cached_train_features_{}_{}".format(
                                                   args.train_max_seq_len, args.arch))
    train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    valid_data = processor.get_dev(config['data_dir'] / f"{args.data_name}.valid.pkl")
    valid_examples = processor.create_examples(lines=valid_data,
                                               example_type='valid',
                                               cached_examples_file=config['data_dir'] / f"cached_valid_examples_{args.arch}")

    valid_features = processor.create_features(examples=valid_examples,
                                               max_seq_len=args.eval_max_seq_len,
                                               cached_features_file=config['data_dir'] / "cached_valid_features_{}_{}".format(
                                                   args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiClass.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        model = BertForMultiClass.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
    t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    lr_scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'], mode=args.mode,
                                       monitor=args.monitor, arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(n_gpu=args.n_gpu,
                      model=model,
                      epochs=args.epochs,
                      logger=logger,
                      criterion=CrossEntropy(),
                      optimizer=optimizer,
                      lr_scheduler=lr_scheduler,
                      early_stopping=None,
                      training_monitor=train_monitor,
                      fp16=args.fp16,
                      resume_path=args.resume_path,
                      grad_clip=args.grad_clip,
                      model_checkpoint=model_checkpoint,
                      gradient_accumulation_steps=args.gradient_accumulation_steps,
                      evaluate=F1Score(),
                      class_report=ClassReport(target_names=[id2label[x] for x in range(len(label2id))]))
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader, seed=args.seed)
Code Example #5
def run_train(args, data_names):
    # --------- data
    # processor = BertProcessor(vocab_path=config['bert_vocab_path'], do_lower_case=args.do_lower_case)
    processor = BertProcessor()
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    # train_data = processor.get_train(config['data_dir'] / f"{data_name}.train.pkl")
    # train_examples = processor.create_examples(lines=train_data,
    #                                            example_type='train',
    #                                            cached_examples_file=config[
    #                                                 'data_dir'] / f"cached_train_examples_{args.arch}")
    # train_features = processor.create_features(examples=train_examples,
    #                                            max_seq_len=args.train_max_seq_len,
    #                                            cached_features_file=config[
    #                                                 'data_dir'] / "cached_train_features_{}_{}".format(
    #                                                args.train_max_seq_len, args.arch
    #                                            ))
    # train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    # if args.sorted:
    #     train_sampler = SequentialSampler(train_dataset)
    # else:
    #     train_sampler = RandomSampler(train_dataset)
    # train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,
    #                               collate_fn=collate_fn)
    #
    # valid_data = processor.get_dev(config['data_dir'] / f"{data_name}.valid.pkl")
    # valid_examples = processor.create_examples(lines=valid_data,
    #                                            example_type='valid',
    #                                            cached_examples_file=config[
    #                                             'data_dir'] / f"cached_valid_examples_{args.arch}")
    #
    # valid_features = processor.create_features(examples=valid_examples,
    #                                            max_seq_len=args.eval_max_seq_len,
    #                                            cached_features_file=config[
    #                                             'data_dir'] / "cached_valid_features_{}_{}".format(
    #                                                args.eval_max_seq_len, args.arch
    #                                            ))
    # valid_dataset = processor.create_dataset(valid_features)
    # valid_sampler = SequentialSampler(valid_dataset)
    # valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size,
    #                               collate_fn=collate_fn)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        # model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
        model = BertForMultiLable.from_pretrained("bert-base-multilingual-cased", num_labels=len(label_list))
    #t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)
    t_total = 200000
  
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
         {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],'weight_decay': args.weight_decay},
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                   num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'], mode=args.mode,
                                       monitor=args.monitor, arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    #logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args=args, model=model, logger=logger, criterion=BCEWithLogLoss(), optimizer=optimizer,
                      scheduler=scheduler, early_stopping=None, training_monitor=train_monitor,
                      model_checkpoint=model_checkpoint,
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      epoch_metrics=[AUC(average='micro', task_type='binary'),
                                     MultiLabelReport(id2label=id2label),
                                     F1Score(average='micro', task_type='binary')])

    trainer.model.zero_grad()
    seed_everything(trainer.args.seed)  # Added here for reproducibility (even between python 2 and 3)
    
    iter_num = 0
    valid_dataloader = get_valid_dataloader(args)
    for epoch in range(trainer.start_epoch, trainer.start_epoch + trainer.args.epochs):
        trainer.logger.info(f"Epoch {epoch}/{trainer.args.epochs}")
        update_epoch = True

        for i, data_name in enumerate(data_names):
            filename_int = int(data_name)
            if filename_int > 3500:
                continue
            trainer.logger.info(f"Epoch {epoch} - summary {i+1}/{len(data_names)}"+ f": summary_{data_name}")
            # train_dataloader, valid_dataloader = get_dataloader(args, data_name)
            train_dataloader = get_dataloader(args, data_name)
            # train_log, valid_log = trainer.train(train_data=train_dataloader, valid_data=valid_dataloader, epoch=update_epoch)
            train_log = trainer.train(train_data=train_dataloader, epoch=update_epoch)
            update_epoch = False

            # if train_log == None:
            #     continue
            
            iter_num += 1

            # logs = dict(train_log)
            # show_info = f'\nEpoch: {epoch} - ' + "-".join([f' {key}: {value:.4f} ' for key, value in logs.items()])
            # trainer.logger.info(show_info)


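            # run validation (and checkpointing further below) every 50 summary files rather than once per epoch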
            if iter_num % 50 == 0:
                valid_log = trainer.valid_epoch(valid_dataloader)
                logs = dict(valid_log)
                show_info = f'\nEpoch: {epoch} - ' + "-".join([f' {key}: {value:.4f} ' for key, value in logs.items()])
                trainer.logger.info(show_info)

                # save
                if trainer.training_monitor:
                    trainer.training_monitor.epoch_step(logs)

            # save model
            if trainer.model_checkpoint:
                if iter_num % 50 == 0:
                #     state = trainer.save_info(epoch, best=logs[trainer.model_checkpoint.monitor])
                    state = trainer.save_info(iter_num, best=logs[trainer.model_checkpoint.monitor])
                    trainer.model_checkpoint.bert_epoch_step(current=logs[trainer.model_checkpoint.monitor], state=state)

            # early_stopping
            if trainer.early_stopping:
                trainer.early_stopping.epoch_step(epoch=epoch, current=logs[trainer.early_stopping.monitor])
                if trainer.early_stopping.stop_training:
                    break
Code Example #6
def run_train(args):
    # --------- data
    processor = BertProcessor(vocab_path=config['bert_vocab_path'],
                              do_lower_case=args.do_lower_case)
    label_list = processor.get_labels(args.task_type)
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(
        config['data_dir'] / f"{args.data_name}.train.{args.task_type}.pkl")
    train_examples = processor.create_examples(
        lines=train_data,
        example_type=f'train_{args.task_type}',
        cached_examples_file=config['data_dir'] /
        f"cached_train_{args.task_type}_examples_{args.arch}")
    train_features = processor.create_features(
        examples=train_examples,
        max_seq_len=args.train_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_train_{}_features_{}_{}".format(
            args.task_type, args.train_max_seq_len, args.arch))
    train_dataset = processor.create_dataset(train_features,
                                             is_sorted=args.sorted)
    if args.sorted:
        train_sampler = SequentialSampler(train_dataset)
    else:
        train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size,
                                  collate_fn=collate_fn)

    valid_data = processor.get_dev(
        config['data_dir'] / f"{args.data_name}.valid.{args.task_type}.pkl")
    valid_examples = processor.create_examples(
        lines=valid_data,
        example_type=f'valid_{args.task_type}',
        cached_examples_file=config['data_dir'] /
        f"cached_valid_{args.task_type}_examples_{args.arch}")

    valid_features = processor.create_features(
        examples=valid_examples,
        max_seq_len=args.eval_max_seq_len,
        cached_features_file=config['data_dir'] /
        "cached_valid_{}_features_{}_{}".format(
            args.task_type, args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  batch_size=args.eval_batch_size,
                                  collate_fn=collate_fn)

    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = BertForMultiLable.from_pretrained(args.resume_path,
                                                  num_labels=len(label_list))
    else:
        if args.task_type == 'trans':
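            # transfer/few-shot setting: initialize from a previously fine-tuned base checkpoint rather than the raw pretrained model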
            model = BertForMultiLable_Fewshot.from_pretrained(
                Path('pybert/output/checkpoints/bert/base'),
                num_labels=len(label_list))
            #model = BertForMultiLable.from_pretrained(config['bert_model_dir'], num_labels=len(label_list))
        else:
            model = BertForMultiLable.from_pretrained(
                config['bert_model_dir'], num_labels=len(label_list))
    t_total = int(
        len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)
    # below: the design of the optimizer and scheduler
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.fp16_opt_level)
    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(
        file_dir=config['figure_dir'], arch=args.arch
    )  # TODO: understand the role of train_monitor; it seems to just be a plotting utility that records the results obtained in each epoch
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'],
                                       mode=args.mode,
                                       monitor=args.monitor,
                                       arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps *
        (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(
        args=args,
        model=model,
        logger=logger,
        criterion=BCEWithLogLoss(),
        optimizer=optimizer,
        scheduler=scheduler,
        early_stopping=None,
        training_monitor=train_monitor,
        model_checkpoint=model_checkpoint,
        batch_metrics=[
            AccuracyThresh(thresh=0.5)
        ],  # batch-level metrics, computed after every loss.backward(); remember to distinguish them from the loss
        epoch_metrics=[
            AUC(average='micro', task_type='binary'),  # epoch-level metrics
            MultiLabelReport(id2label=id2label),
            F1Score(task_type='binary', average='micro', search_thresh=True)
        ])  # TODO: consider whether the F1-score should replace this metric
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader)
Code Example #7
def run_train(args):
    # --------- data
    model_to_use = "roberta-large"
    processor = RobertaProcessor(model_type=model_to_use) #vocab_path=config['roberta_vocab_path'], merge_path=config['roberta_merge_path'])
    label_list = processor.get_labels()
    label2id = {label: i for i, label in enumerate(label_list)}
    id2label = {i: label for i, label in enumerate(label_list)}

    train_data = processor.get_train(config['data_dir'] / f"{args.data_name}.train.pkl")
    
    train_examples = processor.create_examples(lines=train_data,
                                               example_type='train',
                                               cached_examples_file=config['data_dir'] / f"cached_train_examples_{args.arch}")
    train_features = processor.create_features(examples=train_examples,
                                               max_seq_len=args.train_max_seq_len,
                                               cached_features_file=config['data_dir'] / "cached_train_features_{}_{}".format(
                                                   args.train_max_seq_len, args.arch))
    train_dataset = processor.create_dataset(train_features, is_sorted=args.sorted)
    
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,
                                  collate_fn=collate_fn)

    valid_data = processor.get_dev(config['data_dir'] / f"{args.data_name}.valid.pkl")
    valid_examples = processor.create_examples(lines=valid_data,
                                               example_type='valid',
                                               cached_examples_file=config['data_dir'] / f"cached_valid_examples_{args.arch}")

    valid_features = processor.create_features(examples=valid_examples,
                                               max_seq_len=args.eval_max_seq_len,
                                               cached_features_file=config['data_dir'] / "cached_valid_features_{}_{}".format(
                                                   args.eval_max_seq_len, args.arch))
    valid_dataset = processor.create_dataset(valid_features)
    valid_sampler = SequentialSampler(valid_dataset)
    valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.eval_batch_size,
                                  collate_fn=collate_fn)
    
    
    # ------- model
    logger.info("initializing model")
    if args.resume_path:
        args.resume_path = Path(args.resume_path)
        model = RobertaForMultiLable.from_pretrained(args.resume_path, num_labels=len(label_list))
    else:
        model = RobertaForMultiLable.from_pretrained(model_to_use, num_labels=len(label_list))
    #config['roberta_model_dir']
    print("""\n\nname            module\n----------------------""")
    for name, module in model.named_children():
        if name == "roberta":
            for n, _ in module.named_children():
                print(f"{name}:{n}")
        else:
            print("{:15} {}".format(name, module))
    print()
    #return
    # print("================= train dataloader length is", len(train_dataloader), "=================\n")
    t_total = int(len(train_dataloader) / args.gradient_accumulation_steps * args.epochs)

    param_optimizer = list(model.named_parameters())
    
    no_decay = ['bias', 'LayerNorm.weight']
    
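    # the classifier head is pulled out of the default group and given its own, larger learning rate (5e-4),
    # while the pretrained RoBERTa encoder keeps args.learning_rate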
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay) and 'classifier.weight' not in n], 'weight_decay': args.weight_decay}, #
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        {'params': model.classifier.weight, 'lr': 5e-4} # best: 5e-4
        # {'params': model.classifier.bias, 'lr': 5e-4, 'weight_decay': 0.0}
    ]
    # model.parameters()
    warmup_steps = int(t_total * args.warmup_proportion)
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # ---- callbacks
    logger.info("initializing callbacks")
    train_monitor = TrainingMonitor(file_dir=config['figure_dir'], arch=args.arch)
    model_checkpoint = ModelCheckpoint(checkpoint_dir=config['checkpoint_dir'], mode=args.mode,
                                       monitor=args.monitor, arch=args.arch,
                                       save_best_only=args.save_best)

    # **************************** training model ***********************
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Num Epochs = %d", args.epochs)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    trainer = Trainer(args=args, model=model, logger=logger, criterion=BCEWithLogLoss(), optimizer=optimizer,
                      scheduler=scheduler, early_stopping=None, training_monitor=train_monitor, 
                      model_checkpoint=model_checkpoint,
                      batch_metrics=[AccuracyThresh(thresh=0.5)],
                      epoch_metrics=[AUC(average='weighted', task_type='binary'),  # average='micro'
                                     MultiLabelReport(id2label=id2label)])
    trainer.train(train_data=train_dataloader, valid_data=valid_dataloader)
Code Example #8
def main():
    # **************************** Basic Info  ***********************
    logger = init_logger(log_name=config['arch'], log_dir=config['log_dir'])
    logger.info("seed is %d" % config['seed'])
    device = 'cuda:%d' % config['n_gpus'][0] if len(
        config['n_gpus']) else 'cpu'
    seed_everything(seed=config['seed'], device=device)
    logger.info('starting load data from disk')

    # split the reports
    if config['resume']:
        split_reports = SplitReports(raw_reports_dir=config['raw_reports_dir'],
                                     raw_data_path=config['raw_data_path'])
        split_reports.split()

    df = pd.read_csv(config['raw_data_path'])
    label_list = df.columns.values[2:].tolist()
    config['label_to_id'] = {label: i for i, label in enumerate(label_list)}
    config['id_to_label'] = {i: label for i, label in enumerate(label_list)}
    config['vocab_path'] = path.sep.join(
        [config['bert_model_dir'], 'vocab.txt'])

    # **************************** Data  ***********************
    data_transformer = DataTransformer(logger=logger,
                                       raw_data_path=config['raw_data_path'],
                                       label_to_id=config['label_to_id'],
                                       train_file=config['train_file_path'],
                                       valid_file=config['valid_file_path'],
                                       valid_size=config['valid_size'],
                                       seed=config['seed'],
                                       preprocess=Preprocessor(),
                                       shuffle=config['shuffle'],
                                       skip_header=True,
                                       stratify=False)
    # dataloader and pre-processing
    data_transformer.read_data()

    tokenizer = BertTokenizer(vocab_file=config['vocab_path'],
                              do_lower_case=config['do_lower_case'])

    # train
    train_dataset = CreateDataset(data_path=config['train_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data_path=config['valid_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='valid')
    # resume best model
    if config['resume']:
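        # presumably a dummy placeholder: when resuming the best model, no real training data is needed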
        train_loader = [0]
    else:
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=config['batch_size'],
                                  num_workers=config['num_workers'],
                                  shuffle=True,
                                  drop_last=False,
                                  pin_memory=False)
    # valid
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model  ***********************
    logger.info("initializing model")
    if config['resume']:
        with open(config['lab_dir'], 'r') as f:
            config['label_to_id'] = load(f)

    model = BertFine.from_pretrained(config['bert_model_dir'],
                                     cache_dir=config['cache_dir'],
                                     num_classes=len(config['label_to_id']))

    # ************************** Optimizer  *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    num_train_steps = int(
        len(train_dataset.examples) / config['batch_size'] /
        config['gradient_accumulation_steps'] * config['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['learning_rate'],
                         warmup=config['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # save model
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['checkpoint_dir'],
        mode=config['mode'],
        monitor=config['monitor'],
        save_best_only=config['save_best_only'],
        best_model_name=config['best_model_name'],
        epoch_model_name=config['epoch_model_name'],
        arch=config['arch'],
        logger=logger)
    # monitor
    train_monitor = TrainingMonitor(fig_dir=config['figure_dir'],
                                    json_dir=config['log_dir'],
                                    arch=config['arch'])

    # TensorBoard
    start_time = datetime.datetime.now().strftime('%m%d_%H%M%S')
    writer_dir = os.path.join(config['writer_dir'], config['feature-based'],
                              start_time)
    TSBoard = WriterTensorboardX(writer_dir=writer_dir,
                                 logger=logger,
                                 enable=True)
    # learning rate
    lr_scheduler = BertLr(optimizer=optimizer,
                          lr=config['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    trainer = Trainer(model=model,
                      train_data=train_loader,
                      val_data=valid_loader,
                      optimizer=optimizer,
                      epochs=config['epochs'],
                      criterion=BCEWithLogLoss(),
                      logger=logger,
                      model_checkpoint=model_checkpoint,
                      training_monitor=train_monitor,
                      TSBoard=TSBoard,
                      resume=config['resume'],
                      lr_scheduler=lr_scheduler,
                      n_gpu=config['n_gpus'],
                      label_to_id=config['label_to_id'],
                      evaluate_auc=AUC(sigmoid=True),
                      evaluate_f1=F1Score(sigmoid=True),
                      incorrect=Incorrect(sigmoid=True))

    trainer.summary()
    trainer.train()

    # release cache
    if len(config['n_gpus']) > 0:
        torch.cuda.empty_cache()
Code Example #9
def main():
    # **************************** Basic information ***********************
    logger = init_logger(log_name=config['arch'], log_dir=config['log_dir'])
    logger.info("seed is %d" % config['seed'])
    device = 'cuda:%d' % config['n_gpus'][0] if len(
        config['n_gpus']) else 'cpu'
    seed_everything(seed=config['seed'], device=device)
    logger.info('starting load data from disk')
    config['id_to_label'] = {v: k for k, v in config['label_to_id'].items()}
    # **************************** Data generation ***********************
    data_transformer = DataTransformer(logger=logger,
                                       raw_data_path=config['raw_data_path'],
                                       label_to_id=config['label_to_id'],
                                       train_file=config['train_file_path'],
                                       valid_file=config['valid_file_path'],
                                       valid_size=config['valid_size'],
                                       seed=config['seed'],
                                       preprocess=Preprocessor(),
                                       shuffle=True,
                                       skip_header=True,
                                       stratify=False)
    # read the dataset and split it into train/valid
    data_transformer.read_data()

    tokenizer = BertTokenizer(vocab_file=config['vocab_path'],
                              do_lower_case=config['do_lower_case'])

    # train
    train_dataset = CreateDataset(data_path=config['train_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='train')
    # valid
    valid_dataset = CreateDataset(data_path=config['valid_file_path'],
                                  tokenizer=tokenizer,
                                  max_seq_len=config['max_seq_len'],
                                  seed=config['seed'],
                                  example_type='valid')
    # load the training dataset
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=True,
                              drop_last=False,
                              pin_memory=False)
    # validation dataset
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=False,
                              drop_last=False,
                              pin_memory=False)

    # **************************** Model ***********************
    logger.info("initializing model")
    model = BertFine.from_pretrained(config['bert_model_dir'],
                                     cache_dir=config['cache_dir'],
                                     num_classes=len(config['label_to_id']))

    # ************************** Optimizer *************************
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    num_train_steps = int(
        len(train_dataset.examples) / config['batch_size'] /
        config['gradient_accumulation_steps'] * config['epochs'])
    # t_total: total number of training steps for the learning rate schedule
    # warmup: portion of t_total for the warmup
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=config['learning_rate'],
                         warmup=config['warmup_proportion'],
                         t_total=num_train_steps)

    # **************************** callbacks ***********************
    logger.info("initializing callbacks")
    # save the model
    model_checkpoint = ModelCheckpoint(
        checkpoint_dir=config['checkpoint_dir'],
        mode=config['mode'],
        monitor=config['monitor'],
        save_best_only=config['save_best_only'],
        best_model_name=config['best_model_name'],
        epoch_model_name=config['epoch_model_name'],
        arch=config['arch'],
        logger=logger)
    # monitor the training process
    train_monitor = TrainingMonitor(fig_dir=config['figure_dir'],
                                    json_dir=config['log_dir'],
                                    arch=config['arch'])
    # learning rate schedule
    lr_scheduler = BertLr(optimizer=optimizer,
                          lr=config['learning_rate'],
                          t_total=num_train_steps,
                          warmup=config['warmup_proportion'])

    # **************************** training model ***********************
    logger.info('training model....')
    trainer = Trainer(model=model,
                      train_data=train_loader,
                      val_data=valid_loader,
                      optimizer=optimizer,
                      epochs=config['epochs'],
                      criterion=BCEWithLogLoss(),
                      logger=logger,
                      model_checkpoint=model_checkpoint,
                      training_monitor=train_monitor,
                      resume=config['resume'],
                      lr_scheduler=lr_scheduler,
                      n_gpu=config['n_gpus'],
                      label_to_id=config['label_to_id'],
                      evaluate=AUC(sigmoid=True))
    # inspect the model structure
    trainer.summary()
    # fit the model
    trainer.train()
    # release GPU memory
    if len(config['n_gpus']) > 0:
        torch.cuda.empty_cache()