Example no. 1
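The original listing omits its imports. A plausible set, inferred from Example no. 2 below and the hapi package layout, is sketched here; every module path not shown in Example no. 2 is an assumption, and ClsModelLayer is presumed to be defined in a local cls module shipped with the example.

# NOTE: the import paths below are assumptions, not verified hapi API.
import paddle.fluid as fluid
from hapi.configure import Config
from hapi.metrics import Accuracy
from hapi.loss import SoftmaxWithCrossEntropy
from hapi.model import set_device, Input
from hapi.text.bert import BertConfig, BertInputExample, make_optimizer
from hapi.text.bert.dataloader import BertDataLoader
import hapi.text.tokenizer.tokenization as tokenization
from cls import ClsModelLayer  # hypothetical local module with the classifier
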
def main():

    config = Config(yaml_file="./bert.yaml")
    config.build()
    config.Print()

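    # Pick the target device ("gpu" when use_cuda is set) and switch Paddle
    # into imperative (dygraph) mode.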
    device = set_device("gpu" if config.use_cuda else "cpu")
    fluid.enable_dygraph(device)

    bert_config = BertConfig(config.bert_config_path)
    bert_config.print_config()

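    # Build the WordPiece tokenizer from the pre-trained model's vocabulary.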
    tokenizer = tokenization.FullTokenizer(
        vocab_file=config.vocab_path, do_lower_case=config.do_lower_case)

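    # Turn one raw TSV line into a BertInputExample. line_id "0" is the
    # header row and is skipped; in MNLI, column 8 holds the premise,
    # column 9 the hypothesis, and the last column the gold label.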
    def mnli_line_processor(line_id, line):
        if line_id == "0":
            return None
        uid = tokenization.convert_to_unicode(line[0])
        text_a = tokenization.convert_to_unicode(line[8])
        text_b = tokenization.convert_to_unicode(line[9])
        label = tokenization.convert_to_unicode(line[-1])
        if label not in ["contradiction", "entailment", "neutral"]:
            label = "contradiction"
        return BertInputExample(
            uid=uid, text_a=text_a, text_b=text_b, label=label)

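    # Load the MNLI train split; BertDataLoader runs each line through the
    # processor, tokenizes, pads to max_seq_length, and batches the result.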
    train_dataloader = BertDataLoader(
        "./data/glue_data/MNLI/train.tsv",
        tokenizer, ["contradiction", "entailment", "neutral"],
        max_seq_length=config.max_seq_len,
        batch_size=config.batch_size,
        line_processor=mnli_line_processor)

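    # The matched dev split is loaded unshuffled, in prediction phase, for
    # evaluation after training.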
    test_dataloader = BertDataLoader(
        "./data/glue_data/MNLI/dev_matched.tsv",
        tokenizer, ["contradiction", "entailment", "neutral"],
        max_seq_length=config.max_seq_len,
        batch_size=config.batch_size,
        line_processor=mnli_line_processor,
        shuffle=False,
        phase="predict")

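    # Total optimization steps: epochs * examples / batch size, split across
    # trainers; warmup covers the first warmup_proportion fraction of them.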
    trainer_count = fluid.dygraph.parallel.Env().nranks
    num_train_examples = len(train_dataloader.dataset)
    max_train_steps = config.epoch * num_train_examples // config.batch_size // trainer_count
    warmup_steps = int(max_train_steps * config.warmup_proportion)

    print("Trainer count: %d" % trainer_count)
    print("Num train examples: %d" % num_train_examples)
    print("Max train steps: %d" % max_train_steps)
    print("Num warmup steps: %d" % warmup_steps)

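    # Input placeholders: token ids, position ids, sentence (segment) ids,
    # and the attention mask; the None dimensions keep batch size and
    # sequence length dynamic.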
    inputs = [
        Input([None, None], 'int64', name='src_ids'),
        Input([None, None], 'int64', name='pos_ids'),
        Input([None, None], 'int64', name='sent_ids'),
        Input([None, None, 1], 'float32', name='input_mask')
    ]

    labels = [Input([None, 1], 'int64', name='label')]

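    # Classification head over the BERT encoder, one logit per MNLI label.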
    cls_model = ClsModelLayer(
        config,
        bert_config,
        len(["contradiction", "entailment", "neutral"]),
        return_pooled_out=True)

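    # Optimizer with warmup and weight decay as configured in bert.yaml.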
    optimizer = make_optimizer(
        warmup_steps=warmup_steps,
        num_train_steps=max_train_steps,
        learning_rate=config.learning_rate,
        weight_decay=config.weight_decay,
        scheduler=config.lr_scheduler,
        model=cls_model,
        loss_scaling=config.loss_scaling,
        parameter_list=cls_model.parameters())

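    # Attach optimizer, loss, and top-k accuracy metric to the hapi Model.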
    cls_model.prepare(
        optimizer,
        SoftmaxWithCrossEntropy(),
        Accuracy(topk=(1, 2)),
        inputs,
        labels,
        device=device)

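    # Warm-start the encoder from the pre-trained BERT checkpoint; the
    # optimizer state is reset rather than restored.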
    cls_model.bert_layer.load(
        "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True)

    # do train
    cls_model.fit(train_data=train_dataloader.dataloader,
                  epochs=config.epoch,
                  save_dir=config.checkpoints)

    # do eval
    cls_model.evaluate(
        eval_data=test_dataloader.dataloader, batch_size=config.batch_size)
Example no. 2
import paddle
from hapi.model import set_device
from hapi.text.bert.dataloader import SingleSentenceDataLoader
import hapi.text.tokenizer.tokenization as tokenization

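# Run on CPU in imperative (dygraph) mode.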
device = set_device("cpu")
paddle.fluid.enable_dygraph(device)

tokenizer = tokenization.FullTokenizer(
    vocab_file="./tmp/hapi/data/pretrained_models/uncased_L-12_H-768_A-12/vocab.txt",
    do_lower_case=True)

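# "./tmp/hapi/aaa.txt" is a placeholder input file; ["1", "2"] is assumed to
# be its label vocabulary, mirroring the label list in Example no. 1.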
bert_dataloader = SingleSentenceDataLoader(
    "./tmp/hapi/aaa.txt",
    tokenizer, ["1", "2"],
    max_seq_length=32,
    batch_size=1)

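# Iterate over the batches and print the padded feature tensors.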
for data in bert_dataloader.dataloader():
    print(data)