Example #1
def start():
    # Prefer the cached data; regenerate only if the raw files are missing
    if not os.path.exists(args.TRAIN) or not os.path.exists(args.VALID):
        produce_data(user_define=USER_DEFINE)

    if os.path.exists(args.TRAIN_CACHE):
        train_iter, num_train_steps = torch.load(args.TRAIN_CACHE)
    else:
        train_iter, num_train_steps = create_batch_iter("train")

    if os.path.exists(args.VALID_CACHE):
        eval_iter = torch.load(args.VALID_CACHE)
    else:
        eval_iter = create_batch_iter("dev")

    epoch_size = num_train_steps * args.train_batch_size * args.gradient_accumulation_steps / args.num_train_epochs

    pbar = ProgressBar(epoch_size=epoch_size, batch_size=args.train_batch_size)

    model = Bert_CRF.from_pretrained(args.bert_model, num_tag=len(args.labels))

    for name, param in model.named_parameters():
        if param.requires_grad:
            print(name)

    fit(model=model,
        training_iter=train_iter,
        eval_iter=eval_iter,
        num_epoch=args.num_train_epochs,
        pbar=pbar,
        num_train_steps=num_train_steps,
        verbose=1)
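Example #1 only reads the iterator caches; the code that writes them is not shown. A minimal sketch of the write side, assuming the project's create_batch_iter and the same cache-path settings from config.args (cached_batch_iter is a hypothetical helper name):

import os

import torch
import config.args as args
from Io.data_loader import create_batch_iter


def cached_batch_iter(mode, cache_path):
    # Reuse the cached iterator if the file exists; otherwise build it
    # and write the cache so the next run can skip the rebuild.
    if os.path.exists(cache_path):
        return torch.load(cache_path)
    result = create_batch_iter(mode)
    torch.save(result, cache_path)
    return result


# Usage matching Example #1's cache files:
# train_iter, num_train_steps = cached_batch_iter("train", args.TRAIN_CACHE)
# eval_iter = cached_batch_iter("dev", args.VALID_CACHE)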
Example #2
def start():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--do_not_train_ernie",
        default=False,
        action='store_true',
    )
    parser.add_argument(
        "--do_CRF",
        default=False,
        action='store_true',
    )
    arg = parser.parse_args()
    args.do_not_train_ernie = arg.do_not_train_ernie
    args.do_CRF = arg.do_CRF

    produce_data()
    train_iter, num_train_steps = create_batch_iter("train")
    eval_iter = create_batch_iter("dev")

    epoch_size = num_train_steps * args.train_batch_size * args.gradient_accumulation_steps / args.num_train_epochs

    pbar = ProgressBar(epoch_size=epoch_size, batch_size=args.train_batch_size)
    if args.load_weight:
        model = load_model(args.output_dir)
    else:
        model = Bert_CRF.from_pretrained(args.bert_model,
                                         num_tag=len(args.labels))

    for name, param in model.named_parameters():
        if param.requires_grad:
            print(name)

    fit(model=model,
        training_iter=train_iter,
        eval_iter=eval_iter,
        num_epoch=args.num_train_epochs,
        pbar=pbar,
        num_train_steps=num_train_steps,
        verbose=1)
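Example #2 copies the parsed CLI flags onto the shared config.args module so downstream code sees the overrides; note that default=False is redundant here, since action='store_true' already defaults to False. A generic sketch of the same override pattern (parse_overrides is a hypothetical name):

import argparse

import config.args as args  # shared config module, as in the examples


def parse_overrides():
    # store_true flags default to False, so no explicit default is needed.
    parser = argparse.ArgumentParser()
    parser.add_argument("--do_not_train_ernie", action="store_true")
    parser.add_argument("--do_CRF", action="store_true")
    cli = parser.parse_args()
    # Copy each flag onto the shared config so code that reads
    # config.args picks up the command-line values.
    for flag in ("do_not_train_ernie", "do_CRF"):
        setattr(args, flag, getattr(cli, flag))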
Example #3
def start():
    produce_data()
    train_iter, num_train_steps = create_batch_iter("train")
    eval_iter = create_batch_iter("dev")

    epoch_size = num_train_steps * args.train_batch_size * args.gradient_accumulation_steps / args.num_train_epochs

    pbar = ProgressBar(epoch_size=epoch_size, batch_size=args.train_batch_size)

    model = Bert_CRF.from_pretrained(args.bert_model, num_tag=len(args.labels))

    for name, param in model.named_parameters():
        if param.requires_grad:
            print(name)

    fit(model=model,
        training_iter=train_iter,
        eval_iter=eval_iter,
        num_epoch=args.num_train_epochs,
        pbar=pbar,
        num_train_steps=num_train_steps,
        verbose=1)
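All three examples derive epoch_size with the same formula. Assuming num_train_steps is the total number of optimizer steps across all epochs (the usual convention in BERT fine-tuning scripts), the expression simply inverts that count to recover samples per epoch (samples_per_epoch is a hypothetical name):

def samples_per_epoch(num_train_steps, batch_size, grad_accum, num_epochs):
    # If num_train_steps = (samples / batch_size / grad_accum) * num_epochs,
    # solving for samples gives the expression used in the examples above.
    return num_train_steps * batch_size * grad_accum / num_epochs


# e.g. 1000 total steps with batch size 32, accumulation 2, over 4 epochs:
assert samples_per_epoch(1000, 32, 2, 4) == 16000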
Example #4
from Io.data_loader import create_batch_iter
from preprocessing.data_processor import produce_data
import torch
import os
import json
import config.args as args
from util.model_util import load_model

args.do_inference = True
produce_data()

test_iter = create_batch_iter("inference")
epoch_size = args.train_batch_size * args.gradient_accumulation_steps / args.num_train_epochs
model = load_model(args.output_dir)

num_epoch = args.num_train_epochs
device = torch.device(
    args.device if torch.cuda.is_available() and not args.no_cuda else "cpu")

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

optimizer_grouped_parameters = [
    # Parameters that receive L2 weight decay.
    {'params': [p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    # Bias and LayerNorm parameters are conventionally left undecayed.
    {'params': [p for n, p in param_optimizer
                if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]
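This grouping follows the standard BERT fine-tuning recipe: bias and LayerNorm parameters are exempt from weight decay. The snippet ends before the optimizer is constructed; a minimal sketch of that step using torch.optim.AdamW (the original project may use a different optimizer class, and lr=5e-5 is an assumed value):

from torch.optim import AdamW

# Pass both groups to the optimizer; only the first group is decayed.
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5)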