Example #1
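Trains a BiLSTM text classifier with fastNLP: the model is built with pickled pre-trained embedding weights, fitted on GPU with Trainer, and then evaluated for accuracy on the test set with Tester.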
import pickle

from fastNLP import Trainer, Tester
from fastNLP.core import AccuracyMetric, CrossEntropyLoss, Adam

# MyBLSTMText (the BiLSTM classifier) and read_data are project-local helpers.
def bilstm_text():
    # load pickled pre-trained embedding weights
    w = pickle.load(open("weight.bin", "rb"))

    (vocab, train_data, dev_data, test_data) = read_data()

    model_lstm = MyBLSTMText(class_num=4,
                             vocab_size=len(vocab),
                             dropout=0.5,
                             embed_weights=w)
    loss = CrossEntropyLoss()
    metrics = AccuracyMetric()
    trainer = Trainer(model=model_lstm,
                      train_data=train_data,
                      dev_data=dev_data,
                      optimizer=Adam(lr=0.0015),
                      print_every=10,
                      use_tqdm=False,
                      device='cuda:0',
                      save_path="./lstm_model",
                      loss=loss,
                      metrics=metrics)
    # optional: callbacks=[EarlyStopCallback(10)]

    trainer.train()

    tester = Tester(test_data, model_lstm, metrics=AccuracyMetric())
    tester.test()
Example #2
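A generic training entry point: it loads pickled train/validation data and a vocabulary, looks up the model class by name, and trains with early stopping, selecting the best checkpoint by a custom 'pp' metric (lower is better).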
import os
import pickle

from fastNLP import Trainer
from fastNLP.core import Adagrad, Adam, SGD, EarlyStopCallback  # import paths may vary by fastNLP version

# opt (the config namespace), utils, models, MyCrossEntropyLoss and
# MyPPMetric are project-local.
def train():
    train_data = pickle.load(open(opt.train_data_path, 'rb'))
    validate_data = pickle.load(open(opt.validate_data_path, 'rb'))

    vocab = pickle.load(open(opt.vocab, 'rb'))
    word2idx = vocab.word2idx
    idx2word = vocab.idx2word
    vocab_size = len(word2idx)
    print("vocab_size" + str(vocab_size))
    embedding_dim = opt.embedding_dim
    hidden_dim = opt.hidden_dim
    # look up the model class by name in the models module and instantiate it
    model = utils.find_class_by_name(opt.model_name,
                                     [models])(vocab_size, embedding_dim,
                                               hidden_dim)

    if not os.path.exists(opt.save_model_path):
        os.mkdir(opt.save_model_path)

    # mark which fields are model inputs and which are prediction targets
    train_data.set_input('input_data', flag=True)
    train_data.set_target('target', flag=True)
    validate_data.set_input('input_data', flag=True)
    validate_data.set_target('target', flag=True)

    if opt.optimizer == 'Adagrad':
        _optimizer = Adagrad(lr=opt.learning_rate, weight_decay=0)
    elif opt.optimizer == 'SGD':
        _optimizer = SGD(lr=opt.learning_rate, momentum=0)
    elif opt.optimizer == 'SGD_momentum':
        _optimizer = SGD(lr=opt.learning_rate, momentum=0.9)
    elif opt.optimizer == 'Adam':
        _optimizer = Adam(lr=opt.learning_rate, weight_decay=0)
    else:
        raise ValueError(f"unknown optimizer: {opt.optimizer}")

    overfit_trainer = Trainer(model=model,
                              train_data=train_data,
                              loss=MyCrossEntropyLoss(pred="output",
                                                      target="target"),
                              n_epochs=opt.epoch,
                              batch_size=opt.batch_size,
                              device='cuda:0',
                              dev_data=validate_data,
                              metrics=MyPPMetric(pred="output",
                                                 target="target"),
                              metric_key="-pp",
                              validate_every=opt.validate_every,
                              optimizer=_optimizer,
                              callbacks=[EarlyStopCallback(opt.patience)],
                              save_path=opt.save_model_path)

    overfit_trainer.train()
Example #3
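Fine-tunes a BERT-based relation-extraction model on the SemEval task 8 corpus, using fastNLP's Const field names to wire the model output into the loss and the accuracy metric.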
import json

from fasNLP_dataloader import Semeval_task_8_corpus_Pipe
from fastNLP import Trainer
from fastNLP.core import AccuracyMetric, CrossEntropyLoss, Adam
from fastNLP.core.const import Const as C

# BertConfig/BertModel come from the project's BERT dependency (the import is
# not shown in the source); the path and hyper-parameter variables below are
# assumed to be defined in the surrounding configuration.
config = BertConfig.from_json_file(bert_config_txt_path)
bert_word_dims = config.hidden_size

bert_model_real = BertModel.from_pretrained(pretrained_model_name_or_path=english_bert_base_dir_)

if __name__ == '__main__':
    print('##' * 10)
    data_pipe = Semeval_task_8_corpus_Pipe().process_from_file({'train': train_data_path, 'test': test_data_path})
    print('##' * 10)
    with open(relation2id_dict_path, mode='rt', encoding='utf-8') as dict_inp:
        relation2id_dict = json.load(dict_inp)
    c_bert_model = C_Bert_for_RE_model(bert_dims=bert_word_dims,
                                       n_class=len(relation2id_dict),
                                       bert_model=bert_model_real,
                                       dropout=0.1)
    metric = AccuracyMetric(pred=C.OUTPUT, target=C.TARGET)
    loss = CrossEntropyLoss(pred=C.OUTPUT, target=C.TARGET)
    optimizer = Adam(lr=learning_rate, model_params=c_bert_model.parameters())
    trainer = Trainer(train_data=data_pipe.get_dataset('train'),
                      dev_data=data_pipe.get_dataset('test'),
                      model=c_bert_model,
                      optimizer=optimizer,
                      metrics=metric,
                      batch_size=BATCH_SIZE,
                      n_epochs=EPOCHES_NUM,
                      save_path='c_bert_model_save_dir',
                      loss=loss)
    result = trainer.train(load_best_model=True)
    print('Training finished; summary:')  # translated from the original Chinese
    for i, j in result.items():
        print(i, ':', j)
Example #4
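A classification variant of Example #2: it dispatches between LSTM, bidirectional LSTM, CNN and BERT models, trains on four GPUs, and selects the best checkpoint by accuracy.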
import os
import pickle

from fastNLP import Trainer
from fastNLP.core import AccuracyMetric, CrossEntropyLoss, Adam, SGD, EarlyStopCallback  # import paths may vary by fastNLP version

# opt, utils and models are project-local, as in Example #2.
def train():
    train_data = pickle.load(open(opt.train_data_path, 'rb'))
    validate_data = pickle.load(open(opt.validate_data_path, 'rb'))

    vocab = pickle.load(open(opt.vocab, 'rb'))
    word2idx = vocab.word2idx
    idx2word = vocab.idx2word
    input_size = len(word2idx)

    # note: the source reuses the name 'vocab_size' to carry the class count
    vocab_size = opt.class_num
    class_num = opt.class_num

    embedding_dim = opt.embedding_dim

    if opt.model_name == "LSTMModel":
        model = utils.find_class_by_name(opt.model_name,
                                         [models])(input_size, vocab_size,
                                                   embedding_dim,
                                                   opt.use_word2vec,
                                                   opt.embedding_weight_path)
    elif opt.model_name == "B_LSTMModel":
        model = utils.find_class_by_name(opt.model_name,
                                         [models])(input_size, vocab_size,
                                                   embedding_dim,
                                                   opt.use_word2vec,
                                                   opt.embedding_weight_path)
    elif opt.model_name == "CNNModel":
        model = utils.find_class_by_name(opt.model_name,
                                         [models])(input_size, vocab_size,
                                                   embedding_dim,
                                                   opt.use_word2vec,
                                                   opt.embedding_weight_path)
    elif opt.model_name == "MyBertModel":
        #bert_dir = "./BertPretrain"
        #bert_dir = None
        #model = utils.find_class_by_name(opt.model_name, [models])(10, 0.1, 4, bert_dir)
        train_data.apply(lambda x: x['input_data'][:2500],
                         new_field_name='input_data')
        validate_data.apply(lambda x: x['input_data'][:2500],
                            new_field_name='input_data')

        model = utils.find_class_by_name(opt.model_name, [models])(
            input_size=input_size,
            hidden_size=512,
            hidden_dropout_prob=0.1,
            num_labels=class_num,
            use_word2vec=opt.use_word2vec,
            embedding_weight_path=opt.embedding_weight_path,
        )
    else:
        raise ValueError(f"unknown model: {opt.model_name}")

    if not os.path.exists(opt.save_model_path):
        os.mkdir(opt.save_model_path)

    # mark which fields are model inputs and which are prediction targets
    train_data.set_input('input_data', flag=True)
    train_data.set_target('target', flag=True)
    validate_data.set_input('input_data', flag=True)
    validate_data.set_target('target', flag=True)

    if opt.optimizer == 'SGD':
        _optimizer = SGD(lr=opt.learning_rate, momentum=0)
    elif opt.optimizer == 'SGD_momentum':
        _optimizer = SGD(lr=opt.learning_rate, momentum=0.9)
    elif opt.optimizer == 'Adam':
        _optimizer = Adam(lr=opt.learning_rate, weight_decay=0)
    else:
        raise ValueError(f"unknown optimizer: {opt.optimizer}")

    overfit_trainer = Trainer(
        model=model,
        train_data=train_data,
        loss=CrossEntropyLoss(pred="output", target="target"),
        n_epochs=opt.epoch,
        batch_size=opt.batch_size,
        device=[0, 1, 2, 3],
        #device=None,
        dev_data=validate_data,
        metrics=AccuracyMetric(pred="output", target="target"),
        metric_key="+acc",
        validate_every=opt.validate_every,
        optimizer=_optimizer,
        callbacks=[EarlyStopCallback(opt.patience)],
        save_path=opt.save_model_path)

    overfit_trainer.train()
Example #5
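An excerpt from a BERT sentence-matching script: it builds BertForNLI for the chosen task, trains across all visible GPUs, and evaluates on the test set with Tester.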
# NOTE: the excerpt begins mid if/elif chain; the earlier task branches and
# the script's imports are not shown in the source.
    )
elif arg.task == 'quora':
    data_info = QuoraLoader().process(
        paths='path/to/quora/data', to_lower=True, seq_len_type=arg.seq_len_type,
        bert_tokenizer=arg.bert_dir, cut_text=512,
        get_index=True, concat='bert',
    )
else:
    raise RuntimeError(f'NOT support {arg.task} task yet!')

# define model
model = BertForNLI(class_num=len(data_info.vocabs[Const.TARGET]), bert_dir=arg.bert_dir)

# define trainer
trainer = Trainer(train_data=data_info.datasets[arg.train_dataset_name], model=model,
                  optimizer=Adam(lr=arg.lr, model_params=model.parameters()),
                  batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
                  n_epochs=arg.n_epochs, print_every=-1,
                  dev_data=data_info.datasets[arg.dev_dataset_name],
                  metrics=AccuracyMetric(), metric_key='acc',
                  device=[i for i in range(torch.cuda.device_count())],
                  check_code_level=-1,
                  save_path=arg.save_path)

# train model
trainer.train(load_best_model=True)

# define tester
tester = Tester(
    data=data_info.datasets[arg.test_dataset_name],
    model=model,
    # the source cuts off here; an accuracy metric and the closing call are
    # restored (assumed) so the snippet runs end-to-end
    metrics=AccuracyMetric(),
)
tester.test()