Example #1
0
 def test_case2(self):
     """Moving a model with an int device index (``device=0``).

     Also checks that out-of-range indices (too large, negative) raise.
     """
     model = Model()
     if torch.cuda.is_available():
         model = _move_model_to_device(model, 0)
         # Original duplicated this assertion, the second copy with a
         # truncated message — one assertion with a complete message.
         assert model.param.device == torch.device('cuda:0'), \
             "The model should be on cuda:0"
         # An index beyond the available devices must raise.
         with self.assertRaises(Exception):
             _move_model_to_device(model, 100)
         # A negative index must raise.
         with self.assertRaises(Exception):
             _move_model_to_device(model, -1)
Example #2
0
 def test_case5(self):
     """Moving a model with ``torch.device`` objects (cpu, cuda, bad ordinal)."""
     if not torch.cuda.is_available():
         return
     # First move to an explicit CPU device object.
     cpu_device = torch.device('cpu')
     net = Model()
     _move_model_to_device(net, cpu_device)
     # Then to the default CUDA device; it should land on cuda:0.
     cuda_device = torch.device('cuda')
     net = _move_model_to_device(net, cuda_device)
     assert net.param.device == torch.device('cuda:0')
     # A non-existent CUDA ordinal must raise.
     with self.assertRaises(Exception):
         _move_model_to_device(net, torch.device('cuda:100'))
Example #3
0
 def test_case1(self):
     """Moving a model with string device specs ('cpu', 'cuda', 'cuda:N', None)."""
     net = Model()
     net = _move_model_to_device(net, 'cpu')
     assert net.param.device == torch.device('cpu')
     # An unknown device string must raise.
     with self.assertRaises(Exception):
         _move_model_to_device(net, 'cpuu')
     # GPU string specs, only when CUDA is present.
     if torch.cuda.is_available():
         net = _move_model_to_device(net, 'cuda')
         assert net.param.is_cuda
         net = _move_model_to_device(net, 'cuda:0')
         assert net.param.device == torch.device('cuda:0')
         # An out-of-range ordinal must raise.
         with self.assertRaises(Exception):
             _move_model_to_device(net, 'cuda:1000')
     # None is accepted and leaves the model where it is.
     net = _move_model_to_device(net, None)
Example #4
0
 def test_case3(self):
     """``device=None`` must leave the model's current device untouched."""
     net = Model()
     before = _get_model_device(net)
     net = _move_model_to_device(net, None)
     assert before == _get_model_device(net), "The device should not change."
     if torch.cuda.is_available():
         # Same contract once the model already lives on the GPU.
         net.cuda()
         before = _get_model_device(net)
         net = _move_model_to_device(net, None)
         assert before == _get_model_device(net), "The device should not change."

         # A DataParallel-wrapped model tolerates None but refuses 'cpu'.
         net = nn.DataParallel(net, device_ids=[0])
         _move_model_to_device(net, None)
         with self.assertRaises(Exception):
             _move_model_to_device(net, 'cpu')
Example #5
0
 def test_case4(self):
     """Moving a model with list-valued device arguments."""
     net = Model()
     # A list of device strings is rejected outright.
     with self.assertRaises(Exception):
         _move_model_to_device(net, ['cpu'])
     if torch.cuda.is_available():
         # A single-element id list moves the model without DataParallel.
         moved = _move_model_to_device(net, [0])
         assert not isinstance(moved, nn.DataParallel)
         # Duplicate device entries are rejected.
         with self.assertRaises(Exception):
             moved = _move_model_to_device(
                 net, [torch.device('cuda:0'), torch.device('cuda:0')])
         if torch.cuda.device_count() > 1:
             # Two distinct ids wrap the model in DataParallel.
             moved = _move_model_to_device(net, [0, 1])
             assert isinstance(moved, nn.DataParallel)
             # String specs inside a list are rejected.
             with self.assertRaises(Exception):
                 _move_model_to_device(net, ['cuda', 'cuda:1'])
Example #6
0
# Prepare the test set: rename the raw column, tokenize into 'words',
# map tokens to vocabulary indices, and mark 'words' as a model input.
test_data.rename_field('Phrase', 'raw_words')

test_data.apply(get_words, new_field_name='words')

vocab_all.index_dataset(test_data, field_name='words')

test_data.set_input('words')
'''
EMBED_DIM = 100
model = CNNText((len(vocab_all),EMBED_DIM), num_classes=len(vocab_target), dropout=0.1)
'''
# Use GPU 0 when CUDA is available, otherwise stay on the CPU.
device = 0 if torch.cuda.is_available() else 'cpu'
embed = BertEmbedding(vocab_all, model_dir_or_name='en', include_cls_sep=True)
model = BertForSequenceClassification(embed, len(vocab_target))
# NOTE(review): loads previously saved weights into `model` — the pickle is
# presumably produced by an earlier training run; confirm the path exists.
ModelLoader.load_pytorch(model, 'save_model/ceshi.pkl')
_move_model_to_device(model, device=device)


#pred = model_cnn.predict(torch.LongTensor([test_data[10]['words']]))
def predict(instance):
    """Return the predicted label word for a single dataset instance.

    Reads the module-level ``model`` and ``vocab_target``; the instance is
    expected to carry an indexed 'words' field.
    """
    # Batch of one, placed on whatever device the model currently occupies.
    batch = torch.LongTensor([instance['words']])
    batch = batch.to(device=_get_model_device(model))
    output = model.predict(batch)
    # Map the predicted class index back to its label word.
    return vocab_target.to_word(int(output['pred']))


# Label every test instance with the model's prediction.
test_data.apply(predict, new_field_name='target')

# Write the submission header; data rows are presumably appended later
# (not shown in this chunk) — note the file handle is never closed here.
out_file = open('data/1/sub2021.1.19.csv', 'w')
out_file.write('PhraseId,Sentiment\n')
Example #7
0
def train():
    """Fine-tune a BERT sentence-matching model and evaluate it.

    Loads a triple dataset, splits it into train/validation/test, builds the
    model-input fields, trains for 10 epochs with Adam + cross-entropy,
    prints validation accuracy each epoch and test accuracy at the end, and
    pickles the model after every epoch and after the final test.
    """
    n_epochs = 10
    train_set = data_set_loader._load('../models/all4bert_new_triple.txt')
    # NOTE(review): split(0.2) presumably holds out 20%, which is then split
    # evenly into validation and test — confirm against fastNLP's DataSet.split.
    train_set, tmp_set = train_set.split(0.2)
    val_set, test_set = tmp_set.split(0.5)
    data_bundle = [train_set, val_set, test_set]

    # Derive the model-input fields on every split: words -> word pieces,
    # piece counts, sequence length, and the classification target.
    for dataset in data_bundle:
        dataset.apply(addWords, new_field_name="p_words")
        dataset.apply(addWordPiece, new_field_name="t_words")
        dataset.apply(processItem, new_field_name="word_pieces")
        dataset.apply(processNum, new_field_name="word_nums")
        dataset.apply(addSeqlen, new_field_name="seq_len")
        dataset.apply(processTarget, new_field_name="target")

    # Mark which fields are fed to the model and which one is the label.
    for dataset in data_bundle:
        dataset.field_arrays["word_pieces"].is_input = True
        dataset.field_arrays["seq_len"].is_input = True
        dataset.field_arrays["word_nums"].is_input = True
        dataset.field_arrays["target"].is_target = True

    print("In total " + str(len(data_bundle)) + " datasets:")
    print("Trainset has " + str(len(train_set)) + " instances.")
    print("Validateset has " + str(len(val_set)) + " instances.")
    print("Testset has " + str(len(test_set)) + " instances.")
    train_set.print_field_meta()
    # print(train_set)
    from fastNLP.models.Mybert import BertForSentenceMatching
    from fastNLP import AccuracyMetric, DataSetIter

    from fastNLP.core.utils import _pseudo_tqdm as tqdm
    # Note: the second argument is the number of output classes.
    # NOTE(review): `embed` is not defined in this function — presumably a
    # module-level embedding object created elsewhere in the file; confirm.
    model = BertForSentenceMatching(embed, 3)
    if torch.cuda.is_available():
        model = _move_model_to_device(model, device=0)
    # print(model)
    train_batch = DataSetIter(batch_size=16, dataset=train_set, sampler=None)
    optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)
    Lossfunc = torch.nn.CrossEntropyLoss()
    with tqdm(total=n_epochs,
              postfix='loss:{0:<6.5f}',
              leave=False,
              dynamic_ncols=True) as pbar:
        print_every = 10
        for epoch in range(1, n_epochs + 1):
            pbar.set_description_str(
                desc="Epoch {}/{}".format(epoch, n_epochs))
            avg_loss = 0
            step = 0
            for batch_x, batch_y in train_batch:
                step += 1
                # Keep each batch on the same device as the model.
                _move_dict_value_to_device(batch_x,
                                           batch_y,
                                           device=_get_model_device(model))
                optimizer.zero_grad()
                output = model.forward(batch_x["word_pieces"],
                                       batch_x["word_nums"],
                                       batch_x["seq_len"])
                loss = Lossfunc(output['pred'], batch_y['target'])
                loss.backward()
                optimizer.step()
                avg_loss += loss.item()
                # Report a running loss average every `print_every` steps.
                if step % print_every == 0:
                    avg_loss = float(avg_loss) / print_every
                    print_output = "[epoch: {:>3} step: {:>4}] train loss: {:>4.6}".format(
                        epoch, step, avg_loss)
                    pbar.update(print_every)
                    pbar.set_postfix_str(print_output)
                    avg_loss = 0
            # End-of-epoch validation accuracy.
            metric = AccuracyMetric()
            val_batch = DataSetIter(batch_size=8,
                                    dataset=val_set,
                                    sampler=None)
            for batch_x, batch_y in val_batch:
                _move_dict_value_to_device(batch_x,
                                           batch_y,
                                           device=_get_model_device(model))
                output = model.predict(batch_x["word_pieces"],
                                       batch_x["word_nums"],
                                       batch_x["seq_len"])
                metric(output, batch_y)
            eval_result = metric.get_metric()
            print("ACC on Validate Set:", eval_result)
            # NOTE(review): saves to the same path every epoch, so the file
            # ends up holding the *last* epoch's weights, not the best-scoring
            # ones despite the "max" in the filename — confirm intent.
            from fastNLP.io import ModelSaver
            saver = ModelSaver("../models/bert_model_max_triple.pkl")
            saver.save_pytorch(model, param_only=False)
        pbar.close()
    # Final accuracy on the held-out test set.
    metric = AccuracyMetric()
    test_batch = DataSetIter(batch_size=8, dataset=test_set, sampler=None)
    for batch_x, batch_y in test_batch:
        _move_dict_value_to_device(batch_x,
                                   batch_y,
                                   device=_get_model_device(model))
        output = model.predict(batch_x["word_pieces"], batch_x["word_nums"],
                               batch_x["seq_len"])
        metric(output, batch_y)
    eval_result = metric.get_metric()
    print("ACC on Test Set:", eval_result)
    from fastNLP.io import ModelSaver
    saver = ModelSaver("../models/bert_model_max_triple.pkl")
    saver.save_pytorch(model, param_only=False)