Example 1
def main():
    # Read the config file
    config = parse_config()
    # Load the training set
    train_data = DataBatchIterator(config=config,
                                   is_train=True,
                                   dataset="train",
                                   batch_size=config.batch_size,
                                   shuffle=True)
    train_data.load()

    vocab = train_data.vocab

    # Load the test set
    test_data = DataBatchIterator(config=config,
                                  is_train=False,
                                  dataset="test",
                                  batch_size=config.batch_size)
    test_data.set_vocab(vocab)
    test_data.load()

    # At test time, load the saved model
    checkpoint = torch.load(config.save_model + ".pt",
                            map_location=config.device)
    model = checkpoint

    # model = build_textcnn_model(
    #     vocab, config, train=True)
    predict, label = test_textcnn_model(model, test_data, config)
    print(classification_report(label, predict))
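
Example 1 (and the listings that follow) omits its module-level imports. A minimal sketch of what it would need, assuming the project-specific helpers live in local modules; only torch and scikit-learn's classification_report are known third-party imports, the rest are assumptions for illustration:

import torch
from sklearn.metrics import classification_report

# Project-specific helpers; the module names below are assumed, not taken from the source.
# from config import parse_config
# from dataset import DataBatchIterator
# from model import build_textcnn_model, test_textcnn_model
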
Example 2
def main():
    # Read the config file
    config = parse_config()
    # Load the training set
    train_data = DataBatchIterator(
        config=config,
        is_train=True,
        dataset="train",
        #batch_size=config.batch_size,
        shuffle=True)
    train_data.load()

    vocab = train_data.vocab

    # Load the validation set
    valid_data = DataBatchIterator(
        config=config,
        is_train=False,
        dataset="dev",
        #batch_size=config.batch_size
    )
    valid_data.set_vocab(vocab)
    valid_data.load()

    # Build the TextCNN model
    model = build_textcnn_model(vocab, config, train=True)

    print(model)

    # Do training.
    padding_idx = vocab.stoi[PAD]
    train_textcnn_model(model, train_data, valid_data, padding_idx, config)
    torch.save(model, '%s.pt' % (config.save_model))
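
Example 2 pickles the whole model object with torch.save(model, ...), which is why the test-time listings can call torch.load and use the result directly as a model. A common alternative is to persist only the weights; a minimal sketch, assuming build_textcnn_model can rebuild the same architecture and accepts train=False (only train=True appears in these listings):

# Save only the weights instead of the whole pickled model (assumed alternative).
torch.save(model.state_dict(), '%s.pt' % config.save_model)

# At load time, rebuild the architecture first, then restore the weights.
model = build_textcnn_model(vocab, config, train=False)
model.load_state_dict(torch.load('%s.pt' % config.save_model,
                                 map_location=config.device))
model.eval()
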
Example 3
def main():
    config = parse_config()
    checkpoint = torch.load(config.save_model + ".pt",
                            map_location=config.device)

    train_data = DataBatchIterator(config=config,
                                   is_train=True,
                                   dataset="train",
                                   batch_size=config.batch_size,
                                   shuffle=True)

    train_data.load()

    vocab = train_data.vocab

    # Load the test set (used here as the validation set)
    valid_data = DataBatchIterator(config=config,
                                   is_train=False,
                                   dataset="test",
                                   batch_size=config.batch_size)
    valid_data.set_vocab(vocab)
    valid_data.load()
    # Do training.
    padding_idx = vocab.stoi[PAD]
    train_textcnn_model(checkpoint, train_data, valid_data, padding_idx,
                        config)
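
Example 3 resumes training by handing the unpickled model straight back to train_textcnn_model; since only the model object was saved, any optimizer state is rebuilt from scratch. A minimal sketch of checkpointing model and optimizer together, assuming the training code exposed its optimizer (these listings do not):

# Save model and optimizer state in one checkpoint (assumed extension).
torch.save({"model": model.state_dict(),
            "optimizer": optimizer.state_dict()},
           config.save_model + ".pt")

# Resume later:
ckpt = torch.load(config.save_model + ".pt", map_location=config.device)
model.load_state_dict(ckpt["model"])
optimizer.load_state_dict(ckpt["optimizer"])
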
Example 4
def main():
    # Read the config file
    config = parse_config()
    # Load the training set
    train_data = DataBatchIterator(config=config,
                                   is_train=True,
                                   dataset="train",
                                   batch_size=config.batch_size,
                                   shuffle=True)
    train_data.load()

    vocab = train_data.vocab  # vocabulary mapping table (token -> index)

    # Load the test set
    test_data = DataBatchIterator(config=config,
                                  is_train=False,
                                  dataset="test",
                                  batch_size=config.batch_size)
    test_data.set_vocab(vocab)
    test_data.load()

    # Load the trained model for testing
    model = torch.load(config.save_model + ".pt", map_location=config.device)

    print(model)

    test(model, test_data)
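
test is a project helper that the listing does not show. Judging from the inline evaluation loop in Example 6, it presumably runs the model over the test batches, takes the argmax of each output, and prints a classification report; a hypothetical reconstruction:

def test(model, test_data):
    # Hypothetical sketch modeled on Example 6; not the project's actual implementation.
    model.eval()
    predictions, labels = [], []
    with torch.no_grad():
        for batch in iter(test_data):
            outputs = model(batch.sent)
            predictions.append(torch.max(outputs, 1)[1].cpu())
            labels.append(batch.label.cpu())
    print(classification_report(torch.cat(labels), torch.cat(predictions)))
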
Example 5
def main():
    # Read the config file
    config = parse_config()
    # Load the training set
    train_data = DataBatchIterator(
        config=config,
        is_train=True,
        dataset="train",
        batch_size=config.batch_size,
        shuffle=True)
    train_data.load()

    vocab = train_data.vocab

    # Load the validation set
    valid_data = DataBatchIterator(
        config=config,
        is_train=False,
        dataset="dev",
        batch_size=config.batch_size)
    valid_data.set_vocab(vocab)
    valid_data.load()

    # Build the TextCNN model
    model = build_textcnn_model(
        vocab, config, train=True)

    print(model)
    # Do training.
    padding_idx = vocab.stoi[PAD]
    #train_textcnn_model(model, train_data,
    #                    valid_data, padding_idx, config)
    #torch.save(model, '%s.pt' % (config.save_model))


    # At test time: load the test set
    test_data = DataBatchIterator(
        config=config,
        is_train=False,
        dataset="test",
        batch_size=config.batch_size)
    # Reuse the training vocabulary before loading, as in the other examples.
    test_data.set_vocab(vocab)
    test_data.load()

    # Load the trained model
    checkpoint = torch.load(config.save_model + ".pt",
                            map_location=config.device)
    # Run the test and print the evaluation report
    test_valid(checkpoint, config, test_data)
Example 6
def main():
    # Read the config file
    config = parse_config()
    # Load the training set
    train_data = DataBatchIterator(config=config,
                                   is_train=True,
                                   dataset="train",
                                   batch_size=config.batch_size,
                                   shuffle=True)
    train_data.load()

    vocab = train_data.vocab

    # Load the test set (used here as the validation set)
    valid_data = DataBatchIterator(config=config,
                                   is_train=False,
                                   dataset="test",
                                   batch_size=config.batch_size)
    valid_data.set_vocab(vocab)
    valid_data.load()

    # At test time, load the saved model
    checkpoint = torch.load(config.save_model + ".pt",
                            map_location=config.device)
    model = checkpoint
    # model = build_textcnn_model(
    #     vocab, config, train=True)
    model.eval()
    total_predict = torch.LongTensor([])
    total_label = torch.LongTensor([])
    # No gradients are needed during evaluation.
    with torch.no_grad():
        for batch in iter(valid_data):
            # batch_first = False
            outputs = model(batch.sent)
            predict = torch.max(outputs, 1)[1]
            # Accumulate on the CPU so the concatenation also works when the
            # model runs on a GPU.
            total_predict = torch.cat((total_predict, predict.cpu()), 0)
            total_label = torch.cat((total_label, batch.label.cpu()), 0)
    print(classification_report(total_label, total_predict))