# Example #1
        np.save(result_src, total_y_pre)  # persist the accumulated predictions to disk
        return y, total_y_pre  # ground-truth labels and model predictions


if __name__ == "__main__":
    # Path of the trained 2-layer LSTM model checkpoint
    model_save_src = "data/model/2_layer_lstm_model"
    num_category = 9  # number of target classes
    # Vectorized (numpy) test dataset
    x_src = "data/vectorized_data/test/x.npy"
    y_src = "data/vectorized_data/test/y.npy"

    result_src = "data/results/rnn_pre.npy"  # where predictions are written
    vocab_src = "data/middle_result/vocab.npy"
    data = Data()
    vocab, _ = data.load_vocab(vocab_src)

    # Build the model from the vocabulary-sized config.
    # NOTE(review): model / model_save_src are not passed to test() —
    # presumably test() reads them as module-level globals; verify in its def.
    config = TRNNConfig()
    config.vocab_size = len(vocab)
    model = TextRNN(config)
    # Run inference on the test set
    print("Begin Testing")
    start_time = time.time()
    y, y_pre = test(x_src, y_src, result_src)
    print("the time is {}".format(get_time_dif(start_time)))

    # Compute evaluation metrics from labels vs. predictions
    precision_score, recall_score, f1_val, accuracy = evaluate(y, y_pre)

    # Per-category reporting follows; the loop body continues beyond this excerpt.
    for i in range(num_category):
# Example #2
            if total_batch - last_improved > require_improvement:
                # Validation accuracy has not improved for a long time;
                # stop training early.
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break  # leave the inner (batch) loop
        if flag:  # early-stop flag set above; leave the epoch loop as well
            break
    print("the best acc on validation is {}".format(best_acc_val))


if __name__ == '__main__':
    # Directories holding the vectorized train / validation splits
    train_dir = "data/vectorized_data/train"
    val_dir = "data/vectorized_data/validation"
    vocab_dir = "data/file_dict/train/vocab.npy"

    # Directory where model checkpoints are saved during training
    save_dir = 'data/model2'

    data_process = Data()

    config = CharCNNConfig()

    # Build the vocabulary on first run; afterwards it is loaded from disk.
    if not os.path.exists(vocab_dir):
        data_process.build_vocab(train_dir, vocab_dir)

    words, word_to_id = data_process.load_vocab(vocab_dir)
    config.vocab_size = len(words)

    model = CharCNN(config)
    # train() takes no arguments — presumably it reads the module-level names
    # defined above (model, config, save_dir, val_dir, ...); verify in its def.
    train()