Example #1
import os

import numpy as np
import tensorflow as tf

# readdata, word2vec, cnn_model, config and the *_path constants below are
# assumed to come from the surrounding project module.


def get_cnn_result():
    # Fail fast if the word2vec model or the saved training params are missing.
    if not os.path.exists(embedding_model_path):
        raise FileNotFoundError("word2vec model not found")

    if not os.path.exists(train_data_path):
        raise FileNotFoundError("train params not found")

    params = readdata.loadDict(train_data_path)
    train_length = int(params['max_sentences_length'])

    # Clean the raw test file, then pad every sentence to the training length.
    test_sample_lists = readdata.get_cleaned_list(test_file_path)
    test_sample_lists, max_sentences_length = readdata.padding_sentences(
        test_sample_lists,
        padding_token='<PADDING>',
        padding_sentence_length=train_length)
    # Map tokens to word2vec embeddings so shapes match the trained model.
    test_sample_arrays = np.array(
        word2vec.get_embedding_vector(test_sample_lists, embedding_model_path))
    testconfig = config()
    testconfig.max_sentences_length = max_sentences_length

    sess = tf.InteractiveSession()
    cnn = cnn_model.TextCNN(config=testconfig)

    # Load the trained weights from the checkpoint
    saver = tf.train.Saver()
    saver.restore(sess, "data/cnn/text_model")

    # Define the test step (dropout disabled at inference)
    def test_step(x_batch):
        feed_dict = {cnn.input_x: x_batch, cnn.dropout_keep_prob: 1.0}
        predictions, scores = sess.run([cnn.predictions, cnn.softmax_result],
                                       feed_dict=feed_dict)
        return (predictions, scores)

    # Run inference and return the predicted labels
    predictions, scores = test_step(test_sample_arrays)
    return np.array(predictions)
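
A hedged aside: the restore above hard-codes a checkpoint prefix. If the directory can hold several checkpoints, tf.train.latest_checkpoint picks the newest one (the data/cnn directory name is taken from the path above):

    # Inside get_cnn_result, instead of the fixed prefix:
    ckpt = tf.train.latest_checkpoint("data/cnn")
    if ckpt is not None:
        saver.restore(sess, ckpt)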
Example #2
def get_mixed_result():
    # Fail fast if the word2vec model or either model's training params are missing.
    if not os.path.exists(embedding_model_path):
        raise FileNotFoundError("word2vec model not found")

    if not os.path.exists(lstm_train_data_path):
        raise FileNotFoundError("lstm train params not found")

    lstm_params = readdata.loadDict(lstm_train_data_path)
    lstm_train_length = int(lstm_params['max_sentences_length'])

    if not os.path.exists(cnn_train_data_path):
        raise FileNotFoundError("cnn train params not found")

    cnn_params = readdata.loadDict(cnn_train_data_path)
    cnn_train_length = int(cnn_params['max_sentences_length'])

    # Pad the same cleaned test set to each model's own training length.
    test_sample_lists = readdata.get_cleaned_list(test_file_path)
    lstm_test_sample_lists, lstm_max_sentences_length = readdata.padding_sentences(
        test_sample_lists,
        padding_token='<PADDING>',
        padding_sentence_length=lstm_train_length)
    cnn_test_sample_lists, cnn_max_sentences_length = readdata.padding_sentences(
        test_sample_lists,
        padding_token='<PADDING>',
        padding_sentence_length=cnn_train_length)
    lstm_test_sample_arrays = np.array(
        word2vec.get_embedding_vector(lstm_test_sample_lists,
                                      embedding_model_path))
    cnn_test_sample_arrays = np.array(
        word2vec.get_embedding_vector(cnn_test_sample_lists,
                                      embedding_model_path))
    lstm_config = lstmconfig()
    cnn_config = cnnconfig()
    lstm_config.max_sentences_length = lstm_max_sentences_length
    cnn_config.max_sentences_length = cnn_max_sentences_length

    # Build each model in its own graph and session so variables don't collide.
    lstm_graph = tf.Graph()
    cnn_graph = tf.Graph()
    lstm_sess = tf.Session(graph=lstm_graph)
    cnn_sess = tf.Session(graph=cnn_graph)

    with lstm_sess.as_default():
        with lstm_graph.as_default():
            lstm = lstm_model.TextLSTM(config=lstm_config)
            lstm_saver = tf.train.Saver()
            lstm_saver.restore(lstm_sess, "data/lstm/text_model")

            # Dropout is disabled (keep probability 1.0) at inference time.
            def lstm_test_step(x_batch):
                feed_dict = {
                    lstm.input_x: x_batch,
                    lstm.dropout_keep_prob: 1.0
                }
                scores = lstm_sess.run([lstm.softmax_result],
                                       feed_dict=feed_dict)
                return scores

            lstm_scores = lstm_test_step(lstm_test_sample_arrays)

    with cnn_sess.as_default():
        with cnn_graph.as_default():
            cnn = cnn_model.TextCNN(config=cnn_config)
            cnn_saver = tf.train.Saver()
            cnn_saver.restore(cnn_sess, "data/cnn/text_model")

            # Dropout is disabled (keep probability 1.0) at inference time.
            def cnn_test_step(x_batch):
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.dropout_keep_prob: 1.0
                }
                scores = cnn_sess.run([cnn.softmax_result],
                                      feed_dict=feed_dict)
                return scores

            cnn_scores = cnn_test_step(cnn_test_sample_arrays)

    lstm_sess.close()
    cnn_sess.close()
    # Ensemble by summing the two softmax score arrays; each *_scores is a
    # one-element list, so the sum keeps a leading axis of size 1 and the
    # class index lives on axis 2.
    mixed_scores = np.sum([lstm_scores, cnn_scores], axis=0)
    predictions = np.argmax(mixed_scores, axis=2)
    return np.array(predictions)
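
The mixing step in isolation, as a minimal sketch with made-up scores (summing the softmax outputs and taking the argmax is plain score-level ensembling; averaging would give the same argmax):

    import numpy as np

    # Hypothetical per-model softmax outputs: batch of 2 samples, 3 classes,
    # each wrapped in a one-element list as sess.run([...]) returns them.
    lstm_scores = [np.array([[0.2, 0.5, 0.3], [0.6, 0.3, 0.1]])]
    cnn_scores = [np.array([[0.1, 0.7, 0.2], [0.4, 0.4, 0.2]])]

    mixed = np.sum([lstm_scores, cnn_scores], axis=0)  # shape (1, 2, 3)
    print(np.argmax(mixed, axis=2))                    # [[1 0]]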
Example #3
# Fragment: x_transform_train/x_transform_test, y_train/y_test, dataset,
# vocab_processor and config are assumed defined earlier in the script.
# Materialize the transformed features and labels as numpy arrays.
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))

y_train = np.array(list(y_train))
y_test = np.array(list(y_test))

# Model hyperparameters
embedding_dim = 128
filter_sizes = '3,4,5'
l2_reg_lambda = 0.0
num_filters = 128

with tf.Session() as sess:
    cnn = cnn_model.TextCNN(sequence_length=dataset.get_max_len(),
                            num_classes=len(config.classes),
                            vocab_size=len(vocab_processor.vocabulary_),
                            embedding_size=embedding_dim,
                            filter_sizes=list(map(int,
                                                  filter_sizes.split(","))),
                            num_filters=num_filters,
                            l2_reg_lambda=l2_reg_lambda)
    out_dir = os.path.abspath(os.path.join('tmp', "runs/"))
    print("Writing to {}\n".format(out_dir))

    # Summaries for loss and accuracy
    loss_summary = tf.summary.scalar("loss", cnn.loss)
    acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

    # Train Summaries
    train_summary_op = tf.summary.merge(
        [loss_summary, acc_summary, cnn.grad_summaries_merged])
    train_summary_dir = os.path.join(out_dir, "summaries", "train")
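
The snippet stops right after building the train summaries; a hedged continuation in the same style (the writer below and the "dev" directory name are assumptions) would attach a writer and mirror the setup for evaluation:

    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

    # Dev summaries, mirroring the train setup above
    dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
    dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
    dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)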
Example #4
# Hold out a fraction of the already-shuffled data as the test split.
num_tests = int(trainconfig.test_sample_percentage * len(all_label_arrays))
del all_label_arrays, random_index
test_sample_arrays = random_sample_arrays[:num_tests]
train_sample_arrays = random_sample_arrays[num_tests:]
del random_sample_arrays
test_label_arrays = random_label_arrays[:num_tests]
train_label_arrays = random_label_arrays[num_tests:]
del random_label_arrays
print("Train/Test split: {:d}/{:d}".format(len(train_label_arrays),
                                           len(test_label_arrays)))
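
For context, a sketch of the shuffle that presumably produced random_sample_arrays, random_label_arrays and random_index earlier in the script (all_sample_arrays and the permutation itself are assumptions):

    random_index = np.random.permutation(len(all_label_arrays))
    random_sample_arrays = all_sample_arrays[random_index]
    random_label_arrays = all_label_arrays[random_index]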

# Start training
with tf.Graph().as_default():
    sess = tf.Session()
    with sess.as_default():
        cnn = cnn_model.TextCNN(config=trainconfig)

        # Initialize variables, summary writers, and the saver
        train_writer = tf.summary.FileWriter(log_path + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(log_path + '/test')
        step_num = 0
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        # Define the training step
        def train_step(x_batch, y_batch, lr_rate):
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: trainconfig.dropout_keep_prob,
                cnn.learning_rate: lr_rate