Example #1
0
def predict(sess, cnn, test, alphabet, batch_size, q_len, a_len, step, type):
    """Score every example in *test* with the trained pairwise CNN.

    Runs ``cnn.score12`` batch by batch, dumps all raw scores to a text
    file named from the module-level ``data_file`` plus *type* and *step*
    (one score per line), and returns the scores trimmed to ``len(test)``
    (the batch generator may pad the final batch).

    NOTE(review): ``data_file`` and ``np`` are module-level names defined
    outside this snippet — confirm they are in scope at call time.
    """
    collected = []
    overlap = get_overlap_dict(test, alphabet, q_len, a_len)
    batches = batch_gen_with_single(test,
                                    alphabet,
                                    batch_size,
                                    q_len,
                                    a_len,
                                    overlap_dict=overlap)
    for batch in batches:
        question, answer = batch[0], batch[1]
        q_overlap, a_overlap = batch[2], batch[3]
        q_position, a_position = batch[4], batch[5]
        # At inference time the "negative" placeholders are fed the same
        # positive data — only score12 (the positive branch) is evaluated.
        feed = {
            cnn.question: question,
            cnn.answer: answer,
            cnn.answer_negative: answer,
            cnn.q_pos_overlap: q_overlap,
            cnn.q_neg_overlap: q_overlap,
            cnn.a_pos_overlap: a_overlap,
            cnn.a_neg_overlap: a_overlap,
            cnn.q_position: q_position,
            cnn.a_pos_position: a_position,
            cnn.a_neg_position: a_position
        }
        collected.extend(sess.run(cnn.score12, feed))
    out_name = data_file + '_' + type + "_score_%d.txt" % step
    with open(out_name, 'w') as ff:
        ff.write('\n'.join(str(s) for s in collected))
    return np.array(collected[:len(test)])
Example #2
0
def predict(sess, cnn, test, alphabet, batch_size, q_len, a_len):
    """Score every example in *test* with the trained pairwise CNN.

    Runs ``cnn.score12`` batch by batch and returns the scores trimmed to
    ``len(test)`` (the batch generator may pad the final batch).

    Fix: the original ended with ``pickle.dump(attention, open('attention.file',
    'w'))`` — but ``attention`` was never defined (the code collecting it at the
    old lines ``attention.extend((q, a))`` is commented out), so every call
    raised ``NameError`` after the scoring loop. The dead debug dump is removed;
    if it is ever re-enabled, initialise ``attention = []`` before the loop and
    open the file in ``'wb'`` mode (pickle writes bytes).
    """
    scores = []
    d = get_overlap_dict(test, alphabet, q_len, a_len)
    for data in batch_gen_with_single(test,
                                      alphabet,
                                      batch_size,
                                      q_len,
                                      a_len,
                                      overlap_dict=d):
        # At inference time the "negative" placeholders are fed the same
        # positive data — only score12 (the positive branch) is evaluated.
        feed_dict = {
            cnn.question: data[0],
            cnn.answer: data[1],
            cnn.answer_negative: data[1],
            cnn.q_pos_overlap: data[2],
            cnn.q_neg_overlap: data[2],
            cnn.a_pos_overlap: data[3],
            cnn.a_neg_overlap: data[3],
            cnn.q_position: data[4],
            cnn.a_pos_position: data[5],
            cnn.a_neg_position: data[5]
        }
        score = sess.run(cnn.score12, feed_dict)
        scores.extend(score)
    return np.array(scores[:len(test)])
Example #3
0
def predict(sess, cnn, dev, alphabet, batch_size, q_len):
    """Score every example in *dev* with the trained single-input CNN.

    Runs ``cnn.scores`` batch by batch with dropout disabled and returns
    the scores trimmed to ``len(dev)`` (the batch generator may pad the
    final batch).
    """
    collected = []
    for batch in batch_gen_with_single(dev, alphabet, batch_size, q_len):
        feed = {
            cnn.question: batch[0],
            cnn.q_position: batch[1],
            cnn.dropout_keep_prob: 1.0  # keep everything: inference mode
        }
        batch_scores = sess.run(cnn.scores, feed)
        collected.extend(batch_scores)
    return np.array(collected[:len(dev)])
Example #4
0
def predict(sess, cnn, test, alphabet, batch_size, q_len, a_len):
    """Score every (question, answer) pair in *test* with the trained CNN.

    Runs ``cnn.scores`` batch by batch using overlap and position features
    and returns the scores trimmed to ``len(test)`` (the batch generator
    may pad the final batch).
    """
    collected = []
    overlap = get_overlap_dict(test, alphabet, q_len, a_len)
    batches = batch_gen_with_single(test,
                                    alphabet,
                                    batch_size,
                                    q_len,
                                    a_len,
                                    overlap_dict=overlap)
    for batch in batches:
        feed = {
            cnn.question: batch[0],
            cnn.answer: batch[1],
            cnn.q_overlap: batch[2],
            cnn.a_overlap: batch[3],
            cnn.q_position: batch[4],
            cnn.a_position: batch[5]
        }
        collected.extend(sess.run(cnn.scores, feed))
    return np.array(collected[:len(test)])
Example #5
0
     q_max_sent_length = max(
         map(lambda x: len(x), train['question'].str.split()))
     alphabet, embeddings = prepare([train, test, dev],
                                    max_sent_length=q_max_sent_length,
                                    dim=FLAGS.embedding_dim,
                                    is_embedding_needed=True,
                                    fresh=True)
     with tf.Session() as sess:
         saver.restore(sess, ckpt.model_checkpoint_path)
         graph = tf.get_default_graph()
         scores = []
         question = graph.get_operation_by_name('input_question').outputs[0]
         q_position = graph.get_operation_by_name('q_position').outputs[0]
         dropout_keep_prob = graph.get_operation_by_name(
             'dropout_keep_prob').outputs[0]
         for data in batch_gen_with_single(test, alphabet, FLAGS.batch_size,
                                           q_max_sent_length):
             feed_dict = {
                 question.name: data[0],
                 q_position.name: data[1],
                 dropout_keep_prob.name: 1.0
             }
             score = sess.run("output/scores:0", feed_dict)
             scores.extend(score)
         scores = np.array(scores[:len(test)])
         predicted_label = np.argmax(scores, 1)
         acc_test = acc_train = accuracy_score(predicted_label,
                                               test['flag'])
         print("test epoch:acc {}".format(acc_test))
 else:
     for i in range(1, FLAGS.n_fold + 1):
         print("{} cross validation ".format(i))