# Parse command-line flags and echo every parameter (TF 1.x-era flag API).
FLAGS = tf.flags.FLAGS
# NOTE(review): FLAGS._parse_flags() and FLAGS.__flags are private TF 1.x
# internals that were removed/changed in later 1.x releases — confirm the
# pinned TensorFlow version still exposes them.
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")

# Word-level vocabulary plus IDF weights — presumably token -> id / token -> idf
# mappings; confirm against data_helpers.loadVocab.
vocab, idf = data_helpers.loadVocab(FLAGS.vocab_file)
print(len(vocab))

# Character-level vocabulary (used by the model's char features).
charVocab = data_helpers.loadCharVocab(FLAGS.char_vocab_file)

SEQ_LEN = FLAGS.max_sequence_length
# Candidate answers and the evaluation set, both capped at SEQ_LEN tokens.
answer_data = data_helpers.loadAnswers(FLAGS.answer_file, vocab, SEQ_LEN)
test_dataset = data_helpers.loadDataset(FLAGS.test_file, vocab, SEQ_LEN,
                                        answer_data)

# Per-class loss weights; uniform (no class re-weighting) by default.
target_loss_weight = [1.0, 1.0]

print("\nEvaluating...\n")

# Evaluation
# ==================================================
# Most recent checkpoint under checkpoint_dir (None if no checkpoint exists).
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
print(checkpoint_file)

# Dedicated graph for evaluation; soft placement lets ops without a kernel
# on the requested device fall back to another device.
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    # NOTE(review): the remainder of this `with` block (session creation,
    # model restore, eval loop) appears to be missing from this excerpt.
# ---- Example #2 (snippet separator left over from a code-listing scrape) ----
print("")

# Data Preparation
# ==================================================

# Load data
print("Loading data...")

# Word vocabulary and IDF table (same loader as the evaluation script above).
vocab, idf = data_helpers.loadVocab(FLAGS.vocab_file)
print(len(vocab))

charVocab = data_helpers.loadCharVocab(FLAGS.char_vocab_file)

SEQ_LEN = FLAGS.max_sequence_length
answer_data = data_helpers.loadAnswers(FLAGS.answer_file, vocab, SEQ_LEN)
# Training pairs — presumably (query, answer) examples; confirm structure
# against data_helpers.loadDataset.
train_dataset = data_helpers.loadDataset(FLAGS.train_file, vocab, SEQ_LEN,
                                         answer_data)

print('train_pairs: {}'.format(len(train_dataset)))

# The validation split is loaded into a variable named `test_dataset`
# (naming kept as in the original).
test_dataset = data_helpers.loadDataset(FLAGS.valid_file, vocab, SEQ_LEN,
                                        answer_data)

# Uniform per-class loss weights.
target_loss_weight = [1.0, 1.0]

# Training graph pinned to GPU 0; allow_soft_placement lets ops without a
# GPU kernel fall back to CPU instead of erroring.
with tf.Graph().as_default():
    with tf.device("/gpu:0"):
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # NOTE(review): the body of this block (model construction and
            # the training loop) is missing from this excerpt.
# ---- Example #3 (snippet separator left over from a code-listing scrape) ----
        format(mrr, top_1_precision, total_valid_query))
    # Report top-k retrieval precision for the validation run.
    print('Top-2 precision: {}'.format(top_2_precision))
    print('Top-5 precision: {}'.format(top_5_precision))
    print('Top-10 precision: {}'.format(top_10_precision))

    # NOTE(review): this is the tail of an evaluation helper whose header is
    # missing from this excerpt; `mrr` is presumably Mean Reciprocal Rank.
    return mrr


if __name__ == "__main__":
    # Load fixtures
    print("Loading data...")

    # loadVocab returns a (vocab, idf) pair — see the other call sites in
    # this file (`vocab, idf = data_helpers.loadVocab(...)`). The original
    # line assigned the whole tuple to `vocab`, so the downstream loaders
    # would have received the tuple where a vocabulary mapping is expected.
    vocab, idf = data_helpers.loadVocab(FLAGS.vocab_file)
    charVocab = data_helpers.loadCharVocab(FLAGS.char_vocab_file)

    # Candidate answers, capped at max_sequence_length tokens.
    answer_data = data_helpers.loadAnswers(FLAGS.answers_file, vocab,
                                           FLAGS.max_sequence_length)
    # Label smoothing is applied to the training split only.
    train_dataset = data_helpers.loadDataset(FLAGS.train_file,
                                             vocab,
                                             FLAGS.max_sequence_length,
                                             answer_data,
                                             do_label_smoothing=True)
    print('train_pairs: {}'.format(len(train_dataset)))
    valid_dataset = data_helpers.loadDataset(FLAGS.valid_file,
                                             vocab,
                                             FLAGS.max_sequence_length,
                                             answer_data,
                                             do_label_smoothing=False)
    print('dev_pairs: {}'.format(len(valid_dataset)))

    # train() takes no arguments; presumably it reads the module-level
    # datasets loaded above — confirm against its definition.
    train()