Example #1
        trainer.initialize(restore=save_path, sess=sess)
        trainer.test_rollouts = 100
        os.mkdir(path_logger_file + "/" + "test_beam")
        trainer.path_logger_file_ = path_logger_file + "/" + "test_beam" + "/paths"

        # write Hits@K and accuracy scores to scores.txt
        with open(output_dir + '/scores.txt', 'a') as score_file:
            score_file.write("Test (beam) scores with best model from " +
                             save_path + "\n")

        trainer.test_environment = trainer.test_test_environment
        trainer.test_environment.test_rollouts = 100
        trainer.test(sess, beam=True, print_paths=True, save_model=False)

        # print options['nell_evaluation']
        if options['nell_evaluation'] == 1:
            current_relation = options['data_input_dir'].split('/')[-2]
            print('current_relation:', current_relation)
            mean_ap, mean_hit_1, mean_hit_3, mean_hit_10, mean_mrr = nell_eval(
                path_logger_file + "/" + "test_beam/" + "pathsanswers",
                trainer.data_input_dir + '/sort_test.pairs')
            with open(
                    trainer.output_dir + '/' + current_relation +
                    '_nell_eval.txt', 'a') as nell_eval_file:
                # nell_eval_file.write('MINERVA MAP: {}'.format(mean_ap))
                nell_eval_file.write('RL MAP: ' + str(mean_ap) + '\n' +
                                     'HITS@1: ' + str(mean_hit_1) + '\n' +
                                     'HITS@3: ' + str(mean_hit_3) + '\n' +
                                     'HITS@10: ' + str(mean_hit_10) + '\n' +
                                     'MRR: ' + str(mean_mrr) + '\n')
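
The five values Example #1 unpacks from nell_eval are MAP, Hits@1, Hits@3, Hits@10 and MRR. For reference, here is a minimal per-query sketch of those ranking metrics; ranking_metrics is a hypothetical helper, not the repository's nell_eval, and it assumes the answers for one query arrive as booleans sorted by model score, best first:

def ranking_metrics(ranked_correct):
    # ranked_correct: True where the answer at that rank is a correct
    # target entity, already sorted by model score (best first).
    hits = {1: 0.0, 3: 0.0, 10: 0.0}
    ap, rr, n_correct = 0.0, 0.0, 0
    for rank, correct in enumerate(ranked_correct, start=1):
        if correct:
            n_correct += 1
            ap += n_correct / float(rank)  # precision at this rank
            if rr == 0.0:
                rr = 1.0 / rank            # reciprocal rank of the first hit
            for k in hits:
                if rank <= k:
                    hits[k] = 1.0          # a correct answer appears in the top k
    ap = ap / n_correct if n_correct else 0.0  # average precision
    # Averaging these five values over all test queries yields the
    # MAP / Hits@K / MRR figures written to the score files above.
    return ap, hits[1], hits[3], hits[10], rr
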
Example #2
    else:
        logger.info("Skipping training")
        logger.info("Loading model from {}".format(options["model_load_dir"]))

    # Build the trainer; when evaluating a pre-trained model, reuse its
    # checkpoint directory and its logging/output paths.
    trainer = Trainer(options)
    if options['load_model']:
        save_path = options['model_load_dir']
        path_logger_file = trainer.path_logger_file
        output_dir = trainer.output_dir
    with tf.Session(config=config) as sess:
        trainer.initialize(restore=save_path, sess=sess)

        trainer.test_rollouts = 100

        os.mkdir(path_logger_file + "/" + "test_beam")
        trainer.path_logger_file_ = path_logger_file + "/" + "test_beam" + "/paths"
        with open(output_dir + '/scores.txt', 'a') as score_file:
            score_file.write("Test (beam) scores with best model from " +
                             save_path + "\n")
        trainer.test_environment = trainer.test_test_environment
        trainer.test_environment.test_rollouts = 100

        trainer.test(sess, beam=True, print_paths=True, save_model=False)

        # trainer.test_environment = trainer.dev_test_environment
        # trainer.test(sess, beam=True, print_paths=True, save_model=False)
        print(options['nell_evaluation'])
        if options['nell_evaluation'] == 1:
            nell_eval(path_logger_file + "/" + "test_beam/" + "pathsanswers",
                      trainer.data_input_dir + '/sort_test.pairs')
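
Example #2 reads only a handful of keys from options. A minimal, hypothetical dict covering just the fields the snippet touches (the real config carries many more entries; the paths are placeholders to fill in):

options = {
    'load_model': True,     # skip training and restore a checkpoint
    'model_load_dir': '',   # placeholder: directory of the saved model
    'nell_evaluation': 1,   # 1 -> run the NELL evaluation after testing
    'data_input_dir': '',   # placeholder: dataset dir with sort_test.pairs
}
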
Example #3
import os
from code.model.nell_eval import nell_eval

# Placeholder paths: point these at the relevant files before running.
tasks_dir_path = ""        # directory holding one sub-directory per relation task
model_answers_path = ""    # a relation's beam-search paths/answers file
correct_answers_path = ""  # the matching sort_test.pairs file
output_path = ""           # aggregated results record

if __name__ == '__main__':
    # Clear the results record before a fresh evaluation run.
    with open(output_path, 'w') as f:
        pass
    # Evaluate every relation task found under tasks_dir_path.
    relations = os.listdir(tasks_dir_path)
    for relation in relations:
        nell_eval(model_answers_path, correct_answers_path)
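
Example #3 truncates output_path but never writes results back to it. Assuming nell_eval returns the five metrics the way Example #1 unpacks them, a sketch of recording one line per relation inside the loop (the line format is illustrative, not the repository's):

        # Sketch only: assumes the Example #1 return signature
        # (MAP, Hits@1, Hits@3, Hits@10, MRR).
        ap, h1, h3, h10, mrr = nell_eval(model_answers_path, correct_answers_path)
        with open(output_path, 'a') as f:
            f.write(relation + '\t' +
                    'MAP: ' + str(ap) + ' MRR: ' + str(mrr) + '\n')
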