Example 1
def test(sess, m_valid):
    # Restore the trained weights into the validation model.
    m_valid.restore(sess)

    # Evaluate accuracy and fetch the predictions in a single run.
    fetches = [m_valid.accuracy, m_valid.prediction]
    accuracy, predictions = sess.run(fetches)
    print('accuracy: %.4f' % accuracy)

    # Write the predicted relations to the results file.
    base_reader.write_results(predictions, FLAGS.relations_file, FLAGS.results_file)
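For context, a minimal sketch of how a routine like test() is typically driven in TF1-style code; ValidationModel is a hypothetical name standing in for whatever builds m_valid in the original project.

import tensorflow as tf

# Hypothetical driver: ValidationModel stands in for the class that builds the
# validation graph; test() itself restores the checkpoint and writes the results.
with tf.Graph().as_default():
    m_valid = ValidationModel()
    with tf.Session() as sess:
        test(sess, m_valid)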
Example 2
def evaluation(sess, test_model, data):
    acc_count = 0
    step = 0
    predict = []
    for batch in data:
        step += 1
        # Run one evaluation step; the returned labels are not needed here.
        acc, pre, label = test_model.run_iter(sess, batch, Training=False)
        predict.extend(pre)
        acc_count += acc
    # Write the predicted relations to the results file.
    base_reader.write_results(predict, cfg.relations_file, cfg.results_file)
    # Average accuracy; this assumes every batch holds exactly cfg.batch_size examples.
    return acc_count / (step * cfg.batch_size)
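The returned value divides the accumulated acc by step * cfg.batch_size, which is exact only when every batch is full and acc counts correct predictions per batch. A sketch of a variant that tracks the real number of evaluated examples instead (assumptions labelled in the comments, not the project's own code):

def evaluation_exact(sess, test_model, data):
    # Sketch: like evaluation(), but robust to a smaller final batch.
    correct = 0
    total = 0
    predict = []
    for batch in data:
        acc, pre, label = test_model.run_iter(sess, batch, Training=False)
        predict.extend(pre)
        correct += acc        # assumed: acc is the number of correct predictions in this batch
        total += len(pre)     # count the examples that were actually evaluated
    base_reader.write_results(predict, cfg.relations_file, cfg.results_file)
    return correct / total if total else 0.0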
Example 3
import os

def evaluation(sess, test_model, data):
    acc_count = 0
    step = 0
    predict = []
    target = []
    for batch in data:
        step += 1
        acc, pre, label = test_model.run_iter(sess, batch, Training=False)
        predict.extend(pre)
        # Also collect the gold labels so they can be written out as answer keys.
        target.extend(label)
        acc_count += acc
    # data_path and out_path are expected to be defined at module level.
    relations_file_path = os.path.join(data_path, cfg.relations_file)
    results_file_path = os.path.join(out_path, cfg.results_file)
    # Write the predicted relations, plus the gold keys for scoring.
    base_reader.write_results(predict, relations_file_path, results_file_path)
    base_reader.write_results(target, relations_file_path, 'out/test_keys.txt')
    # Average accuracy; this assumes every batch holds exactly cfg.batch_size examples.
    return acc_count / (step * cfg.batch_size)
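All three examples delegate file output to base_reader.write_results, whose implementation is not shown here. As a rough sketch only: assuming relations_file lists one relation name per line (line index = class id) and the results file uses tab-separated 'example_id<TAB>relation' rows, it might look like the following; both file formats are assumptions.

def write_results(predictions, relations_file, results_file):
    # Assumed format: one relation name per line; the line index is the class id.
    with open(relations_file) as f:
        relations = [line.strip() for line in f if line.strip()]
    # Assumed output: one "example_id<TAB>relation" row per prediction.
    with open(results_file, 'w') as f:
        for idx, pred in enumerate(predictions):
            f.write('%d\t%s\n' % (idx, relations[int(pred)]))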