# Example #1
def evaluate_score():
  """Build a predictor for FLAGS.algo, restore its weights, and re-save the model.

  Side effects only: registers the score tensor under the 'score' collection,
  restores checkpoint weights from FLAGS.model_dir, and writes a fresh
  checkpoint at (loaded step + 1) into the resolved model directory.
  """
  # Construct the predictor for the configured algorithm and expose its score op.
  pred = algos_factory.gen_predictor(FLAGS.algo)
  score_op = pred.init_predict(TEXT_MAX_WORDS)
  tf.add_to_collection('score', score_op)

  # Restore trained weights, then resolve which step / directory they came from.
  pred.load(FLAGS.model_dir)
  ckpt_step = melt.get_model_step_from_dir(FLAGS.model_dir)
  ckpt_dir, _ = melt.get_model_dir_and_path(FLAGS.model_dir)
  print('step', ckpt_step, file=sys.stderr)
  print('model_dir', ckpt_dir)

  # Re-save the restored session one step past the loaded checkpoint.
  melt.save_model(melt.get_session(), ckpt_dir, ckpt_step + 1)
from __future__ import division
from __future__ import print_function

import sys
import os

import melt
import gezi

# Driver: locate a model checkpoint, then launch evaluate-inference on it
# unless a complete result file (exactly 30000 lines) already exists.

# This pipeline assumes a single-GPU setup.
assert melt.get_num_gpus() == 1

# First CLI argument is the model directory; default to the current directory.
try:
  model_dir = sys.argv[1]
except Exception:
  model_dir = './'
model_dir, model_path = melt.get_model_dir_and_path(model_dir)

result_file = model_path + '.evaluate-inference.txt'

print('model_dir', model_dir, 'result_file', result_file, file=sys.stderr)

# Optional second CLI argument is forwarded verbatim to the inference script.
arg2 = ''
if len(sys.argv) > 2:
  arg2 = sys.argv[2]

def _line_count(path):
  """Return the number of lines in *path*, closing the file deterministically.

  Replaces the original len(open(path).readlines()): same count, but the
  file handle is released via the context manager instead of leaked, and the
  whole file is no longer materialized in memory.
  """
  with open(path) as f:
    return sum(1 for _ in f)

# A run is considered complete only when the result file exists, is non-empty,
# and holds exactly 30000 lines; otherwise (missing/partial) re-run inference.
if not gezi.non_empty(result_file) or _line_count(result_file) != 30000:
  # NOTE(review): os.system with interpolated argv is shell-injection prone;
  # subprocess.run([...], shell=False) would be safer — kept as-is to
  # preserve exact behavior (shell string, return-code handling).
  command = 'python /home/gezi/mine/hasky/deepiu/image_caption/inference/ai-challenger/evaluate-inference.py %s %s' % (model_path, arg2)
  print(command, file=sys.stderr)
  os.system(command)
else:
  print('%s exists' % result_file)
# Example #3
def convert(model_dir, meta_graph):
    model_dir, model_path = melt.get_model_dir_and_path(model_dir)
    if not meta_graph:
        meta_graph = '%s/graph.meta' % model_dir