def do_start_inference(out_dir, hparams):
    """Build an nmt inference model from a trained model directory.

    Redirects stdout to /dev/null (saving the real stream in the
    module-level ``current_stdout``) so the nmt library's console chatter
    is silenced while the model is constructed.

    Args:
        out_dir: Path to the model (output) folder containing checkpoints.
        hparams: Dict of hyperparameters, injected into nmt's CLI parser
            as fake command-line options.

    Returns:
        Tuple of (infer_model, flags, hparams).
    """
    # Silence all nmt/TF output; keep a handle so callers can restore it.
    global current_stdout
    current_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")

    # Modified autorun from nmt.py (bottom of the file).
    # Reuse the original argument parser (for validation, etc.) but feed
    # it our config dict instead of real command-line options.
    nmt_parser = argparse.ArgumentParser()
    nmt.add_arguments(nmt_parser)
    flags, unparsed = nmt_parser.parse_known_args(
        ['--' + k + '=' + str(v) for k, v in hparams.items()])

    # Add output (model) folder to flags
    flags.out_dir = out_dir

    # Make hparams
    hparams = nmt.create_hparams(flags)

    ## Train / Decode
    if not tf.gfile.Exists(flags.out_dir):
        # Restore stdout first — otherwise the error message below would
        # be swallowed by the /dev/null redirection above and the process
        # would exit silently.
        sys.stdout = current_stdout
        nmt.utils.print_out("# Model folder (out_dir) doesn't exist")
        sys.exit()

    # Load hparams from model folder
    hparams = nmt.create_or_load_hparams(flags.out_dir, hparams,
                                         flags.hparams_path,
                                         save_hparams=True)

    # Choose checkpoint (provided with hparams or last one)
    if not flags.ckpt:
        flags.ckpt = tf.train.latest_checkpoint(flags.out_dir)

    # Create model
    if not hparams.attention:
        model_creator = nmt.inference.nmt_model.Model
    elif hparams.attention_architecture == "standard":
        model_creator = nmt.inference.attention_model.AttentionModel
    elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
        model_creator = nmt.inference.gnmt_model.GNMTModel
    else:
        raise ValueError("Unknown model architecture")

    infer_model = nmt.inference.model_helper.create_infer_model(
        model_creator, hparams, None)

    return (infer_model, flags, hparams)
def setup_inference_parameters(out_dir, hparams):
    """Build an nmt inference model and its flags/hparams from a model folder.

    Temporarily sends output to /dev/null (the real stdout is saved in the
    module-level ``current_stdout``) so the nmt library stays quiet while
    the model is set up.

    Args:
        out_dir: Path to the model (output) folder containing checkpoints.
        hparams: Dict of hyperparameters, injected into nmt's CLI parser
            as fake command-line options.

    Returns:
        Tuple of (infer_model, flags, hparams).
    """
    # Silence all nmt/TF output; keep a handle so callers can restore it.
    global current_stdout
    current_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")

    # NOTE(review): original called `ap.ArgumentParser()`, but the file
    # imports the module as `argparse` — `ap` would raise NameError.
    nmt_parser = argparse.ArgumentParser()
    nmt.add_arguments(nmt_parser)

    # Get settings from the configuration file, faked as CLI options.
    flags, unparsed = nmt_parser.parse_known_args(
        ['--' + key + '=' + str(value) for key, value in hparams.items()])

    # Add output (model) folder to flags
    flags.out_dir = out_dir

    ## Exit if model folder doesn't exist
    if not tf.gfile.Exists(flags.out_dir):
        # Restore stdout first — otherwise the error message below would
        # be swallowed by the /dev/null redirection above.
        sys.stdout = current_stdout
        nmt.utils.print_out("# Model folder (out_dir) doesn't exist")
        sys.exit()

    # Load hyper parameters (hparams) from model folder
    hparams = nmt.create_hparams(flags)
    hparams = nmt.create_or_load_hparams(flags.out_dir, hparams,
                                         flags.hparams_path,
                                         save_hparams=True)

    # Choose checkpoint (provided with hparams or last one)
    if not flags.ckpt:
        flags.ckpt = tf.train.latest_checkpoint(flags.out_dir)

    # Create model
    if not hparams.attention:
        model_creator = nmt.inference.nmt_model.Model
    elif hparams.attention_architecture == "standard":
        model_creator = nmt.inference.attention_model.AttentionModel
    elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
        model_creator = nmt.inference.gnmt_model.GNMTModel
    else:
        raise ValueError("Unknown model architecture")

    infer_model = nmt.inference.model_helper.create_infer_model(
        model_creator, hparams, None)

    return (infer_model, flags, hparams)
def do_start_inference(out_dir, hparams):
    """Build an nmt inference model from a trained model directory (compact
    variant).

    Redirects stdout to /dev/null, keeping the original stream in the
    module-level ``current_stdout`` for later restoration.

    Args:
        out_dir: Path to the model (output) folder containing checkpoints.
        hparams: Dict of hyperparameters, injected into nmt's CLI parser.

    Returns:
        Tuple of (infer_model, flags, hparams).
    """
    global current_stdout
    current_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")

    # Modified autorun from nmt.py (bottom of the file).
    # Reuse the original argument parser, fed from our config dict.
    nmt_parser = argparse.ArgumentParser()
    nmt.add_arguments(nmt_parser)
    flags, unparsed = nmt_parser.parse_known_args(
        ['--' + k + '=' + str(v) for k, v in hparams.items()])

    # Add output (model) folder to flags
    flags.out_dir = out_dir

    # Make hparams
    hparams = nmt.create_hparams(flags)

    if not tf.gfile.Exists(flags.out_dir):
        # Restore stdout first — otherwise this error message would be
        # swallowed by the /dev/null redirection and the exit is silent.
        sys.stdout = current_stdout
        nmt.utils.print_out("# Model folder (out_dir) doesn't exist")
        sys.exit()

    # Load hparams from model folder
    hparams = nmt.create_or_load_hparams(flags.out_dir, hparams,
                                         flags.hparams_path,
                                         save_hparams=True)

    # Choose checkpoint (provided with hparams or last one)
    if not flags.ckpt:
        flags.ckpt = tf.train.latest_checkpoint(flags.out_dir)

    # Pick the model class matching the attention architecture.
    if not hparams.attention:
        model_creator = nmt.inference.nmt_model.Model
    elif hparams.attention_architecture == "standard":
        model_creator = nmt.inference.attention_model.AttentionModel
    elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
        model_creator = nmt.inference.gnmt_model.GNMTModel
    else:
        raise ValueError("Unknown model architecture")

    infer_model = nmt.inference.model_helper.create_infer_model(
        model_creator, hparams, None)

    return (infer_model, flags, hparams)
import sys
import os
import argparse

from settings import hparams

# Make the bundled nmt package importable (both the wrapper package and
# the inner directory its absolute imports expect).
sys.path.append(os.path.realpath(os.path.dirname(__file__)))
sys.path.append(os.path.realpath(os.path.dirname(__file__)) + "/nmt")

from nmt import nmt
import tensorflow as tf

# Modified autorun from nmt.py (bottom of the file).
# We want to use the original argument parser (for validation, etc.)
nmt_parser = argparse.ArgumentParser()
nmt.add_arguments(nmt_parser)

# But we have to hack settings from our config in there instead of
# command-line options.
nmt.FLAGS, unparsed = nmt_parser.parse_known_args(
    ['--' + k + '=' + str(v) for k, v in hparams.items()])

# And now we can run TF with modified arguments.
# BUG FIX: the original argv[0] was built with '\nmt\nmt\nmt.py' — in a
# non-raw string each '\n' is a newline escape, so the path was garbled.
# Build it portably with os.path.join instead.
tf.app.run(main=nmt.main,
           argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)