def parse_options(self, ckpt_path, hparams_path, vocab_prefix, outdir, batch_size):
    """Build a default NMT FLAGS namespace and override it for inference.

    Returns the parsed flags object with checkpoint, vocab, and threading
    settings filled in for an en->de inference run.
    """
    # TBD: remove argument parsing and just construct the default values directly.
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    flags, _ = parser.parse_known_args()
    overrides = {
        # Some of these flags are never used and are set only for consistency.
        'num_workers': 1,
        'iterations': 1,
        'infer_batch_size': batch_size,
        'num_inter_threads': 1,
        'num_intra_threads': 1,
        'run': "accuracy",  # must be "accuracy" to generate output
        # Inference-specific settings.
        'ckpt': ckpt_path,
        'src': 'en',
        'tgt': 'de',
        'hparams_path': hparams_path,
        'out_dir': outdir,
        'vocab_prefix': vocab_prefix,
    }
    for attr, value in overrides.items():
        setattr(flags, attr, value)
    return flags
def nmt_train():
    """Run NMT training, injecting settings from the project config (hparams).

    Reuses nmt's own argument parser (so its validation still runs) and
    installs the custom summary callback before handing off to tf.app.run.
    """
    nmt_parser = argparse.ArgumentParser()
    nmt.add_arguments(nmt_parser)
    # Feed config values through the parser as if they were CLI options.
    nmt.FLAGS, unparsed = nmt_parser.parse_known_args(
        ['--' + k + '=' + str(v) for k, v in hparams.items()])
    # Hook for custom TensorBoard summaries.
    nmt.summary_callback = custom_summary
    # BUG FIX: the original built argv[0] as os.getcwd() + '\nmt\nmt\nmt.py',
    # where each '\n' is a newline escape, not a path separator. Build the
    # script path portably instead (argv[0] is only the program name, but it
    # should still be a valid path).
    tf.app.run(main=nmt.main,
               argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)
def nmt_train():
    """Run NMT training with settings taken from the project config."""
    # Modified autorun from nmt.py (bottom of the file):
    # we want to use the original argument parser (for validation, etc.)...
    nmt_parser = argparse.ArgumentParser()
    nmt.add_arguments(nmt_parser)
    # ...but we hack settings from our config in there instead of
    # command-line options.
    nmt.FLAGS, unparsed = nmt_parser.parse_known_args(
        ['--' + k + '=' + str(v) for k, v in hparams.items()])
    # BUG FIX: the original argv[0] used '\nmt\nmt\nmt.py', in which each
    # '\n' is a newline escape, not a path separator; build the path with
    # os.path.join so it is correct and portable.
    tf.app.run(main=nmt.main,
               argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)
def chpt_to_dict_arrays():
    """Convert a checkpoint into a dictionary of numpy arrays
    for later use in the TensorRT NMT sample.

    Requires a checkout of https://github.com/tensorflow/nmt.git in ./nmt.
    Returns a dict mapping variable names to numpy arrays, plus the entry
    "forget_bias" taken from the loaded hparams.
    """
    sys.path.append("./nmt")
    # Imported lazily so the module can load without the nmt checkout present.
    from nmt.nmt import add_arguments, create_hparams
    from nmt import attention_model
    from nmt import model_helper
    from nmt.nmt import create_or_load_hparams
    from nmt import utils
    from nmt import model as nmt_model

    parser = argparse.ArgumentParser()
    add_arguments(parser)
    flags, _ = parser.parse_known_args()

    hparams = create_or_load_hparams(
        flags.out_dir, create_hparams(flags), flags.hparams_path,
        save_hparams=False)
    print(hparams)

    # Pick the model class matching the configured attention architecture.
    if not hparams.attention:
        model_creator = nmt_model.Model
    elif hparams.attention_architecture == "standard":
        model_creator = attention_model.AttentionModel
    else:
        raise ValueError("Unknown model architecture")

    infer_model = model_helper.create_infer_model(model_creator, hparams, scope=None)

    weights = {}
    print("\nFound the following trainable variables:")
    with tf.Session(graph=infer_model.graph,
                    config=utils.misc_utils.get_config_proto()) as sess:
        # load_model restores the checkpoint into the session's variables.
        model_helper.load_model(infer_model.model, flags.ckpt, sess, "infer")
        for var in tf.trainable_variables():
            weights[var.name] = var.eval(session=sess)
            print("{0} {1}".format(var.name, weights[var.name].shape))
    weights["forget_bias"] = hparams.forget_bias
    return weights
def nmt_train():
    """Run NMT training with settings taken from the configuration file."""
    nmt_parser = ap.ArgumentParser()
    nmt.add_arguments(nmt_parser)
    # Get settings from the configuration file by feeding them through
    # nmt's own argument parser (so its validation still runs).
    nmt.FLAGS, unparsed = nmt_parser.parse_known_args(
        ['--' + key + '=' + str(value) for key, value in hparams.items()])
    # Custom summary callback hook.
    nmt.summary_callback = custom_summary
    # BUG FIX: the original argv[0] was os.getcwd() + '\nmt\nmt\nmt.py';
    # each '\n' there is a newline escape, not a path separator. Use
    # os.path.join for a correct, portable script path.
    tf.app.run(main=nmt.main,
               argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)
def testTrain(self):
    """Test the training loop is functional with basic hparams."""
    parser = argparse.ArgumentParser()
    nmt.add_arguments(parser)
    flags, _ = parser.parse_known_args()
    _update_flags(flags, "nmt_train_test")
    nmt.run_main(flags, nmt.create_hparams(flags), train.train, None)
def do_start_inference(out_dir, hparams):
    """Build an NMT inference model from the model folder `out_dir`.

    `hparams` is the project's config dict (key/value pairs fed to nmt's
    argument parser); the same name is then reused for the nmt hparams
    object loaded from the model folder. Returns a tuple
    (infer_model, flags, hparams).

    Side effect: redirects sys.stdout to /dev/null for the whole call,
    saving the previous stream in the module-global `current_stdout`
    (presumably restored elsewhere — TODO confirm with the caller).
    Exits the process if the model folder does not exist.
    """
    # Silence all outputs
    #os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    global current_stdout
    current_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")

    # Modified autorun from nmt.py (bottom of the file)
    # We want to use original argument parser (for validation, etc)
    nmt_parser = argparse.ArgumentParser()
    nmt.add_arguments(nmt_parser)
    # But we have to hack settings from our config in there instead of commandline options
    flags, unparsed = nmt_parser.parse_known_args(
        ['--' + k + '=' + str(v) for k, v in hparams.items()])
    # And now we can run TF with modified arguments
    #tf.app.run(main=nmt.main, argv=[os.getcwd() + '\nmt\nmt\nmt.py'] + unparsed)

    # Add output (model) folder to flags
    flags.out_dir = out_dir

    # Make hparams
    hparams = nmt.create_hparams(flags)

    ## Train / Decode
    if not tf.gfile.Exists(flags.out_dir):
        nmt.utils.print_out("# Model folder (out_dir) doesn't exist")
        sys.exit()

    # Load hparams from model folder
    # NOTE: this overwrites the freshly-built hparams with the values saved
    # alongside the model; save_hparams=True writes them back to disk.
    hparams = nmt.create_or_load_hparams(flags.out_dir, hparams, flags.hparams_path,
                                         save_hparams=True)

    # Choose checkpoint (provided with hparams or last one)
    if not flags.ckpt:
        flags.ckpt = tf.train.latest_checkpoint(flags.out_dir)

    # Create model: pick the model class for the attention architecture.
    if not hparams.attention:
        model_creator = nmt.inference.nmt_model.Model
    elif hparams.attention_architecture == "standard":
        model_creator = nmt.inference.attention_model.AttentionModel
    elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
        model_creator = nmt.inference.gnmt_model.GNMTModel
    else:
        raise ValueError("Unknown model architecture")
    infer_model = nmt.inference.model_helper.create_infer_model(
        model_creator, hparams, None)

    return (infer_model, flags, hparams)
def setup_inference_parameters(out_dir, hparams):
    """Prepare an NMT inference model from the model folder `out_dir`.

    Returns (infer_model, flags, hparams). Redirects sys.stdout to
    /dev/null, saving the previous stream in the module-global
    current_stdout. Exits if the model folder does not exist.
    """
    # Silence library chatter: keep a handle on the real stdout, then point
    # sys.stdout at /dev/null for the duration of the setup.
    global current_stdout
    current_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")

    parser = ap.ArgumentParser()
    nmt.add_arguments(parser)
    # Get settings from the configuration file by feeding them through
    # nmt's own argument parser.
    config_args = ['--' + key + '=' + str(value) for key, value in hparams.items()]
    flags, _unparsed = parser.parse_known_args(config_args)

    # The output (model) folder comes from the caller.
    flags.out_dir = out_dir
    # Bail out early if the model folder doesn't exist.
    if not tf.gfile.Exists(flags.out_dir):
        nmt.utils.print_out("# Model folder (out_dir) doesn't exist")
        sys.exit()

    # Build hparams from flags, then replace them with the hyperparameters
    # stored in the model folder (save_hparams=True re-saves them).
    hparams = nmt.create_hparams(flags)
    hparams = nmt.create_or_load_hparams(
        flags.out_dir, hparams, flags.hparams_path, save_hparams=True)

    # Fall back to the newest checkpoint when none was configured.
    if not flags.ckpt:
        flags.ckpt = tf.train.latest_checkpoint(flags.out_dir)

    # Pick the model class matching the attention architecture.
    if not hparams.attention:
        builder = nmt.inference.nmt_model.Model
    elif hparams.attention_architecture == "standard":
        builder = nmt.inference.attention_model.AttentionModel
    elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
        builder = nmt.inference.gnmt_model.GNMTModel
    else:
        raise ValueError("Unknown model architecture")

    infer_model = nmt.inference.model_helper.create_infer_model(
        builder, hparams, None)
    return (infer_model, flags, hparams)
def do_start_inference(out_dir, hparams):
    """Create an NMT inference model from a trained model folder.

    Returns (infer_model, flags, hparams). Redirects sys.stdout to
    /dev/null, saving the old stream in the module-global current_stdout.
    Exits if the model folder does not exist.
    """
    global current_stdout
    current_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")

    # Reuse nmt's own argument parser, feeding it our config values
    # (modified autorun from the bottom of nmt.py).
    parser = argparse.ArgumentParser()
    nmt.add_arguments(parser)
    config_args = ['--' + key + '=' + str(val) for key, val in hparams.items()]
    flags, _unparsed = parser.parse_known_args(config_args)

    # The output (model) folder comes from the caller.
    flags.out_dir = out_dir

    # Fresh hparams from flags...
    hparams = nmt.create_hparams(flags)

    if not tf.gfile.Exists(flags.out_dir):
        nmt.utils.print_out("# Model folder (out_dir) doesn't exist")
        sys.exit()

    # ...then replaced by the ones stored with the model.
    hparams = nmt.create_or_load_hparams(
        flags.out_dir, hparams, flags.hparams_path, save_hparams=True)

    # Fall back to the newest checkpoint when none was configured.
    if not flags.ckpt:
        flags.ckpt = tf.train.latest_checkpoint(flags.out_dir)

    # Pick the model class matching the attention architecture.
    if not hparams.attention:
        builder = nmt.inference.nmt_model.Model
    elif hparams.attention_architecture == "standard":
        builder = nmt.inference.attention_model.AttentionModel
    elif hparams.attention_architecture in ["gnmt", "gnmt_v2"]:
        builder = nmt.inference.gnmt_model.GNMTModel
    else:
        raise ValueError("Unknown model architecture")

    infer_model = nmt.inference.model_helper.create_infer_model(builder, hparams, None)
    return (infer_model, flags, hparams)
def testInference(self):
    """Test inference is function with basic hparams."""
    parser = argparse.ArgumentParser()
    nmt.add_arguments(parser)
    flags, _ = parser.parse_known_args()
    _update_flags(flags, "nmt_train_infer")

    # Train one step so we have a checkpoint.
    flags.num_train_steps = 1
    nmt.run_main(flags, nmt.create_hparams(flags), train.train, None)

    # Update FLAGS for inference.
    flags.inference_input_file = ("nmt/testdata/"
                                  "iwslt15.tst2013.100.en")
    flags.inference_output_file = os.path.join(flags.out_dir, "output")
    flags.inference_ref_file = ("nmt/testdata/"
                                "iwslt15.tst2013.100.vi")
    nmt.run_main(flags, nmt.create_hparams(flags), None, inference.inference)
import sys
import os
import argparse

from setup.settings import hparams

# Make this folder and the bundled nmt checkout importable.
sys.path.append(os.path.realpath(os.path.dirname(__file__)))
sys.path.append(os.path.realpath(os.path.dirname(__file__)) + "/nmt")
from nmt import nmt
import tensorflow as tf

# Modified autorun from nmt.py (bottom of the file): reuse the original
# argument parser (for validation, etc.) but hack settings from our config
# in there instead of command-line options.
nmt_parser = argparse.ArgumentParser()
nmt.add_arguments(nmt_parser)
nmt.FLAGS, unparsed = nmt_parser.parse_known_args(
    ['--' + k + '=' + str(v) for k, v in hparams.items()])

# BUG FIX: the original argv[0] was os.getcwd() + '\nmt\nmt\nmt.py'; each
# '\n' is a newline escape, not a path separator. Build the script path
# portably with os.path.join.
tf.app.run(main=nmt.main,
           argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)
def main():
    """Parse NMT flags from the command line and hand off to nmt.main."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    nmt.add_arguments(arg_parser)
    nmt.FLAGS, leftover = arg_parser.parse_known_args()
    tf.app.run(main=nmt.main, argv=[sys.argv[0]] + leftover)
import sys

# 'common' lives one directory up; if importing it fails, check the Python
# interpreter's current working directory — it has to be the 'chat' folder.
# (import os; print(os.getcwd()) to verify.)
sys.path.insert(0, '../')

#!flask/bin/python
from infer_web import app
from infer_web import controller
import argparse
import nmt.nmt as nmt

# Parse the NMT flags and fall back to the model folder for this host
# when no out_dir was given on the command line.
arg_parser = argparse.ArgumentParser()
nmt.add_arguments(arg_parser)
FLAGS, _unparsed = arg_parser.parse_known_args()
if not FLAGS.out_dir:
    # FLAGS.out_dir = "/Users/ryuji/prg/aplac/chat/generated/4_2316/model"  # MacBookAir13
    FLAGS.out_dir = "/home/apps/prg/aplac/chat/generated/4_2316/model"  # AWS EC2
controller.init(FLAGS)

# Do not add debug=True when VSCode is used, otherwise breakpoints don't hit:
# app.run(debug=True)
if __name__ == '__main__':
    app.run()