def testTrain(self):
  """Test the training loop is functional with basic hparams."""
  parser = argparse.ArgumentParser()
  nmt.add_arguments(parser)
  parsed_flags, _ = parser.parse_known_args()
  # Point the flags at the tiny test fixture before building hparams.
  update_flags(parsed_flags, "nmt_train_test")
  hparams = nmt.create_hparams(parsed_flags)
  # Run the combined train-and-eval entry point end to end.
  nmt.run_main(parsed_flags, hparams, estimator.train_and_eval_fn)
def testTrain(self):
  """Test the training loop is functional with basic hparams."""
  parser = argparse.ArgumentParser()
  nmt.add_arguments(parser)
  flags, _ = parser.parse_known_args()
  # Rewrite the default flags to use the small test configuration.
  _update_flags(flags, "nmt_train_test")
  hparams = nmt.create_hparams(flags)
  # Train only (no inference function is supplied).
  nmt.run_main(flags, hparams, train.train, None)
def testTrainInputFn(self):
  """Smoke-test the training input_fn by pulling one batch of features."""
  parser = argparse.ArgumentParser()
  nmt.add_arguments(parser)
  flags, _ = parser.parse_known_args()
  update_flags(flags, "input_fn_test")
  # Build the fully-extended hparams the input pipeline expects.
  hparams = nmt.extend_hparams(nmt.create_hparams(flags))
  with self.test_session() as sess:
    dataset = make_input_fn(hparams, tf.contrib.learn.ModeKeys.TRAIN)({})
    # Vocabulary lookup tables must be initialized before the iterator runs.
    sess.run(tf.tables_initializer())
    iterator = dataset.make_initializable_iterator()
    sess.run(iterator.initializer)
    features = sess.run(iterator.get_next())
    tf.logging.info("source: %s", features["source"])
    tf.logging.info("target_input: %s", features["target_input"])
    tf.logging.info("target_output: %s", features["target_output"])
    tf.logging.info("source_sequence_length: %s",
                    features["source_sequence_length"])
    tf.logging.info("target_sequence_length: %s",
                    features["target_sequence_length"])
def testInference(self):
  """Test inference is functional with basic hparams."""
  parser = argparse.ArgumentParser()
  nmt.add_arguments(parser)
  flags, _ = parser.parse_known_args()
  _update_flags(flags, "nmt_train_infer")

  # Train one step so we have a checkpoint.
  flags.num_train_steps = 1
  hparams = nmt.create_hparams(flags)
  nmt.run_main(flags, hparams, train.train, None)

  # Repoint the flags at the inference inputs/outputs, then rebuild hparams.
  flags.inference_input_file = "nmt/testdata/iwslt15.tst2013.100.en"
  flags.inference_output_file = os.path.join(flags.out_dir, "output")
  flags.inference_ref_file = "nmt/testdata/iwslt15.tst2013.100.vi"
  hparams = nmt.create_hparams(flags)
  # Inference only this time (no training function is supplied).
  nmt.run_main(flags, hparams, None, inference.inference)
# Once in a while, we print statistics. if global_step - last_stats_step >= steps_per_stats: last_stats_step = global_step is_overflow = train.process_stats(stats, info, global_step, steps_per_stats, log_f) train.print_step_info(" ", global_step, info, train._get_best_results(hparams), log_f) if is_overflow: break # Reset statistics stats = train.init_stats() sess, num_workers, worker_id, num_replicas_per_worker = \ parallax.parallel_run(train_model.graph, FLAGS.resource_info_file, sync=FLAGS.sync, parallax_config=parallax_config.build_config()) run(sess, num_workers, worker_id, num_replicas_per_worker) if __name__ == "__main__": import logging logging.getLogger("tensorflow").setLevel(logging.DEBUG) nmt_parser = argparse.ArgumentParser() nmt.add_arguments(nmt_parser) add_arguments(nmt_parser) FLAGS, unparsed = nmt_parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)