def main(unused_argv):
  """Train the dual-encoder model with periodic validation and early stopping.

  Builds train/eval input pipelines, fits the estimator until the training
  input is exhausted, and stops early if validation recall@1 stops improving.
  """
  hp = udc_hparams.create_hparams()

  train_input_fn = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hp.batch_size,
      num_epochs=FLAGS.num_epochs)
  eval_input_fn = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hp.eval_batch_size,
      num_epochs=1)

  estimator = tf.contrib.learn.Estimator(
      model_fn=udc_model.create_model_fn(hp, model_impl=dual_encoder_model),
      model_dir=MODEL_DIR,
      config=tf.contrib.learn.RunConfig())

  # Evaluate every FLAGS.eval_every steps; stop when recall@1 (higher is
  # better, hence minimize=False) has not improved for 4000 steps.
  monitor = tf.contrib.learn.monitors.ValidationMonitor(
      input_fn=eval_input_fn,
      every_n_steps=FLAGS.eval_every,
      metrics=udc_metrics.create_evaluation_metrics(),
      early_stopping_metric="recall_at_1",
      early_stopping_metric_minimize=False,
      early_stopping_rounds=4000)

  # steps=None: train until the TRAIN input pipeline runs out of epochs.
  estimator.fit(input_fn=train_input_fn, steps=None, monitors=[monitor])
def main(unused_argv):
  """Train the dual-encoder model for FLAGS.num_steps steps.

  Runs a validation pass every FLAGS.eval_every steps via a
  ValidationMonitor; checkpoints go to MODEL_DIR.
  """
  hparams = udc_hparams.create_hparams()
  model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
  estimator = tf.contrib.learn.Estimator(model_fn=model_fn, model_dir=MODEL_DIR)

  input_fn_train = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hparams.batch_size,
      num_epochs=FLAGS.num_epochs)
  input_fn_eval = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hparams.eval_batch_size,
      num_epochs=1)

  eval_metrics = udc_metrics.create_evaluation_metrics()
  eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
      input_fn=input_fn_eval,
      every_n_steps=FLAGS.eval_every,
      metrics=eval_metrics)

  # FIX: the original constructed `dbg_hook = tfdbg.LocalCLIDebugHook()` but
  # never attached it anywhere, so it was dead code. Removed; to debug
  # interactively, add a tfdbg.LocalCLIDebugHook() to `monitors` below.
  estimator.fit(input_fn=input_fn_train, steps=FLAGS.num_steps,
                monitors=[eval_monitor])
def main(unused_argv):
  """Train the dual-encoder model with periodic validation.

  Trains until the TRAIN input pipeline is exhausted (FLAGS.num_epochs
  epochs), evaluating every FLAGS.eval_every steps.
  """
  hparams = udc_hparams.create_hparams()
  # FIX: the original created `writer = tf.summary.FileWriter(FLAGS.logdir)`
  # and never used (or closed) it — the Estimator writes its own summaries
  # under MODEL_DIR. Removed as dead code.
  model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
  estimator = tf.contrib.learn.Estimator(model_fn=model_fn, model_dir=MODEL_DIR,
                                         config=tf.contrib.learn.RunConfig())

  input_fn_train = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hparams.batch_size,
      num_epochs=FLAGS.num_epochs)
  input_fn_eval = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hparams.eval_batch_size,
      num_epochs=1)

  eval_metrics = udc_metrics.create_evaluation_metrics()
  eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
      input_fn=input_fn_eval,
      every_n_steps=FLAGS.eval_every,
      metrics=eval_metrics)

  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
def main(unused_argv):
  """Train the dual-encoder model using the core tf.estimator API.

  Training runs until the training input_fn is exhausted, i.e. after
  FLAGS.num_epochs passes over TRAIN_FILE.
  """
  hparams = udc_hparams.create_hparams()
  model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
  estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=MODEL_DIR,
                                     config=tf.estimator.RunConfig())

  input_fn_train = udc_inputs.create_input_fn(
      mode=tf.estimator.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hparams.batch_size,
      num_epochs=FLAGS.num_epochs)
  # NOTE(review): under tf.estimator, eval metrics must be returned from the
  # model_fn's EstimatorSpec, so input_fn_eval / eval_metrics below are built
  # but never consumed here. To evaluate, call
  # `estimator.evaluate(input_fn=input_fn_eval)` — confirm the metrics are
  # wired into the model_fn first.
  input_fn_eval = udc_inputs.create_input_fn(
      mode=tf.estimator.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hparams.eval_batch_size,
      num_epochs=1)
  eval_metrics = udc_metrics.create_evaluation_metrics()

  # BUG FIX: the original called train(steps=FLAGS.num_epochs), conflating an
  # epoch count with a gradient-step count (it would stop after num_epochs
  # *batches*). steps=None trains until input_fn_train raises OutOfRange,
  # i.e. after the requested number of epochs.
  estimator.train(input_fn=input_fn_train, steps=None)
def main(unused_argv):
  """Train the dual-encoder model, running validation periodically.

  Fits until the TRAIN input pipeline is exhausted (FLAGS.num_epochs
  epochs); a ValidationMonitor evaluates every FLAGS.eval_every steps.
  """
  hp = udc_hparams.create_hparams()

  train_input_fn = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hp.batch_size,
      num_epochs=FLAGS.num_epochs)
  eval_input_fn = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hp.eval_batch_size,
      num_epochs=1)

  estimator = tf.contrib.learn.Estimator(
      model_fn=udc_model.create_model_fn(hp, model_impl=dual_encoder_model),
      model_dir=MODEL_DIR,
      config=tf.contrib.learn.RunConfig())

  monitor = tf.contrib.learn.monitors.ValidationMonitor(
      input_fn=eval_input_fn,
      every_n_steps=FLAGS.eval_every,
      metrics=udc_metrics.create_evaluation_metrics())

  estimator.fit(input_fn=train_input_fn, steps=None, monitors=[monitor])
def main(unused_argv):
  """Train the dual-encoder model, evaluating every FLAGS.eval_every steps."""
  hparams = udc_hparams.create_hparams()
  model_fn = udc_model.create_model_fn(
      hparams, model_impl=dual_encoder_model)
  estimator = tf.contrib.learn.Estimator(
      model_fn=model_fn,
      model_dir=MODEL_DIR,
      config=tf.contrib.learn.RunConfig())
  input_fn_train = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hparams.batch_size,
      num_epochs=FLAGS.num_epochs)
  input_fn_eval = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hparams.eval_batch_size,
      num_epochs=1)
  eval_metrics = udc_metrics.create_evaluation_metrics()

  # We need to subclass this manually for now. The next TF version will
  # support ValidationMonitors with metrics built-in; it's already on the
  # master branch.
  class EvaluationMonitor(tf.contrib.learn.monitors.EveryN):
    # Runs a full evaluation pass every `every_n_steps` training steps.
    # NOTE(review): relies on the contrib-internal `self._estimator`
    # attribute set by the monitor framework — confirm on TF upgrade.
    def every_n_step_end(self, step, outputs):
      self._estimator.evaluate(
          input_fn=input_fn_eval, metrics=eval_metrics, steps=None)

  eval_monitor = EvaluationMonitor(every_n_steps=FLAGS.eval_every)
  # steps=None: train until input_fn_train is exhausted (FLAGS.num_epochs).
  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
def main(unused_argv):
  """Train the dual-encoder model, evaluating every FLAGS.eval_every steps."""
  hparams = udc_hparams.create_hparams()
  model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
  estimator = tf.contrib.learn.Estimator(
      model_fn=model_fn,
      model_dir=MODEL_DIR,
      config=tf.contrib.learn.RunConfig())
  input_fn_train = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.TRAIN,
      input_files=[TRAIN_FILE],
      batch_size=hparams.batch_size,
      num_epochs=FLAGS.num_epochs)
  input_fn_eval = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[VALIDATION_FILE],
      batch_size=hparams.eval_batch_size,
      num_epochs=1)
  eval_metrics = udc_metrics.create_evaluation_metrics()

  # We need to subclass this manually for now. The next TF version will
  # support ValidationMonitors with metrics built-in; it's already on the
  # master branch.
  class EvaluationMonitor(tf.contrib.learn.monitors.EveryN):
    # Runs a full evaluation pass every `every_n_steps` training steps.
    # NOTE(review): relies on the contrib-internal `self._estimator`
    # attribute set by the monitor framework — confirm on TF upgrade.
    def every_n_step_end(self, step, outputs):
      self._estimator.evaluate(input_fn=input_fn_eval,
                               metrics=eval_metrics,
                               steps=None)

  # first_n_steps=-1: presumably disables EveryN's extra per-step calls at
  # the very start of training — TODO confirm against the EveryN docs.
  eval_monitor = EvaluationMonitor(every_n_steps=FLAGS.eval_every,
                                   first_n_steps=-1)
  # steps=None: train until input_fn_train is exhausted (FLAGS.num_epochs).
  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
from models.dual_encoder import dual_encoder_model

# Command-line flags for the test run.
tf.flags.DEFINE_string("test_file", "./data/test.tfrecords", "Path of test data in TFRecords format")
tf.flags.DEFINE_string("model_dir", None, "Directory to load model checkpoints from")
tf.flags.DEFINE_integer("loglevel", 20, "Tensorflow log level")
tf.flags.DEFINE_integer("test_batch_size", 16, "Batch size for testing")
FLAGS = tf.flags.FLAGS

# A checkpoint directory is mandatory; bail out early without one.
if not FLAGS.model_dir:
  print("You must specify a model directory")
  sys.exit(1)

tf.logging.set_verbosity(FLAGS.loglevel)

if __name__ == "__main__":
  # Evaluate the trained dual-encoder checkpoint on the held-out test set.
  hp = udc_hparams.create_hparams()
  test_input_fn = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[FLAGS.test_file],
      batch_size=FLAGS.test_batch_size,
      num_epochs=1)
  estimator = tf.contrib.learn.Estimator(
      model_fn=udc_model.create_model_fn(hp, model_impl=dual_encoder_model),
      model_dir=FLAGS.model_dir,
      config=tf.contrib.learn.RunConfig())
  estimator.evaluate(
      input_fn=test_input_fn,
      steps=None,
      metrics=udc_metrics.create_evaluation_metrics())
# Command-line flags for the test run (persona dataset variant).
tf.flags.DEFINE_string("test_file", "./data/persona/test.tfrecords", "Path of test data in TFRecords format")
tf.flags.DEFINE_string("model_dir", "./runs/1542774662", "Directory to load model checkpoints from")
tf.flags.DEFINE_integer("loglevel", 20, "Tensorflow log level")
tf.flags.DEFINE_integer("test_batch_size", 8, "Batch size for testing")
FLAGS = tf.flags.FLAGS

# Guard against an explicitly-empty checkpoint directory.
if not FLAGS.model_dir:
  print("You must specify a model directory")
  sys.exit(1)

tf.logging.set_verbosity(FLAGS.loglevel)

if __name__ == "__main__":
  # Evaluate the trained dual-encoder checkpoint on the persona test set.
  hp = udc_hparams.create_hparams()
  test_input_fn = udc_inputs.create_input_fn(
      mode=tf.contrib.learn.ModeKeys.EVAL,
      input_files=[FLAGS.test_file],
      batch_size=FLAGS.test_batch_size,
      num_epochs=1)
  estimator = tf.contrib.learn.Estimator(
      model_fn=udc_model.create_model_fn(hp, model_impl=dual_encoder_model),
      model_dir=FLAGS.model_dir,
      config=tf.contrib.learn.RunConfig())
  estimator.evaluate(
      input_fn=test_input_fn,
      steps=None,
      metrics=udc_metrics.create_evaluation_metrics())