Example #1
def main(unused_argv):
    hparams = udc_hparams.create_hparams()
    writer = tf.summary.FileWriter(FLAGS.logdir)

    model_fn = udc_model.create_model_fn(hparams,
                                         model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=MODEL_DIR,
                                           config=tf.contrib.learn.RunConfig())

    input_fn_train = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    eval_metrics = udc_metrics.create_evaluation_metrics()

    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example #2
def main(unused_argv):
    hparams = udc_hparams.create_hparams()

    model_fn = udc_model.create_model_fn(hparams,
                                         model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=MODEL_DIR,
                                           config=tf.contrib.learn.RunConfig())

    input_fn_train = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    eval_metrics = udc_metrics.create_evaluation_metrics()

    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics,
        early_stopping_metric="recall_at_1",
        early_stopping_metric_minimize=False,
        early_stopping_rounds=4000)

    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example #3
def main(unused_argv):
    hparams = udc_hparams.create_hparams()

    model_fn = udc_model.create_model_fn(hparams,
                                         model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=MODEL_DIR)

    input_fn_train = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    eval_metrics = udc_metrics.create_evaluation_metrics()

    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

    # Pass the tfdbg CLI hook to fit() alongside the eval monitor so the
    # debugger actually attaches to the training session.
    dbg_hook = tfdbg.LocalCLIDebugHook()
    estimator.fit(input_fn=input_fn_train,
                  steps=FLAGS.num_steps,
                  monitors=[eval_monitor, dbg_hook])
Example #4
def main(unused_argv):
    hparams = udc_hparams.create_hparams()

    model_fn = udc_model.create_model_fn(hparams,
                                         model_impl=dual_encoder_model)

    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       model_dir=MODEL_DIR,
                                       config=tf.estimator.RunConfig())

    input_fn_train = udc_inputs.create_input_fn(
        mode=tf.estimator.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = udc_inputs.create_input_fn(
        mode=tf.estimator.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    eval_metrics = udc_metrics.create_evaluation_metrics()

    estimator.train(input_fn=input_fn_train, steps=FLAGS.num_epochs)
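This example switches to the core tf.estimator API, but note that input_fn_eval and eval_metrics are built and never used, and FLAGS.num_epochs is passed as the steps argument of train(). Under tf.estimator, periodic evaluation is normally wired up with tf.estimator.train_and_evaluate rather than a ValidationMonitor. The following is a minimal sketch of that pattern, not part of the original example; the num_steps flag and the throttle value are illustrative assumptions.

# Hypothetical sketch (TF >= 1.4): periodic evaluation with train_and_evaluate.
train_spec = tf.estimator.TrainSpec(
    input_fn=input_fn_train,
    max_steps=FLAGS.num_steps)   # assumes a num_steps flag like the one in Example #3
eval_spec = tf.estimator.EvalSpec(
    input_fn=input_fn_eval,
    steps=None,                  # evaluate over the full validation set
    throttle_secs=600)           # re-evaluate at most every 10 minutes
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

With tf.estimator, evaluation metrics are returned from the model_fn via EstimatorSpec's eval_metric_ops rather than passed in at call time, so the eval_metrics dict above would move into the model function.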
Example #5
def main(unused_argv):
  hparams = udc_hparams.create_hparams()

  model_fn = udc_model.create_model_fn(
    hparams,
    model_impl=dual_encoder_model)

  estimator = tf.contrib.learn.Estimator(
    model_fn=model_fn,
    model_dir=MODEL_DIR,
    config=tf.contrib.learn.RunConfig())

  input_fn_train = udc_inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.TRAIN,
    input_files=[TRAIN_FILE],
    batch_size=hparams.batch_size,
    num_epochs=FLAGS.num_epochs)

  input_fn_eval = udc_inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.EVAL,
    input_files=[VALIDATION_FILE],
    batch_size=hparams.eval_batch_size,
    num_epochs=1)

  eval_metrics = udc_metrics.create_evaluation_metrics()
  
  eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example #6
def main(unused_argv):
    # Load the hyperparameters defined in udc_hparams
    hparams = udc_hparams.create_hparams()

    # Build the model_fn from the hyperparameters and the dual_encoder_model implementation
    model_fn = udc_model.create_model_fn(
        hparams,
        model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(
        model_fn=model_fn,
        model_dir=MODEL_DIR,
        config=tf.contrib.learn.RunConfig())

    '''
    1. Create a feature definition that describes the fields in our Example file
    2. Read records from the input_files with tf.TFRecordReader
    3. Parse the records according to the feature definition
    4. Extract the training labels
    5. Batch multiple examples and training labels
    6. Return the batched examples and training labels

    (A rough sketch of an input_fn following these six steps appears after this example.)
    '''

    input_fn_train = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    # eval_metrics = udc_metrics.create_evaluation_metrics()
    eval_metrics = None

    """Runs evaluation of a given estimator, at most every N steps.

      Note that the evaluation is done based on the saved checkpoint, which will
      usually be older than the current step.

      Can do early stopping on validation metrics if `early_stopping_rounds` is
      provided.
      """
    eval_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        input_fn=input_fn_eval,
        every_n_steps=FLAGS.eval_every,
        metrics=eval_metrics)

    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
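The six numbered steps quoted inside this example describe what udc_inputs.create_input_fn does. Below is a rough, self-contained sketch of an input_fn that follows those steps using the TF 1.x queue-based readers; the feature names and sequence lengths are illustrative guesses, not the actual udc_inputs implementation.

import tensorflow as tf

def make_input_fn(input_files, batch_size, num_epochs):
    def input_fn():
        # 1. Feature definition describing the fields in the Example records
        #    (names and lengths here are placeholders)
        feature_spec = {
            "context": tf.FixedLenFeature([160], dtype=tf.int64),
            "utterance": tf.FixedLenFeature([160], dtype=tf.int64),
            "label": tf.FixedLenFeature([], dtype=tf.int64),
        }
        # 2. Read serialized records from the input files with tf.TFRecordReader
        filename_queue = tf.train.string_input_producer(
            input_files, num_epochs=num_epochs)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        # 3. Parse each record according to the feature definition
        parsed = tf.parse_single_example(serialized_example, feature_spec)
        # 5. Batch multiple parsed examples (features and labels together)
        batched = tf.train.batch(parsed, batch_size=batch_size)
        # 4. Extract the training labels from the batched tensors
        labels = batched.pop("label")
        # 6. Return the batched examples and training labels
        return batched, labels
    return input_fn

The real implementation also handles shuffling and the TRAIN/EVAL mode argument; the sketch only illustrates the shape of the pipeline the six steps describe.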
Example #7
def main(unused_argv):
  hparams = udc_hparams.create_hparams()

  model_fn = udc_model.create_model_fn(
    hparams,
    model_impl=dual_encoder_model)

  estimator = tf.contrib.learn.Estimator(
    model_fn=model_fn,
    model_dir=MODEL_DIR,
    config=tf.contrib.learn.RunConfig())

  input_fn_train = udc_inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.TRAIN,
    input_files=[TRAIN_FILE],
    batch_size=hparams.batch_size,
    num_epochs=FLAGS.num_epochs)

  input_fn_eval = udc_inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.EVAL,
    input_files=[VALIDATION_FILE],
    batch_size=hparams.eval_batch_size,
    num_epochs=1)

  eval_metrics = udc_metrics.create_evaluation_metrics()

  # We need to subclass this manually for now. The next TF version will
  # support ValidationMonitors with metrics built-in.
  # It's already on the master branch.
  class EvaluationMonitor(tf.contrib.learn.monitors.EveryN):
    def every_n_step_end(self, step, outputs):
      self._estimator.evaluate(
        input_fn=input_fn_eval,
        metrics=eval_metrics,
        steps=None)

  eval_monitor = EvaluationMonitor(every_n_steps=FLAGS.eval_every)
  estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example #8
def main(unused_argv):
    hparams = udc_hparams.create_hparams()

    model_fn = udc_model.create_model_fn(hparams,
                                         model_impl=dual_encoder_model)

    estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                           model_dir=MODEL_DIR,
                                           config=tf.contrib.learn.RunConfig())

    input_fn_train = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_files=[TRAIN_FILE],
        batch_size=hparams.batch_size,
        num_epochs=FLAGS.num_epochs)

    input_fn_eval = udc_inputs.create_input_fn(
        mode=tf.contrib.learn.ModeKeys.EVAL,
        input_files=[VALIDATION_FILE],
        batch_size=hparams.eval_batch_size,
        num_epochs=1)

    eval_metrics = udc_metrics.create_evaluation_metrics()

    # We need to subclass this manually for now. The next TF version will
    # support ValidationMonitors with metrics built-in.
    # It's already on the master branch.
    class EvaluationMonitor(tf.contrib.learn.monitors.EveryN):
        def every_n_step_end(self, step, outputs):
            self._estimator.evaluate(input_fn=input_fn_eval,
                                     metrics=eval_metrics,
                                     steps=None)

    eval_monitor = EvaluationMonitor(every_n_steps=FLAGS.eval_every,
                                     first_n_steps=-1)
    estimator.fit(input_fn=input_fn_train, steps=None, monitors=[eval_monitor])
Example #9
# Supporting imports implied by the snippet below
import sys

import tensorflow as tf

import udc_hparams
import udc_inputs
import udc_metrics
import udc_model
from models.dual_encoder import dual_encoder_model

tf.flags.DEFINE_string("test_file", "./data/test.tfrecords", "Path of test data in TFRecords format")
tf.flags.DEFINE_string("model_dir", None, "Directory to load model checkpoints from")
tf.flags.DEFINE_integer("loglevel", 20, "Tensorflow log level")
tf.flags.DEFINE_integer("test_batch_size", 16, "Batch size for testing")
FLAGS = tf.flags.FLAGS

if not FLAGS.model_dir:
  print("You must specify a model directory")
  sys.exit(1)

tf.logging.set_verbosity(FLAGS.loglevel)

if __name__ == "__main__":
  hparams = udc_hparams.create_hparams()
  model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
  estimator = tf.contrib.learn.Estimator(
    model_fn=model_fn,
    model_dir=FLAGS.model_dir,
    config=tf.contrib.learn.RunConfig())

  input_fn_test = udc_inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.EVAL,
    input_files=[FLAGS.test_file],
    batch_size=FLAGS.test_batch_size,
    num_epochs=1)

  eval_metrics = udc_metrics.create_evaluation_metrics()
  estimator.evaluate(input_fn=input_fn_test, steps=None, metrics=eval_metrics)
Example #10

tf.flags.DEFINE_string("test_file", "./data/persona/test.tfrecords", "Path of test data in TFRecords format")
tf.flags.DEFINE_string("model_dir", "./runs/1542774662", "Directory to load model checkpoints from")
tf.flags.DEFINE_integer("loglevel", 20, "Tensorflow log level")
tf.flags.DEFINE_integer("test_batch_size", 8, "Batch size for testing")
FLAGS = tf.flags.FLAGS

if not FLAGS.model_dir:
  print("You must specify a model directory")
  sys.exit(1)

tf.logging.set_verbosity(FLAGS.loglevel)

if __name__ == "__main__":
  hparams = udc_hparams.create_hparams()
  model_fn = udc_model.create_model_fn(hparams, model_impl=dual_encoder_model)
  estimator = tf.contrib.learn.Estimator(
    model_fn=model_fn,
    model_dir=FLAGS.model_dir,
    config=tf.contrib.learn.RunConfig())

  input_fn_test = udc_inputs.create_input_fn(
    mode=tf.contrib.learn.ModeKeys.EVAL,
    input_files=[FLAGS.test_file],
    batch_size=FLAGS.test_batch_size,
    num_epochs=1)

  eval_metrics = udc_metrics.create_evaluation_metrics()
  estimator.evaluate(input_fn=input_fn_test, steps=None, metrics=eval_metrics)
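A small usage note, not part of the original script: tf.contrib.learn's Estimator.evaluate returns a dict of metric values, so the result of the call above could be captured and printed, for example:

  eval_results = estimator.evaluate(
    input_fn=input_fn_test, steps=None, metrics=eval_metrics)
  for name, value in sorted(eval_results.items()):
    print("{}: {}".format(name, value))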