Example #1
0
def train_and_evaluate(hparams):
    """Train the model and evaluate it via the tf.estimator high-level API.

    Builds train/eval input pipelines and specs, assembles the model_fn
    from the hyperparameters, and hands everything to
    tf.estimator.train_and_evaluate.

    Args:
        hparams: Hyperparameter object carrying data file paths, batch
            sizes, epoch/step counts, layer sizing, export format, and
            the job directory.
    """
    def _train_input():
        # Repeated, batched training examples.
        return model.input_fn(hparams.train_files,
                              num_epochs=hparams.num_epochs,
                              batch_size=hparams.train_batch_size)

    def _eval_input():
        # Evaluation data is read in order — no shuffling.
        return model.input_fn(hparams.eval_files,
                              batch_size=hparams.eval_batch_size,
                              shuffle=False)

    train_spec = tf.estimator.TrainSpec(
        _train_input, max_steps=hparams.train_steps)

    final_exporter = tf.estimator.FinalExporter(
        'census', model.SERVING_FUNCTIONS[hparams.export_format])
    eval_spec = tf.estimator.EvalSpec(
        _eval_input,
        steps=hparams.eval_steps,
        exporters=[final_exporter],
        name='census-eval')

    # Hidden-layer widths decay geometrically with depth, floored at 2 units.
    layer_sizes = [
        max(2, int(hparams.first_layer_size * hparams.scale_factor ** depth))
        for depth in range(hparams.num_layers)
    ]
    model_fn = model.generate_model_fn(
        embedding_size=hparams.embedding_size,
        hidden_units=layer_sizes,
        learning_rate=hparams.learning_rate)

    estimator = tf.estimator.Estimator(
        model_fn=model_fn, model_dir=hparams.job_dir)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Example #2
0
def run_experiment(hparams):
    """Run the training and evaluate using the high level API.

    Builds train/eval input pipelines and specs, constructs a TPU-enabled
    estimator, and launches tf.estimator.train_and_evaluate.

    Args:
        hparams: Hyperparameter object carrying data file paths, batch
            sizes, epoch/step counts, layer sizing, export format, and
            the job directory.
    """
    train_input = lambda: model.input_fn(hparams.train_files,
                                         num_epochs=hparams.num_epochs,
                                         batch_size=hparams.train_batch_size)

    # Don't shuffle evaluation data.
    eval_input = lambda: model.input_fn(
        hparams.eval_files, batch_size=hparams.eval_batch_size, shuffle=False)

    # TODO: How should these train/eval spec's be further adjusted for using tf.estimator.train_and_evaluate()?
    train_spec = tf.estimator.TrainSpec(train_input,
                                        max_steps=hparams.train_steps)

    exporter = tf.estimator.FinalExporter(
        'census', model.SERVING_FUNCTIONS[hparams.export_format])
    eval_spec = tf.estimator.EvalSpec(eval_input,
                                      steps=hparams.eval_steps,
                                      exporters=[exporter],
                                      name='census-eval')

    model_fn = model.generate_model_fn(
        embedding_size=hparams.embedding_size,
        # Construct layers sizes with exponential decay.
        hidden_units=[
            max(2, int(hparams.first_layer_size * hparams.scale_factor**i))
            for i in range(hparams.num_layers)
        ],
        learning_rate=hparams.learning_rate)

    # TODO: unclear what config settings are needed for my model
    # FIX: the RunConfig was previously bound to `config` while the
    # estimator below received the undefined name `run_config`, which
    # raised NameError at runtime. Bind it under the name it is used as.
    # NOTE(review): `tpu_grpc_url`, `tpu_cluster_resolver`, and `FLAGS`
    # are assumed to be module-level — confirm they are defined.
    run_config = tpu_config.RunConfig(
        master=tpu_grpc_url,
        evaluation_master=tpu_grpc_url,
        model_dir=FLAGS.model_dir,
        cluster=tpu_cluster_resolver,
        tpu_config=tpu_config.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_cores))

    estimator = tpu_estimator.Estimator(
        use_tpu=True,
        model_fn=model_fn,
        model_dir=hparams.job_dir,
        config=run_config,
        # train_batch_size --> not being passed as I believe this should be handled in the train_spec...
    )
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Example #3
0
def train_and_evaluate(args):
    """Run the training and evaluate using the high level API.

    Args:
        args: Parsed arguments providing data file paths, batch sizes,
            epoch/step counts, layer sizing, export format, and the
            job directory.
    """
    # Batched, repeated training input.
    train_input = lambda: model.input_fn(
        args.train_files,
        num_epochs=args.num_epochs,
        batch_size=args.train_batch_size)

    # The validation set is consumed as-is; shuffling is unnecessary.
    eval_input = lambda: model.input_fn(
        args.eval_files,
        batch_size=args.eval_batch_size,
        shuffle=False)

    train_spec = tf.estimator.TrainSpec(
        train_input, max_steps=args.train_steps)

    serving_fn = model.SERVING_FUNCTIONS[args.export_format]
    eval_spec = tf.estimator.EvalSpec(
        eval_input,
        steps=args.eval_steps,
        exporters=[tf.estimator.FinalExporter('census', serving_fn)],
        name='census-eval')

    # Layer widths follow an exponential decay, never dropping below 2.
    hidden = [
        max(2, int(args.first_layer_size * args.scale_factor ** depth))
        for depth in range(args.num_layers)
    ]
    model_fn = model.generate_model_fn(
        embedding_size=args.embedding_size,
        hidden_units=hidden,
        learning_rate=args.learning_rate)

    estimator = tf.estimator.Estimator(
        model_fn=model_fn, model_dir=args.job_dir)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Example #4
0
def train_and_evaluate(args):
  """Run the training and evaluate using the high level API.

  Args:
    args: Parsed arguments providing data file paths, batch sizes,
      epoch/step counts, layer sizing, export format, and the job
      directory.
  """
  def _train_input():
    # Repeated, batched training examples.
    return model.input_fn(args.train_files,
                          num_epochs=args.num_epochs,
                          batch_size=args.train_batch_size)

  def _eval_input():
    # Evaluation examples are read in order — no shuffling.
    return model.input_fn(args.eval_files,
                          batch_size=args.eval_batch_size,
                          shuffle=False)

  train_spec = tf.estimator.TrainSpec(_train_input,
                                      max_steps=args.train_steps)

  exporter = tf.estimator.FinalExporter(
      'census', model.SERVING_FUNCTIONS[args.export_format])
  eval_spec = tf.estimator.EvalSpec(_eval_input,
                                    steps=args.eval_steps,
                                    exporters=[exporter],
                                    name='census-eval')

  # Hidden-layer widths decay exponentially with depth, floored at 2.
  sizes = [
      max(2, int(args.first_layer_size * args.scale_factor ** depth))
      for depth in range(args.num_layers)
  ]
  model_fn = model.generate_model_fn(
      embedding_size=args.embedding_size,
      hidden_units=sizes,
      learning_rate=args.learning_rate)

  estimator = tf.estimator.Estimator(model_fn=model_fn,
                                     model_dir=args.job_dir)
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)