Code example #1
import json
import pprint

import tensorflow as tf

# `FLAGS`, `model`, and `utils` are assumed to be defined in the
# surrounding module.


def create_config(model_dir, hparams=None):
  """Creates a config instance by merging overrides onto the defaults."""
  # Validate before logging: concatenating a None model_dir would raise a
  # TypeError before the assertion fired.
  assert model_dir
  tf.logging.info("model_dir = " + model_dir)

  if hparams:
    tf.logging.info("Given override cfg:\n%s" % pprint.pformat(hparams))
  else:
    hparams = dict()

  # Build the default config
  cfg = model.build_config(model_dir=model_dir, data_path=FLAGS.data_path)

  # Optionally overlay a JSON config file on top of the defaults.
  if FLAGS.config_file:
    with tf.gfile.GFile(FLAGS.config_file) as f:
      file_cfg = json.load(f)
      file_cfg = utils.to_config(file_cfg)
    tf.logging.info("Loaded config from file:\n%s" % file_cfg)
    cfg = utils.merge_fixed_structure(cfg, file_cfg)

  # Override from flags
  overrides = dict()
  if FLAGS.config:
    overrides = utils.parse_config_string(FLAGS.config)
    tf.logging.info("Parsed config overrides:\n%s" % overrides)
    cfg = utils.merge_fixed_structure(cfg, overrides)

  if FLAGS.master:
    cfg.master = FLAGS.master

  # Finally, merge the programmatic hparam overrides; flattened dotted keys
  # are unflattened first, and these take precedence over file and flag
  # overrides.
  cfg = utils.merge_fixed_structure(cfg, utils.unflatten_dict(hparams))

  tf.logging.info("Operative config:\n%s" % cfg)

  return cfg
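
For context, a minimal usage sketch follows. The merge precedence (built defaults, then --config_file, then --config, then hparams) is read directly from the function body; the concrete path and key values are illustrative assumptions, not a documented API.

# Hypothetical invocation; the path and hparams keys are illustrative.
# Flattened dotted keys are unflattened before the final merge, so this
# override lands on cfg.dataset.max_length with the highest precedence.
cfg = create_config(
    model_dir="/tmp/my_model",
    hparams={"dataset.max_length": 512})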
Code example #2
# `_load_config`, `model`, `utils`, `data`, and `FLAGS` are assumed to be
# defined in the surrounding module.
def evaluate(override_cfg, model_dir, continuous=True):
  """Runs evaluation, either continuously over new checkpoints or once."""
  tf.logging.info("model_dir = " + model_dir)
  try:
    cfg = _load_config(model_dir)
  except tf.errors.NotFoundError:
    tf.logging.info("Model directory does not exist yet. Creating new config.")
    cfg = model.build_config(model_dir=model_dir, data_path=FLAGS.data_path)
  tf.logging.info("Base config:\n%s" % cfg)
  tf.logging.info("Override config:\n%s" % override_cfg)
  cfg = utils.merge(cfg, override_cfg)

  # Evaluate off-TPU and without truncating examples.
  cfg.tpu.enable = False
  cfg.dataset.max_length = None

  # Construct inputs and estimator
  _, eval_input = data.build_dataset(cfg.dataset, is_tpu=cfg.tpu.enable)
  estimator = model.get_estimator(**cfg)
  if continuous:
    # Continuous mode: block until a new checkpoint appears in model_dir,
    # evaluate it, and repeat until the iterator stops yielding; the last
    # metrics dict is returned.
    checkpoints_iterator = tf.contrib.training.checkpoints_iterator(
        cfg.model_dir)
    eval_metrics = None
    for ckpt_path in checkpoints_iterator:
      eval_metrics = estimator.evaluate(
          input_fn=eval_input, checkpoint_path=ckpt_path)
      tf.logging.info(pprint.pformat(eval_metrics))
    return eval_metrics
  else:
    # Single evaluation pass against the latest checkpoint.
    eval_metrics = estimator.evaluate(input_fn=eval_input)
    return eval_metrics
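
For reference, a sketch of a one-shot call. The override mirrors the nested cfg fields used above; utils.to_config is borrowed from the first example, and the concrete values are illustrative assumptions.

# Hypothetical one-shot evaluation; the override contents are illustrative.
override_cfg = utils.to_config({"dataset": {"max_length": None}})
metrics = evaluate(override_cfg, model_dir="/tmp/my_model", continuous=False)
tf.logging.info("Final metrics:\n%s" % pprint.pformat(metrics))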