def run(hparams, run_dir):
    """Dispatch to training, evaluation, or testing based on FLAGS.mode.

    Args:
        hparams: Hyperparameters forwarded to the selected train_util routine.
        run_dir: Base directory; 'train'/'eval'/'test' subdirectories live here.

    Raises:
        ValueError: If FLAGS.mode is not 'train', 'eval', or 'test'.
    """
    mode = FLAGS.mode
    train_dir = os.path.join(run_dir, 'train')

    if mode == 'train':
        train_util.train(
            train_dir=train_dir,
            examples_path=FLAGS.examples_path,
            hparams=hparams,
            checkpoints_to_keep=FLAGS.checkpoints_to_keep,
            num_steps=FLAGS.num_steps)
    elif mode == 'eval':
        # Optionally nest results under a named evaluation subdirectory.
        eval_dir = os.path.join(run_dir, 'eval')
        if FLAGS.eval_dir:
            eval_dir = os.path.join(eval_dir, FLAGS.eval_dir)
        train_util.evaluate(
            train_dir=train_dir,
            eval_dir=eval_dir,
            examples_path=FLAGS.examples_path,
            num_batches=FLAGS.eval_num_batches,
            hparams=hparams)
    elif mode == 'test':
        # Prefer an explicit checkpoint; otherwise use the newest in train_dir.
        if FLAGS.checkpoint_path:
            checkpoint_path = os.path.expanduser(FLAGS.checkpoint_path)
        else:
            checkpoint_path = tf.train.latest_checkpoint(train_dir)
        tf.logging.info('Testing with checkpoint: %s', checkpoint_path)
        test_dir = os.path.join(run_dir, 'test')
        train_util.test(
            checkpoint_path=checkpoint_path,
            test_dir=test_dir,
            examples_path=FLAGS.examples_path,
            num_batches=FLAGS.eval_num_batches,
            hparams=hparams)
    else:
        raise ValueError('Invalid mode: {}'.format(mode))
def run(hparams, run_dir):
  """Run one of train/eval/test as selected by FLAGS.mode.

  Args:
    hparams: Hyperparameters passed through to train_util.
    run_dir: Base run directory containing the 'train' subdirectory.

  Raises:
    ValueError: For an unrecognized FLAGS.mode.
  """
  train_dir = os.path.join(run_dir, 'train')

  if FLAGS.mode == 'eval':
    base_eval_dir = os.path.join(run_dir, 'eval')
    # An explicit --eval_dir becomes a subdirectory of the eval root.
    eval_dir = (os.path.join(base_eval_dir, FLAGS.eval_dir)
                if FLAGS.eval_dir else base_eval_dir)
    train_util.evaluate(
        train_dir=train_dir,
        eval_dir=eval_dir,
        examples_path=FLAGS.examples_path,
        num_batches=FLAGS.eval_num_batches,
        hparams=hparams)
  elif FLAGS.mode == 'test':
    # Fall back to the latest training checkpoint when none is given.
    if FLAGS.checkpoint_path:
      checkpoint_path = os.path.expanduser(FLAGS.checkpoint_path)
    else:
      checkpoint_path = tf.train.latest_checkpoint(train_dir)
    tf.logging.info('Testing with checkpoint: %s', checkpoint_path)
    train_util.test(
        checkpoint_path=checkpoint_path,
        test_dir=os.path.join(run_dir, 'test'),
        examples_path=FLAGS.examples_path,
        num_batches=FLAGS.eval_num_batches,
        hparams=hparams)
  elif FLAGS.mode == 'train':
    train_util.train(
        train_dir=train_dir,
        examples_path=FLAGS.examples_path,
        hparams=hparams,
        checkpoints_to_keep=FLAGS.checkpoints_to_keep,
        num_steps=FLAGS.num_steps)
  else:
    raise ValueError('Invalid mode: {}'.format(FLAGS.mode))
def run(config_map, data_fn, additional_trial_info):
    """Run training or evaluation for the configuration named by FLAGS.config.

    Args:
        config_map: Mapping from config name to a config object exposing
            `model_fn` and `hparams`.
        data_fn: Data-providing callable forwarded to train_util.
        additional_trial_info: Extra trial metadata forwarded to train_util.

    Raises:
        ValueError: If FLAGS.mode is neither 'train' nor 'eval'.
    """
    tf.logging.set_verbosity(FLAGS.log)

    config = config_map[FLAGS.config]
    model_dir = os.path.expanduser(FLAGS.model_dir)

    hparams = config.hparams
    # Command line flags override any of the preceding hyperparameter values.
    hparams.parse(FLAGS.hparams)

    # Arguments common to both the training and evaluation entry points.
    shared_kwargs = dict(
        model_fn=config.model_fn,
        data_fn=data_fn,
        additional_trial_info=additional_trial_info,
        master=FLAGS.master,
        model_dir=model_dir,
        preprocess_examples=FLAGS.preprocess_examples,
        hparams=hparams)

    if FLAGS.mode == 'train':
        train_util.train(
            use_tpu=FLAGS.use_tpu,
            keep_checkpoint_max=FLAGS.keep_checkpoint_max,
            num_steps=FLAGS.num_steps,
            **shared_kwargs)
    elif FLAGS.mode == 'eval':
        train_util.evaluate(
            name=FLAGS.eval_name,
            num_steps=FLAGS.eval_num_steps,
            **shared_kwargs)
    else:
        raise ValueError('Unknown/unsupported mode: %s' % FLAGS.mode)
# Example #4
# 0
def run(config_map, semisupervised_examples_map=None):
  """Run training or evaluation, optionally with semisupervised examples.

  Args:
    config_map: Mapping from config name to a config object exposing
      `model_fn` and `hparams`.
    semisupervised_examples_map: Optional mapping from config name to
      semisupervised example configs.

  Raises:
    ValueError: If the data-path flags are inconsistent, or if FLAGS.mode is
      neither 'train' nor 'eval'.
  """
  tf.logging.set_verbosity(FLAGS.log)

  # Validate data path flags: exactly one data source must be selected,
  # and eval mode requires a plain examples path.
  has_examples = bool(FLAGS.examples_path)
  has_semisupervised = bool(FLAGS.semisupervised_examples_config)
  if not has_examples and not has_semisupervised:
    raise ValueError('You must set flags for either `examples_path` or '
                     '`semisupervised_examples_config`.')
  if has_examples and has_semisupervised:
    raise ValueError('You must only set one of either `examples_path` or '
                     '`semisupervised_examples_config`.')
  if not has_examples and FLAGS.mode == 'eval':
    raise ValueError('You must set flags for `examples_path` if in eval mode.')

  semisupervised_configs = None
  if has_semisupervised and semisupervised_examples_map:
    semisupervised_configs = (
        semisupervised_examples_map[FLAGS.semisupervised_examples_config])

  config = config_map[FLAGS.config]
  model_dir = os.path.expanduser(FLAGS.model_dir)

  hparams = config.hparams
  # Command line flags override any of the preceding hyperparameter values.
  hparams.parse(FLAGS.hparams)

  if FLAGS.mode == 'train':
    train_util.train(
        model_fn=config.model_fn,
        master=FLAGS.master,
        model_dir=model_dir,
        use_tpu=FLAGS.use_tpu,
        examples_path=FLAGS.examples_path,
        preprocess_examples=FLAGS.preprocess_examples,
        hparams=hparams,
        keep_checkpoint_max=FLAGS.keep_checkpoint_max,
        num_steps=FLAGS.num_steps,
        semisupervised_configs=semisupervised_configs)
  elif FLAGS.mode == 'eval':
    train_util.evaluate(
        model_fn=config.model_fn,
        master=FLAGS.master,
        model_dir=model_dir,
        name=FLAGS.eval_name,
        examples_path=FLAGS.examples_path,
        preprocess_examples=FLAGS.preprocess_examples,
        hparams=hparams,
        num_steps=FLAGS.eval_num_steps)
  else:
    raise ValueError('Unknown/unsupported mode: %s' % FLAGS.mode)
def run(hparams, run_dir):
  """Run train/eval/test as selected by FLAGS.mode.

  Args:
    hparams: Hyperparameters passed through to train_util.
    run_dir: Base run directory containing the 'train' subdirectory.

  Raises:
    ValueError: For an unrecognized FLAGS.mode.
  """
  train_dir = os.path.join(run_dir, 'train')

  if FLAGS.mode == 'eval':
    eval_dir = os.path.join(run_dir, 'eval')
    # An explicit --eval_dir becomes a subdirectory of the eval root.
    if FLAGS.eval_dir:
      eval_dir = os.path.join(eval_dir, FLAGS.eval_dir)
    train_util.evaluate(
        train_dir=train_dir,
        eval_dir=eval_dir,
        examples_path=FLAGS.examples_path,
        num_batches=FLAGS.eval_num_batches,
        hparams=hparams,
        master=FLAGS.master)
  elif FLAGS.mode == 'test':
    # Fall back to the latest training checkpoint when none is given.
    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    if FLAGS.checkpoint_path:
      checkpoint_path = os.path.expanduser(FLAGS.checkpoint_path)

    tf.logging.info('Testing with checkpoint: %s', checkpoint_path)
    test_dir = os.path.join(run_dir, 'test')
    train_util.test(
        checkpoint_path=checkpoint_path,
        test_dir=test_dir,
        examples_path=FLAGS.examples_path,
        num_batches=FLAGS.eval_num_batches,
        hparams=hparams,
        master=FLAGS.master)
  elif FLAGS.mode == 'train':
    train_util.train(
        train_dir=train_dir,
        examples_path=FLAGS.examples_path,
        hparams=hparams,
        checkpoints_to_keep=FLAGS.checkpoints_to_keep,
        num_steps=FLAGS.num_steps,
        master=FLAGS.master,
        task=FLAGS.ps_task,
        num_ps_tasks=FLAGS.num_ps_tasks)
  else:
    # Fix: previously an invalid mode fell through silently; fail loudly
    # like the other run() variants in this file.
    raise ValueError('Invalid mode: {}'.format(FLAGS.mode))
# Example #6
# 0
def run(hparams, run_dir):
    """Run train/eval/test as selected by FLAGS.mode.

    Args:
        hparams: Hyperparameters passed through to train_util.
        run_dir: Base run directory containing the 'train' subdirectory.

    Raises:
        ValueError: For an unrecognized FLAGS.mode.
    """
    train_dir = os.path.join(run_dir, 'train')

    if FLAGS.mode == 'eval':
        eval_dir = os.path.join(run_dir, 'eval')
        # An explicit --eval_dir becomes a subdirectory of the eval root.
        if FLAGS.eval_dir:
            eval_dir = os.path.join(eval_dir, FLAGS.eval_dir)
        train_util.evaluate(train_dir=train_dir,
                            eval_dir=eval_dir,
                            examples_path=FLAGS.examples_path,
                            num_batches=FLAGS.eval_num_batches,
                            hparams=hparams,
                            master=FLAGS.master)
    elif FLAGS.mode == 'test':
        # Fall back to the latest training checkpoint when none is given.
        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        if FLAGS.checkpoint_path:
            checkpoint_path = os.path.expanduser(FLAGS.checkpoint_path)

        tf.logging.info('Testing with checkpoint: %s', checkpoint_path)
        test_dir = os.path.join(run_dir, 'test')
        train_util.test(checkpoint_path=checkpoint_path,
                        test_dir=test_dir,
                        examples_path=FLAGS.examples_path,
                        num_batches=FLAGS.eval_num_batches,
                        hparams=hparams,
                        master=FLAGS.master)
    elif FLAGS.mode == 'train':
        train_util.train(train_dir=train_dir,
                         examples_path=FLAGS.examples_path,
                         hparams=hparams,
                         checkpoints_to_keep=FLAGS.checkpoints_to_keep,
                         num_steps=FLAGS.num_steps,
                         master=FLAGS.master,
                         task=FLAGS.ps_task,
                         num_ps_tasks=FLAGS.num_ps_tasks)
    else:
        # Fix: previously an invalid mode fell through silently; fail loudly
        # like the other run() variants in this file.
        raise ValueError('Invalid mode: {}'.format(FLAGS.mode))