def after_run(self, run_context, run_values):  # pylint: disable=unused-argument
        """Runs evaluator."""
        step = np.asscalar(run_context.session.run(self._global_step_tensor))

        if self._timer.should_trigger_for_step(step):
            logging.info('Starting eval.')
            eval_results = self._evaluate(run_context.session, step)
            mlp_log.mlperf_print('eval_accuracy',
                                 float(eval_results[_EVAL_METRIC]),
                                 metadata={
                                     'epoch_num':
                                     max(step // self._steps_per_epoch - 1, 0)
                                 })

            # Stop the run once the eval metric reaches the target accuracy.
            if eval_results[_EVAL_METRIC] >= self._stop_threshold:
                self._run_success = True
                mlp_log.mlperf_print('run_stop',
                                     None,
                                     metadata={'status': 'success'})
                mlp_log.mlperf_print('run_final', None)
                run_context.request_stop()

        if step // self._steps_per_epoch == self._eval_every_epoch_from:
            self._timer = training.SecondOrStepTimer(
                every_steps=self._steps_per_epoch)
            self._timer.reset()
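
# A minimal sketch, not from the original source, of the hook state that the
# after_run above assumes: a begin() that captures the global-step tensor and
# a timer that initially fires once per epoch. The attribute names mirror the
# snippet; everything else is an assumption based on the standard
# tf.train.SessionRunHook pattern (TF 1.x-style API).
import tensorflow.compat.v1 as tf


class _EvalHookStateSketch(tf.train.SessionRunHook):

    def __init__(self, steps_per_epoch, stop_threshold, eval_every_epoch_from):
        self._steps_per_epoch = steps_per_epoch
        self._stop_threshold = stop_threshold
        self._eval_every_epoch_from = eval_every_epoch_from
        self._run_success = False
        # after_run checks this timer to decide when to evaluate.
        self._timer = tf.train.SecondOrStepTimer(every_steps=steps_per_epoch)

    def begin(self):
        # after_run reads this tensor to learn the current training step.
        self._global_step_tensor = tf.train.get_global_step()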
Example #2
  def __init__(self,
               estimator,
               input_fn,
               steps=None,
               hooks=None,
               name=None,
               every_n_iter=100):
    """Initializes a `InMemoryEvaluatorHook`.

    Args:
      estimator: A `tf.estimator.Estimator` instance to call evaluate.
      input_fn:  Equivalent to the `input_fn` arg to `estimator.evaluate`. A
        function that constructs the input data for evaluation.
        See @{$premade_estimators#create_input_functions} for more
        information. The function should construct and return one of
        the following:

          * A `tf.data.Dataset` object: Outputs of the `Dataset` object must be
            a tuple `(features, labels)` with the same constraints as below.
          * A tuple (features, labels): Where `features` is a `Tensor` or a
            dictionary of string feature name to `Tensor` and `labels` is a
            `Tensor` or a dictionary of string label name to `Tensor`. Both
            `features` and `labels` are consumed by `model_fn`. They should
            satisfy the expectation of `model_fn` from inputs.

      steps: Equivalent to the `steps` arg to `estimator.evaluate`.  Number of
        steps for which to evaluate model. If `None`, evaluates until `input_fn`
        raises an end-of-input exception.
      hooks: Equivalent to the `hooks` arg to `estimator.evaluate`. List of
        `SessionRunHook` subclass instances. Used for callbacks inside the
        evaluation call.
      name:  Equivalent to the `name` arg to `estimator.evaluate`. Name of the
        evaluation if the user needs to run multiple evaluations on different
        data sets, such as on training data vs. test data. Metrics for
        different evaluations are saved in separate folders and appear
        separately in TensorBoard.
      every_n_iter: `int`, runs the evaluator once every N training iterations.

    Raises:
      ValueError: if `every_n_iter` is non-positive or the training is not a
        single-machine (local) run.
    """
    if every_n_iter is None or every_n_iter <= 0:
      raise ValueError('invalid every_n_iter=%s.' % every_n_iter)
    if (estimator.config.num_ps_replicas > 0 or
        estimator.config.num_worker_replicas > 1):
      raise ValueError(
          'InMemoryEvaluator supports only single machine (aka Local) setting.')
    self._estimator = estimator
    self._input_fn = input_fn
    self._steps = steps
    self._name = name
    self._every_n_iter = every_n_iter
    self._eval_dir = os.path.join(self._estimator.model_dir, 'eval'
                                  if not name else 'eval_' + name)

    self._graph = None
    self._hooks = estimator_lib._check_hooks_type(hooks)
    self._hooks.extend(self._estimator._convert_eval_steps_to_hooks(steps))
    self._timer = training.SecondOrStepTimer(every_steps=every_n_iter)
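
  # A small, hypothetical illustration (not part of the original class) of the
  # SecondOrStepTimer behaviour the hook relies on: should_trigger_for_step()
  # returns True at most once per `every_steps` window, and the triggering
  # code is expected to call update_last_triggered_step() to close the window.
  # Assuming `training` is the TensorFlow training module imported by the
  # original file, roughly:
  #
  #   timer = training.SecondOrStepTimer(every_steps=100)
  #   for step in range(0, 301, 50):
  #     if timer.should_trigger_for_step(step):
  #       timer.update_last_triggered_step(step)
  #       print('would evaluate at step', step)  # fires at 0, 100, 200, 300
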
  def after_run(self, run_context, run_values):  # pylint: disable=unused-argument
    """Runs evaluator."""
    step = np.asscalar(run_context.session.run(self._global_step_tensor))

    if self._timer.should_trigger_for_step(step):
      logging.info('Starting eval.')
      eval_results = self._evaluate(run_context.session, step)
      mlperf_log.resnet_print(key=mlperf_log.EVAL_STOP)
      mlperf_log.resnet_print(
          key=mlperf_log.EVAL_ACCURACY,
          value={
              'epoch': max(step // self._steps_per_epoch - 1, 0),
              'value': float(eval_results[_EVAL_METRIC])
          })

      # The ImageNet eval size is hard coded.
      mlperf_log.resnet_print(key=mlperf_log.EVAL_SIZE, value=50000)
      if eval_results[_EVAL_METRIC] >= self._stop_threshold:
        self._run_success = True
        mlperf_log.resnet_print(
            key=mlperf_log.RUN_STOP, value={'success': 'true'})
        run_context.request_stop()

    if step // self._steps_per_epoch == self._eval_every_epoch_from:
      self._timer = training.SecondOrStepTimer(
          every_steps=self._steps_per_epoch)
      self._timer.reset()
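
# A minimal end-to-end usage sketch, assumed rather than taken from the source:
# it wires a toy estimator to the hook so evaluation runs in-process every
# `every_n_iter` training steps. It assumes the full InMemoryEvaluatorHook
# class is in scope (the methods above are only part of it; TF 1.x shipped a
# version as tf.contrib.estimator.InMemoryEvaluatorHook). The model_fn,
# input_fn, and feature key 'x' below are illustrative inventions.
import tensorflow.compat.v1 as tf


def model_fn(features, labels, mode):
  logits = tf.layers.dense(features['x'], 2)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
      loss, global_step=tf.train.get_global_step())
  metrics = {'accuracy': tf.metrics.accuracy(labels, tf.argmax(logits, -1))}
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, train_op=train_op, eval_metric_ops=metrics)


def input_fn():
  features = {'x': tf.random.uniform([8, 4])}
  labels = tf.zeros([8], dtype=tf.int64)
  return tf.data.Dataset.from_tensors((features, labels)).repeat()


estimator = tf.estimator.Estimator(model_fn=model_fn)
evaluator = InMemoryEvaluatorHook(
    estimator, input_fn, steps=10, name='validation', every_n_iter=100)
estimator.train(input_fn, steps=300, hooks=[evaluator])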