Example 1
    def _evaluate_model(self,
                        input_fn,
                        steps,
                        feed_fn=None,
                        metrics=None,
                        name=''):
        if self._config.execution_mode not in ('all', 'evaluate',
                                               'eval_evalset'):
            return

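        # Evaluate straight from the model directory; a non-empty `name`
        # routes summaries into a separate 'eval_<name>' subdirectory.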
        checkpoint_path = self._model_dir
        eval_dir = os.path.join(self._model_dir,
                                'eval' if not name else 'eval_' + name)
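        # Build a fresh graph, seed it for reproducibility, and run the metric
        # update ops in the evaluation loop below.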
        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = contrib_framework.create_global_step(g)
            features, targets = input_fn()
            self._check_inputs(features, targets)
            eval_dict = self._get_eval_ops(features, targets, metrics)
            update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
            eval_results, _ = evaluate(graph=g,
                                       output_dir=eval_dir,
                                       checkpoint_path=checkpoint_path,
                                       eval_dict=eval_dict,
                                       update_op=update_op,
                                       global_step_tensor=global_step,
                                       supervisor_master=self._config.master,
                                       feed_fn=feed_fn,
                                       max_steps=steps)
            return eval_results
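For context, these snippets are method bodies from the tf.contrib.learn Estimator class and rely on module aliases imported at the top of that file. Below is a minimal sketch of the imports they appear to assume, using TensorFlow 1.x module paths; treat the exact paths and aliases as an assumption, since they shifted between contrib releases.

# Assumed imports for the _evaluate_model snippets in this listing
# (TensorFlow 1.x, tf.contrib.learn layout; paths are approximate).
import os

from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.training import saver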
Example 2
  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name=''):
    if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):
      return

    checkpoint_path = self._model_dir
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      eval_dict = self._get_eval_ops(features, targets, metrics)
      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      eval_results, _ = evaluate(graph=g,
                                 output_dir=eval_dir,
                                 checkpoint_path=checkpoint_path,
                                 eval_dict=eval_dict,
                                 update_op=update_op,
                                 global_step_tensor=global_step,
                                 supervisor_master=self._config.master,
                                 feed_fn=feed_fn,
                                 max_steps=steps)
      return eval_results
Example 3
  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name=''):
    # TODO(wicke): Remove this once Model and associated code are gone.
    if (hasattr(self._config, 'execution_mode') and
        self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
      return None, None

    # Check that model has been trained.
    checkpoint_path = self._model_dir
    latest_path = saver.latest_checkpoint(checkpoint_path)
    if not latest_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % checkpoint_path)
    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, labels = input_fn()
      self._check_inputs(features, labels)

      # The default return type of _get_eval_ops is ModelFnOps. But there are
      # some subclasses of tf.contrib.learn.Estimator which override this
      # method and use the legacy signature, namely _get_eval_ops returns an
      # `eval_dict` dictionary of Tensors. The following else-statement code
      # covers these cases, but will soon be deleted after the subclasses are
      # updated.
      # TODO(b/32664904): Update subclasses and delete the else-statement.
      eval_ops = self._get_eval_ops(features, labels, metrics)
      if isinstance(eval_ops, ModelFnOps):  # Default signature
        eval_dict = eval_ops.eval_metric_ops
      else:  # Legacy signature
        eval_dict = eval_ops

      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      eval_results, current_global_step = graph_actions.evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
          update_op=update_op,
          global_step_tensor=global_step,
          supervisor_master=self._config.evaluation_master,
          feed_fn=feed_fn,
          max_steps=steps)

      return eval_results, current_global_step
Example 4
  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name=''):
    # TODO(wicke): Remove this once Model and associated code are gone.
    if (hasattr(self._config, 'execution_mode') and
        self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
      return None, None

    # Check that model has been trained.
    checkpoint_path = self._model_dir
    latest_path = saver.latest_checkpoint(checkpoint_path)
    if not latest_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % checkpoint_path)
    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, labels = input_fn()
      self._check_inputs(features, labels)

      # The default return type of _get_eval_ops is ModelFnOps. But there are
      # some subclasses of tf.contrib.learn.Estimator which override this
      # method and use the legacy signature, namely _get_eval_ops returns an
      # `eval_dict` dictionary of Tensors. The following else-statement code
      # covers these cases, but will soon be deleted after the subclasses are
      # updated.
      # TODO(b/32664904): Update subclasses and delete the else-statement.
      eval_ops = self._get_eval_ops(features, labels, metrics)
      if isinstance(eval_ops, model_fn_lib.ModelFnOps):  # Default signature
        eval_dict = eval_ops.eval_metric_ops
      else:  # Legacy signature
        eval_dict = eval_ops

      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      eval_results, current_global_step = graph_actions.evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
          update_op=update_op,
          global_step_tensor=global_step,
          supervisor_master=self._config.evaluation_master,
          feed_fn=feed_fn,
          max_steps=steps)

      return eval_results, current_global_step
Example 5
    def _evaluate_model(self,
                        input_fn,
                        steps,
                        feed_fn=None,
                        metrics=None,
                        name=''):
        # TODO(wicke): Remove this once Model and associated code are gone.
        if (hasattr(self._config, 'execution_mode')
                and self._config.execution_mode
                not in ('all', 'evaluate', 'eval_evalset')):
            return None, None

        # Check that model has been trained.
        checkpoint_path = self._model_dir
        latest_path = saver.latest_checkpoint(checkpoint_path)
        if not latest_path:
            raise NotFittedError("Couldn't find trained model at %s." %
                                 checkpoint_path)
        # Setup output directory.
        eval_dir = os.path.join(self._model_dir,
                                'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = contrib_framework.create_global_step(g)
            features, targets = input_fn()
            self._check_inputs(features, targets)
            eval_dict = self._get_eval_ops(features, targets, metrics)
            update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
            eval_results, current_global_step = graph_actions.evaluate(
                graph=g,
                output_dir=eval_dir,
                checkpoint_path=checkpoint_path,
                eval_dict=eval_dict,
                update_op=update_op,
                global_step_tensor=global_step,
                supervisor_master=self._config.master,
                feed_fn=feed_fn,
                max_steps=steps)

            return eval_results, current_global_step
Example 6
  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name=''):
    # TODO(wicke): Remove this once Model and associated code are gone.
    if (hasattr(self._config, 'execution_mode') and
        self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
      return None, None

    # Check that model has been trained.
    checkpoint_path = self._model_dir
    latest_path = saver.latest_checkpoint(checkpoint_path)
    if not latest_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % checkpoint_path)
    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      eval_dict = self._get_eval_ops(features, targets, metrics)
      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      eval_results, current_global_step = graph_actions.evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
          update_op=update_op,
          global_step_tensor=global_step,
          supervisor_master=self._config.evaluation_master,
          feed_fn=feed_fn,
          max_steps=steps)

      return eval_results, current_global_step
Example 7
    def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):
        if self._config.execution_mode not in ('all', 'evaluate',
                                               'eval_evalset'):
            return

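        # Unlike the variants above, this version resolves the most recent
        # checkpoint file itself rather than passing the model directory.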
        checkpoint_path = saver.latest_checkpoint(self._model_dir)
        eval_dir = os.path.join(self._model_dir, 'eval')
        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = contrib_framework.create_global_step(g)
            features, targets = input_fn()
            self._check_inputs(features, targets)
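            # Fall back to the estimator's default metric functions when the
            # caller does not supply any metrics.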
            eval_dict = self._get_eval_ops(
                features, targets, metrics if metrics is not None else
                self._get_default_metric_functions())
            eval_results, _ = evaluate(graph=g,
                                       output_dir=eval_dir,
                                       checkpoint_path=checkpoint_path,
                                       eval_dict=eval_dict,
                                       global_step_tensor=global_step,
                                       supervisor_master=self._config.master,
                                       feed_fn=feed_fn,
                                       max_steps=steps)
            return eval_results
Example 8
  def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):
    if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):
      return

    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    eval_dir = os.path.join(self._model_dir, 'eval')
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      eval_dict = self._get_eval_ops(features, targets,
                                     metrics if metrics is not None else
                                     self._get_default_metric_functions())
      eval_results, _ = evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
          global_step_tensor=global_step,
          supervisor_master=self._config.master,
          feed_fn=feed_fn,
          max_steps=steps)
      return eval_results
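These private helpers are normally reached through the public Estimator.evaluate call, which forwards input_fn, steps, metrics, and name to _evaluate_model. The following is a rough usage sketch, assuming a TensorFlow 1.x environment with tf.contrib.learn available; my_model_fn and my_input_fn are placeholders invented for illustration, not taken from the snippets above.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn


def my_input_fn():
  # Toy constant features/labels; a real input_fn would read evaluation data.
  features = tf.constant(np.random.rand(16, 2), dtype=tf.float32)
  labels = tf.constant(np.random.rand(16, 1), dtype=tf.float32)
  return features, labels


def my_model_fn(features, labels, mode):
  # Legacy tuple-style model_fn: returns (predictions, loss, train_op).
  predictions = tf.contrib.layers.fully_connected(features, 1, activation_fn=None)
  loss = tf.losses.mean_squared_error(labels, predictions)
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      learning_rate=0.1, optimizer='SGD')
  return predictions, loss, train_op


estimator = learn.Estimator(model_fn=my_model_fn, model_dir='/tmp/eval_demo')
estimator.fit(input_fn=my_input_fn, steps=10)  # writes the checkpoint that evaluate() loads
results = estimator.evaluate(input_fn=my_input_fn, steps=1, name='validation')
print(results)  # dict of metric values, e.g. loss and global_step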