Example no. 1
    def testEvalOpAndFinalOp(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'eval_ops_and_final_ops')

        # Train a model for a single step to get a checkpoint.
        self._train_model(checkpoint_dir, num_steps=1)
        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        # Create the model so we have something to restore.
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        logistic_classifier(inputs)

        num_evals = 5
        final_increment = 9.0

        my_var = local_variable(0.0, name='MyVar')
        eval_ops = state_ops.assign_add(my_var, 1.0)
        final_ops = array_ops.identity(my_var) + final_increment

        final_hooks = [
            evaluation._StopAfterNEvalsHook(num_evals),
        ]
        initial_hooks = list(final_hooks)
        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=eval_ops,
            final_ops={'value': final_ops},
            hooks=final_hooks)
        self.assertEqual(final_ops_values['value'],
                         num_evals + final_increment)
        self.assertEqual(initial_hooks, final_hooks)
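
Note: the test above relies on the contract of evaluation._evaluate_once that eval_ops run once per evaluation step until _StopAfterNEvalsHook stops the loop, after which final_ops are evaluated exactly once. A minimal plain-Python sketch of that accounting (an illustration, not the TensorFlow implementation):

num_evals = 5
final_increment = 9.0

my_var = 0.0
for _ in range(num_evals):              # _StopAfterNEvalsHook(num_evals)
    my_var += 1.0                       # eval_ops: assign_add(my_var, 1.0)
final_value = my_var + final_increment  # final_ops: identity(my_var) + final_increment
assert final_value == num_evals + final_increment  # 14.0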
Example no. 2
    def testMultiEvalStepIncrements(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'eval_ops_and_final_ops')

        # Train a model for a single step to get a checkpoint.
        self._train_model(checkpoint_dir, num_steps=1)
        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        # Create the model so we have something to restore.
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        logistic_classifier(inputs)

        num_evals = 6

        my_var = local_variable(0.0, name='MyVar')
        # The eval ops also increment the eval step one extra time per iteration.
        eval_ops = [
            state_ops.assign_add(my_var, 1.0),
            state_ops.assign_add(evaluation._get_or_create_eval_step(),
                                 1,
                                 use_locking=True)
        ]
        expect_eval_update_counts = num_evals // 2

        final_ops = array_ops.identity(my_var)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=eval_ops,
            final_ops={'value': final_ops},
            hooks=[
                evaluation._StopAfterNEvalsHook(num_evals),
            ])
        self.assertEqual(final_ops_values['value'], expect_eval_update_counts)
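
The expected value num_evals // 2 follows from the step accounting assumed by this test: the evaluation loop advances the eval step once per iteration, and the second eval op advances it again, so the hook's budget of num_evals steps is consumed two at a time and only three iterations run. A rough plain-Python sketch of that accounting (assuming this behaviour of _StopAfterNEvalsHook, which is not shown in the snippet):

num_evals = 6

eval_step = 0
my_var = 0.0
while eval_step < num_evals:  # _StopAfterNEvalsHook(num_evals)
    my_var += 1.0             # first eval op: assign_add(my_var, 1.0)
    eval_step += 1            # second eval op: extra assign_add on the eval step
    eval_step += 1            # increment performed by the evaluation loop itself
assert my_var == num_evals // 2  # 3.0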
Example no. 3
    def _evaluate_model(self,
                        input_fn,
                        hooks=None,
                        checkpoint_path=None,
                        name=''):
        """Evaluates the model using the training.evaluation library."""
        # Check that model has been trained (if nothing has been set explicitly).
        if not checkpoint_path:
            latest_path = saver.latest_checkpoint(self._model_dir)
            if not latest_path:
                raise ValueError(
                    'Could not find trained model in model_dir: {}.'.format(
                        self._model_dir))
            checkpoint_path = latest_path

        # Set up the output directory.
        eval_dir = os.path.join(self._model_dir,
                                'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step_tensor = self._create_and_assert_global_step(g)
            features, labels = self._get_features_and_labels_from_input_fn(
                input_fn, model_fn_lib.ModeKeys.EVAL)
            estimator_spec = self._call_model_fn(features, labels,
                                                 model_fn_lib.ModeKeys.EVAL)

            if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
                raise ValueError(
                    'Metric with name "%s" is not allowed, because Estimator '
                    % (model_fn_lib.LOSS_METRIC_KEY) +
                    'already defines a default metric with the same name.')
            estimator_spec.eval_metric_ops[
                model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(
                    estimator_spec.loss)

            update_op, eval_dict = _extract_metric_update_ops(
                estimator_spec.eval_metric_ops)

            if ops.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError(
                    'Metric with name `global_step` is not allowed, because Estimator '
                    'already defines a default metric with the same name.')
            eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

            eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
                checkpoint_path=checkpoint_path,
                master=self._config.evaluation_master,
                scaffold=estimator_spec.scaffold,
                eval_ops=update_op,
                final_ops=eval_dict,
                hooks=hooks,
                config=self._session_config)

            _write_dict_to_summary(
                output_dir=eval_dir,
                dictionary=eval_results,
                current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

        return eval_results
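
_extract_metric_update_ops is not shown in this snippet. From the way its results are used (eval_ops=update_op, final_ops=eval_dict), it is expected to split the eval_metric_ops dict of (value_tensor, update_op) pairs into the value tensors reported as final results and the update ops run on every batch. A hypothetical sketch of such a helper (the real one may instead group the update ops into a single op):

def extract_metric_update_ops_sketch(eval_metric_ops):
    # eval_metric_ops maps metric name -> (value_tensor, update_op).
    value_ops = {}
    update_ops = []
    for name, (value_tensor, update_op) in sorted(eval_metric_ops.items()):
        value_ops[name] = value_tensor
        update_ops.append(update_op)
    # A list of ops is also accepted by _evaluate_once as eval_ops.
    return update_ops, value_ops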
Example no. 4
    def testEvaluatePerfectModel(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_perfect_model_once')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Build the evaluation graph and metrics.
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metrics.accuracy(predictions=predictions,
                                               labels=labels)

        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation._StopAfterNEvalsHook(1),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
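
metrics.accuracy follows the streaming-metric convention used throughout these examples: it returns a value tensor and an update_op, each run of the update_op folds one batch into internal totals, and the value tensor reads the running ratio. A plain-Python analogue of that contract (an illustration only, not the TensorFlow implementation):

class StreamingAccuracySketch(object):
    """Illustrative analogue of a (value, update_op) streaming metric."""

    def __init__(self):
        self.correct = 0.0
        self.total = 0.0

    def update(self, predictions, labels):  # analogue of running update_op
        for p, l in zip(predictions, labels):
            self.correct += 1.0 if p == l else 0.0
            self.total += 1.0

    def value(self):  # analogue of reading the accuracy value tensor
        return self.correct / self.total if self.total else 0.0

m = StreamingAccuracySketch()
m.update([1.0, 0.0], [1.0, 1.0])
assert m.value() == 0.5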
Example no. 5
  def testEvalOpAndFinalOp(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')

    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    # Create the model so we have something to restore.
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    logistic_classifier(inputs)

    num_evals = 5
    final_increment = 9.0

    my_var = local_variable(0.0, name='MyVar')
    eval_ops = state_ops.assign_add(my_var, 1.0)
    final_ops = array_ops.identity(my_var) + final_increment

    final_hooks = [evaluation._StopAfterNEvalsHook(num_evals),]
    initial_hooks = list(final_hooks)
    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=eval_ops,
        final_ops={'value': final_ops},
        hooks=final_hooks)
    self.assertEqual(final_ops_values['value'], num_evals + final_increment)
    self.assertEqual(initial_hooks, final_hooks)
Example no. 6
  def testMultiEvalStepIncrements(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')

    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    # Create the model so we have something to restore.
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    logistic_classifier(inputs)

    num_evals = 6

    my_var = local_variable(0.0, name='MyVar')
    # The eval ops also increment the eval step one extra time per iteration.
    eval_ops = [state_ops.assign_add(my_var, 1.0),
                state_ops.assign_add(
                    evaluation._get_or_create_eval_step(), 1, use_locking=True)]
    expect_eval_update_counts = num_evals // 2

    final_ops = array_ops.identity(my_var)

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=eval_ops,
        final_ops={'value': final_ops},
        hooks=[evaluation._StopAfterNEvalsHook(num_evals),])
    self.assertEqual(final_ops_values['value'], expect_eval_update_counts)
Example no. 7
  def testEvaluateWithFiniteInputs(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_with_finite_inputs')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Run evaluation. Inputs are fed through an input producer for one epoch.
    all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    single_input, single_label = training.slice_input_producer(
        [all_inputs, all_labels], num_epochs=1)
    inputs, labels = training.batch([single_input, single_label], batch_size=6,
                                    allow_smaller_final_batch=True)

    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metrics.accuracy(
        predictions=predictions, labels=labels)

    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy,
                   'eval_steps': evaluation._get_or_create_eval_step()},
        hooks=[evaluation._StopAfterNEvalsHook(None),])
    self.assertTrue(final_ops_values['accuracy'] > .99)
    # Evaluation runs for 4 iterations: the first 2 evaluate a full batch of 6
    # inputs each, the 3rd evaluates the remaining 4 inputs, and the 4th
    # triggers an OutOfRangeError, which stops the evaluation.
    self.assertEqual(final_ops_values['eval_steps'], 4)
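
The eval_steps == 4 assertion follows from the batch arithmetic in the comment above, assuming the fixture provides 2 * 6 + 4 = 16 examples: the single-epoch producer with batch_size=6 and allow_smaller_final_batch=True yields batches of 6, 6, and 4, and the fourth dequeue attempt raises OutOfRangeError, which ends the evaluation. A quick sketch of that count:

num_examples = 2 * 6 + 4   # inferred from the comment in the test above
batch_size = 6

batches = []
remaining = num_examples
while remaining > 0:
    batches.append(min(batch_size, remaining))
    remaining -= batch_size
eval_steps = len(batches) + 1  # plus the step that hits OutOfRangeError
assert batches == [6, 6, 4] and eval_steps == 4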
Example no. 8
  def _evaluate_model(self,
                      input_fn,
                      hooks=None,
                      checkpoint_path=None,
                      name=''):
    """Evaluates the model using the training.evaluation library."""
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise ValueError('Could not find trained model in model_dir: {}.'.
                         format(self._model_dir))
      checkpoint_path = latest_path

    # Set up the output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = self._create_and_assert_global_step(g)
      features, labels = self._get_features_and_labels_from_input_fn(
          input_fn, model_fn_lib.ModeKeys.EVAL)
      estimator_spec = self._call_model_fn(
          features, labels, model_fn_lib.ModeKeys.EVAL)

      if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
        raise ValueError(
            'Metric with name "%s" is not allowed, because Estimator ' % (
                model_fn_lib.LOSS_METRIC_KEY) +
            'already defines a default metric with the same name.')
      estimator_spec.eval_metric_ops[
          model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)

      update_op, eval_dict = _extract_metric_update_ops(
          estimator_spec.eval_metric_ops)

      if ops.GraphKeys.GLOBAL_STEP in eval_dict:
        raise ValueError(
            'Metric with name `global_step` is not allowed, because Estimator '
            'already defines a default metric with the same name.')
      eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

      eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=estimator_spec.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=self._session_config)

      _write_dict_to_summary(
          output_dir=eval_dir,
          dictionary=eval_results,
          current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

    return eval_results
Example no. 9
    def _evaluate_model(self,
                        input_fn,
                        hooks=None,
                        checkpoint_path=None,
                        name=''):
        # Check that model has been trained (if nothing has been set explicitly).
        if not checkpoint_path:
            latest_path = saver.latest_checkpoint(self._model_dir)
            if not latest_path:
                error_message = "Could not find trained model at {}.".format(
                    self._model_dir)
                raise EstimatorNotTrainedError(error_message)
            checkpoint_path = latest_path

        # Set up the output directory.
        eval_dir = os.path.join(self._model_dir,
                                'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = training.create_global_step(g)
            features, labels = input_fn()

            estimator_spec = self._call_model_fn(features, labels,
                                                 ModeKeys.EVAL)
            if model_fn_lib.MetricKeys.LOSS in estimator_spec.eval_metric_ops:
                raise ValueError(
                    "Metric with name `{}` is not allowed, because Estimator "
                    "already defines a default metric "
                    "with the same name.".format(model_fn_lib.MetricKeys.LOSS))
            estimator_spec.eval_metric_ops[
                model_fn_lib.MetricKeys.LOSS] = metrics_lib.streaming_mean(
                    estimator_spec.loss)
            update_op, eval_dict = self._extract_metric_update_ops(
                estimator_spec.eval_metric_ops)

            if ops.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError(
                    "Metric with name `global_step` is not allowed, because "
                    "Estimator already defines a default metric with the same name."
                )
            eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step

            eval_results = evaluation._evaluate_once(
                checkpoint_path=checkpoint_path,
                master=self._config.evaluation_master,
                scaffold=estimator_spec.scaffold,
                eval_ops=update_op,
                final_ops=eval_dict,
                hooks=hooks,
                config=self._session_config)

            self._write_dict_to_summary(
                output_dir=eval_dir,
                dictionary=eval_results,
                current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

            return eval_results
Example no. 10
    def _evaluate_model(self,
                        input_fn,
                        hooks=None,
                        checkpoint_path=None,
                        name=''):
        """Evaluates the model using the training.evaluation library."""
        # Check that model has been trained (if nothing has been set explicitly).
        if not checkpoint_path:
            latest_path = saver.latest_checkpoint(self._model_dir)
            if not latest_path:
                raise ValueError(
                    'Could not find trained model in model_dir: {}.'.format(
                        self._model_dir))
            checkpoint_path = latest_path

        # Set up the output directory.
        eval_dir = os.path.join(self._model_dir,
                                'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step_tensor = training.create_global_step(g)
            features, labels = input_fn()
            estimator_spec = self._call_model_fn(features, labels,
                                                 model_fn_lib.ModeKeys.EVAL)

            self._verify_default_metric_key(model_fn_lib.MetricKeys.LOSS,
                                            estimator_spec.eval_metric_ops)
            estimator_spec.eval_metric_ops[
                model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(
                    estimator_spec.loss)

            update_op, eval_dict = _extract_metric_update_ops(
                estimator_spec.eval_metric_ops)

            self._verify_default_metric_key(ops.GraphKeys.GLOBAL_STEP,
                                            eval_dict)
            eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

            eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
                checkpoint_path=checkpoint_path,
                master=self._config.evaluation_master,
                scaffold=estimator_spec.scaffold,
                eval_ops=update_op,
                final_ops=eval_dict,
                hooks=hooks,
                config=config_pb2.ConfigProto(allow_soft_placement=True))

            _write_dict_to_summary(
                output_dir=eval_dir,
                dictionary=eval_results,
                current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

        return eval_results
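
_verify_default_metric_key is not shown in this snippet. Judging from its two call sites above and from the explicit checks in the earlier examples, it is expected to raise a ValueError when a user-supplied metric collides with a key the Estimator reserves (the loss metric and global_step). A hypothetical sketch of such a check:

def verify_default_metric_key_sketch(metric_key, eval_dict):
    if metric_key in eval_dict:
        raise ValueError(
            'Metric with name `%s` is not allowed, because Estimator already '
            'defines a default metric with the same name.' % metric_key)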
Example no. 11
  def _evaluate_model(self,
                      input_fn,
                      hooks=None,
                      checkpoint_path=None,
                      name=''):
    """Evaluates the model using the training.evaluation library."""
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise ValueError('Could not find trained model in model_dir: {}.'.
                         format(self._model_dir))
      checkpoint_path = latest_path

    # Set up the output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)

    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = training.create_global_step(g)
      features, labels = input_fn()
      estimator_spec = self._call_model_fn(
          features, labels, model_fn_lib.ModeKeys.EVAL)

      self._verify_default_metric_key(model_fn_lib.MetricKeys.LOSS,
                                      estimator_spec.eval_metric_ops)
      estimator_spec.eval_metric_ops[
          model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(estimator_spec.loss)

      update_op, eval_dict = _extract_metric_update_ops(
          estimator_spec.eval_metric_ops)

      self._verify_default_metric_key(ops.GraphKeys.GLOBAL_STEP, eval_dict)
      eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

      eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=estimator_spec.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=config_pb2.ConfigProto(allow_soft_placement=True))

      _write_dict_to_summary(
          output_dir=eval_dir,
          dictionary=eval_results,
          current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

    return eval_results
Example no. 12
    def _evaluate_model(self, input_fn, hooks=None, checkpoint_path=None, name=''):
        # Check that model has been trained (if nothing has been set explicitly).
        if not checkpoint_path:
            latest_path = saver.latest_checkpoint(self._model_dir)
            if not latest_path:
                error_message = "Could not find trained model at {}.".format(self._model_dir)
                raise EstimatorNotTrainedError(error_message)
            checkpoint_path = latest_path

        # Set up the output directory.
        eval_dir = os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = training.create_global_step(g)
            features, labels = input_fn()

            estimator_spec = self._call_model_fn(features, labels, Modes.EVAL)
            if MetricKeys.LOSS in estimator_spec.eval_metric_ops:
                raise ValueError("Metric with name `{}` is not allowed, because Estimator "
                                 "already defines a default metric "
                                 "with the same name.".format(MetricKeys.LOSS))
            estimator_spec.eval_metric_ops[
                MetricKeys.LOSS] = metrics_lib.streaming_mean(estimator_spec.loss)
            update_op, eval_dict = self._extract_metric_update_ops(estimator_spec.eval_metric_ops)

            if ops.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError("Metric with name `global_step` is not allowed, because "
                                 "Estimator already defines a default metric with the same name.")
            eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step

            eval_results = evaluation._evaluate_once(
                checkpoint_path=checkpoint_path,
                master=self._config.evaluation_master,
                scaffold=estimator_spec.scaffold,
                eval_ops=update_op,
                final_ops=eval_dict,
                hooks=hooks,
                config=self._session_config)

            self._write_dict_to_summary(
                output_dir=eval_dir,
                dictionary=eval_results,
                current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

            return eval_results
Example no. 13
    def testOnlyFinalOp(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')

        # Train a model for a single step to get a checkpoint.
        self._train_model(checkpoint_dir, num_steps=1)
        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        # Create the model so we have something to restore.
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        logistic_classifier(inputs)

        final_increment = 9.0

        my_var = local_variable(0.0, name='MyVar')
        final_ops = array_ops.identity(my_var) + final_increment

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
        self.assertEqual(final_ops_values['value'], final_increment)
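
With no eval_ops supplied, nothing ever updates my_var, so final_ops reduces to the initial value of the local variable plus the constant increment. The expectation boils down to:

my_var = 0.0           # local_variable(0.0) is never updated
final_increment = 9.0
assert my_var + final_increment == final_increment  # 9.0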
Example no. 14
  def testOnlyFinalOp(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')

    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    # Create the model so we have something to restore.
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    logistic_classifier(inputs)

    final_increment = 9.0

    my_var = local_variable(0.0, name='MyVar')
    final_ops = array_ops.identity(my_var) + final_increment

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
    self.assertEqual(final_ops_values['value'], final_increment)
Example no. 15
    def testEvaluateWithFiniteInputs(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_with_finite_inputs')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run evaluation. Inputs are fed through an input producer for one epoch.
        all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        single_input, single_label = training.slice_input_producer(
            [all_inputs, all_labels], num_epochs=1)
        inputs, labels = training.batch([single_input, single_label],
                                        batch_size=6,
                                        allow_smaller_final_batch=True)

        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metrics.accuracy(predictions=predictions,
                                               labels=labels)

        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={
                'accuracy': accuracy,
                'eval_steps': evaluation._get_or_create_eval_step()
            },
            hooks=[
                evaluation._StopAfterNEvalsHook(None),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
        # Evaluation runs for 4 iterations: the first 2 evaluate a full batch
        # of 6 inputs each, the 3rd evaluates the remaining 4 inputs, and the
        # 4th triggers an OutOfRangeError, which stops the evaluation.
        self.assertEqual(final_ops_values['eval_steps'], 4)
Example no. 16
  def testEvaluatePerfectModel(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_once')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Build the evaluation graph and metrics.
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metrics.accuracy(
        predictions=predictions, labels=labels)

    checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

    final_ops_values = evaluation._evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[evaluation._StopAfterNEvalsHook(1),])
    self.assertTrue(final_ops_values['accuracy'] > .99)