Example #1
  def test_should_error_out_for_not_recognized_args(self):
    estimator = linear.LinearClassifier([fc.numeric_column('x')])

    def metric_fn(features, not_recognized):
      _, _ = features, not_recognized
      return {}

    with self.assertRaisesRegexp(ValueError, 'not_recognized'):
      estimator = extenders.add_metrics(estimator, metric_fn)
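
The test above pins down the argument contract of add_metrics: metric_fn may only declare parameters named features, labels, predictions, or config (in any combination and order), and any other parameter name raises a ValueError. The sketch below is illustrative only, an assumption about how such a signature check can be written, not the Estimator library's actual code:

import inspect

_SUPPORTED_METRIC_FN_ARGS = {'features', 'labels', 'predictions', 'config'}

def _validate_metric_fn_args(metric_fn):
  # Illustrative check: reject any parameter name outside the supported set,
  # mirroring the behavior the test asserts (the offending name appears in
  # the error message, which is what assertRaisesRegexp matches on).
  params = set(inspect.signature(metric_fn).parameters)
  unsupported = params - _SUPPORTED_METRIC_FN_ARGS
  if unsupported:
    raise ValueError('metric_fn has unsupported arguments: %s'
                     % sorted(unsupported))
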
Example #2
  def test_all_args_are_optional(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])

    def metric_fn():
      return {'two': metrics_lib.mean(constant_op.constant([2.]))}

    estimator = extenders.add_metrics(estimator, metric_fn)

    estimator.train(input_fn=input_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertEqual(2., metrics['two'])
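
get_input_fn is a helper from the test module that is not shown in these excerpts. A minimal stand-in, assuming it only needs to wrap the given in-memory arrays into an Estimator input_fn that serves the feature under the key 'x', might look like this (hypothetical; the real helper may differ in details such as dtypes and batching):

import numpy as np
import tensorflow.compat.v1 as tf

def get_input_fn(x, y):
  # Hypothetical stand-in for the test helper. The outermost dimension of x
  # and y plays the role of the batch dimension, so each slice is served as
  # one batch; an Estimator input_fn may return a tf.data.Dataset that
  # yields (features, labels) pairs.
  def input_fn():
    features = {'x': np.array(x, dtype=np.float32)}
    labels = np.array(y, dtype=np.int64)
    return tf.data.Dataset.from_tensor_slices((features, labels))
  return input_fn
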
Example #3
  def test_overrides_existing_metrics(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    estimator.train(input_fn=input_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertNotEqual(2., metrics['auc'])

    def metric_fn():
      return {'auc': metrics_lib.mean(constant_op.constant([2.]))}

    estimator = extenders.add_metrics(estimator, metric_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertEqual(2., metrics['auc'])
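
As this test shows, metrics returned by metric_fn take precedence over the estimator's built-in metrics when names collide (here, 'auc'). Conceptually this is plain dict-update semantics; a tiny sketch of the assumed merge step, consistent with what the test asserts but not taken from the library's source:

# Stand-in values; in the real code these would be (value_tensor, update_op) pairs.
builtin_metric_ops = {'auc': 'built-in auc op'}
user_metric_ops = {'auc': 'metric_fn auc op'}

merged = dict(builtin_metric_ops)
merged.update(user_metric_ops)  # on a name clash, the metric_fn entry wins
assert merged['auc'] == 'metric_fn auc op'
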
Example #4
  def test_all_supported_args_in_different_order(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])

    def metric_fn(labels, config, features, predictions):
      self.assertIn('x', features)
      self.assertIsNotNone(labels)
      self.assertIn('logistic', predictions)
      self.assertTrue(isinstance(config, estimator_lib.RunConfig))
      return {}

    estimator = extenders.add_metrics(estimator, metric_fn)

    estimator.train(input_fn=input_fn)
    estimator.evaluate(input_fn=input_fn)
Example #5
  def test_all_supported_args_in_different_order(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])

    def metric_fn(labels, config, features, predictions):
      self.assertIn('x', features)
      self.assertIsNotNone(labels)
      self.assertIn('logistic', predictions)
      self.assertTrue(isinstance(config, run_config.RunConfig))
      return {}

    estimator = extenders.add_metrics(estimator, metric_fn)

    estimator.train(input_fn=input_fn)
    estimator.evaluate(input_fn=input_fn)
Example #6
  def test_should_add_metrics(self):
    input_fn = get_input_fn(
        x=np.arange(4)[:, None, None], y=np.ones(4)[:, None])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])

    def metric_fn(features):
      return {'mean_x': metrics_lib.mean(features['x'])}

    estimator = extenders.add_metrics(estimator, metric_fn)

    estimator.train(input_fn=input_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertIn('mean_x', metrics)
    self.assertEqual(1.5, metrics['mean_x'])
    # Assert that the original estimator's metrics are kept.
    self.assertIn('auc', metrics)
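
The expected value follows directly from the input data: np.arange(4) yields [0, 1, 2, 3], whose mean is (0 + 1 + 2 + 3) / 4 = 1.5, and built-in metrics such as auc are still reported alongside the new mean_x entry.
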
Example #7
  def test_custom_metrics(self):
    """Tests that the custom metrics can be applied to the estimator."""
    model_dir = self.get_temp_dir()
    estimator = ts_estimators.TimeSeriesRegressor(
        model=lstm_example._LSTMModel(num_features=1, num_units=4),
        optimizer=adam.AdamOptimizer(0.001),
        config=estimator_lib.RunConfig(tf_random_seed=4),
        model_dir=model_dir)

    def input_fn():
      return {
          feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3], [7, 8, 9]],
          feature_keys.TrainEvalFeatures.VALUES:
              numpy.array([[[0.], [1.], [0.]], [[2.], [3.], [2.]]])
      }

    def metrics_fn(predictions, features):
      # Check that the inputs are properly passed.
      predict = predictions["mean"]
      target = features[feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
      return {
          "plain_boring_metric386":
              (math_ops.reduce_mean(math_ops.abs(predict - target)),
               control_flow_ops.no_op()),
          "fun_metric101": (math_ops.reduce_sum(predict + target),
                            control_flow_ops.no_op()),
      }

    # Evaluation without training is enough for testing custom metrics.
    estimator = extenders.add_metrics(estimator, metrics_fn)
    evaluation = estimator.evaluate(input_fn, steps=1)
    self.assertIn("plain_boring_metric386", evaluation)
    self.assertIn("fun_metric101", evaluation)
    self.assertIn("average_loss", evaluation)
    # The values are deterministic because of fixed tf_random_seed.
    # However, if they become flaky, remove such exact comparisons.
    self.assertAllClose(evaluation["plain_boring_metric386"], 1.130380)
    self.assertAllClose(evaluation["fun_metric101"], 10.435442)
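
For reference, here is a minimal non-test usage sketch of the API these examples exercise. It assumes the public tf.estimator.add_metrics wrapper (tf.contrib.estimator.add_metrics in older releases) and uses a placeholder input function name:

import tensorflow.compat.v1 as tf

def extra_metrics(predictions):
  # metric_fn may accept any subset of {features, labels, predictions, config};
  # each returned entry maps a metric name to a (value_tensor, update_op) pair.
  return {'mean_logistic': tf.metrics.mean(predictions['logistic'])}

estimator = tf.estimator.LinearClassifier(
    feature_columns=[tf.feature_column.numeric_column('x')])
estimator = tf.estimator.add_metrics(estimator, extra_metrics)
# estimator.train(input_fn=my_input_fn)            # my_input_fn: placeholder
# print(estimator.evaluate(input_fn=my_input_fn))  # now also reports 'mean_logistic'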