Example 1
        def _test_metric_fn(metric_fn):
            input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
            estimator = linear.LinearClassifierV2([fc.numeric_column('x')])
            estimator = extenders.add_metrics(estimator, metric_fn)

            estimator.train(input_fn=input_fn)
            metrics = estimator.evaluate(input_fn=input_fn)
            self.assertEqual(2., metrics['two'])
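This excerpt relies on two fixtures that are not shown: the get_input_fn helper and the metric_fn passed to _test_metric_fn. A minimal sketch of both, assuming a tf.data based input function and a Keras Mean metric; the bodies below are illustrative assumptions, not the original test code:

import tensorflow as tf


def get_input_fn(x, y):
    # Hypothetical helper: wraps the nested lists in a tf.data pipeline where
    # each slice along the first axis becomes one (features, labels) batch.
    def input_fn():
        return tf.data.Dataset.from_tensor_slices(({'x': x}, y))
    return input_fn


def metric_fn(features):
    # Hypothetical metric_fn: with x fixed at 0., the running mean of
    # features['x'] + 2. is exactly 2., matching the assertion on
    # metrics['two'] above.
    metric = tf.keras.metrics.Mean()
    metric.update_state(features['x'] + 2.)
    return {'two': metric}
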
Example 2
    def test_should_error_out_for_not_recognized_args(self):
        estimator = linear.LinearClassifierV2([fc.numeric_column('x')])

        def metric_fn(features, not_recognized):
            _, _ = features, not_recognized
            return {}

        with self.assertRaisesRegex(ValueError, 'not_recognized'):
            estimator = extenders.add_metrics(estimator, metric_fn)
Example 3
    def test_all_supported_args(self):
        input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
        estimator = linear.LinearClassifierV2([fc.numeric_column('x')])

        def metric_fn(features, predictions, labels, config):
            self.assertIn('x', features)
            self.assertIsNotNone(labels)
            self.assertIn('logistic', predictions)
            self.assertIsInstance(config, run_config.RunConfig)
            return {}

        estimator = extenders.add_metrics(estimator, metric_fn)

        estimator.train(input_fn=input_fn)
        estimator.evaluate(input_fn=input_fn)
Example 4
        def _test_metric_fn(metric_fn):
            input_fn = get_input_fn(x=np.arange(4)[:, None, None],
                                    y=np.ones(4)[:, None])
            config = run_config.RunConfig(log_step_count_steps=1)
            estimator = linear.LinearClassifierV2([fc.numeric_column('x')],
                                                  config=config)

            estimator = extenders.add_metrics(estimator, metric_fn)

            estimator.train(input_fn=input_fn)
            metrics = estimator.evaluate(input_fn=input_fn)
            self.assertIn('mean_x', metrics)
            self.assertEqual(1.5, metrics['mean_x'])
            # Assert that the original estimator's metrics are kept.
            self.assertIn('auc', metrics)
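As in Example 1, the metric_fn under test is not shown. One hypothetical definition that would satisfy the mean_x assertions, assuming a Keras Mean metric over the 'x' feature (an illustrative stand-in, not the original fixture):

import tensorflow as tf


def metric_fn(features):
    # Hypothetical metric_fn: averages the 'x' feature across the four
    # evaluation steps; with x taking the values 0., 1., 2., 3., the reported
    # 'mean_x' is 1.5, while the estimator's own metrics (e.g. 'auc') are
    # still emitted alongside it.
    metric = tf.keras.metrics.Mean()
    metric.update_state(features['x'])
    return {'mean_x': metric}
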
Example 5
  def test_custom_metrics(self):
    """Tests that the custom metrics can be applied to the estimator."""
    model_dir = self.get_temp_dir()
    estimator = ts_estimators.LSTMAutoRegressor(
        periodicities=1,
        input_window_size=1,
        output_window_size=1,
        num_features=1,
        num_units=4,
        optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
        config=estimator_lib.RunConfig(tf_random_seed=4),
        model_dir=model_dir)

    def input_fn():
      return {
          feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3], [7, 8, 9]],
          feature_keys.TrainEvalFeatures.VALUES:
              numpy.array([[[0.], [1.], [0.]], [[2.], [3.], [2.]]])
      }

    def metrics_fn(predictions, features):
      # Check that the inputs are passed through properly.
      predict = predictions["mean"]
      target = features[feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
      return {
          "plain_boring_metric386":
              (tf.math.reduce_mean(tf.math.abs(predict - target)),
               tf.no_op()),
          "fun_metric101": (tf.math.reduce_sum(predict + target),
                            tf.no_op()),
      }

    # Evaluation without training is enough for testing custom metrics.
    estimator = extenders.add_metrics(estimator, metrics_fn)
    evaluation = estimator.evaluate(input_fn, steps=1)
    self.assertIn("plain_boring_metric386", evaluation)
    self.assertIn("fun_metric101", evaluation)
    self.assertIn("average_loss", evaluation)
Example 6
    def test_canned_estimator(self, distribution, estimator_cls):
        label_dimension = 2
        batch_size = 10
        # Adding one extra row (+ label_dimension) to test the last partial batch
        # use case.
        data = np.linspace(0.,
                           2.,
                           batch_size * label_dimension + label_dimension,
                           dtype=np.float32)
        data = data.reshape(batch_size + 1, label_dimension)
        fc = tf.feature_column.numeric_column('x', shape=(2, ))

        # Set kwargs based on the current canned estimator class.
        estimator_kw_args = {
            'model_dir': self._model_dir,
            'label_dimension': 2,
        }

        cls_args = inspect.getfullargspec(estimator_cls.__init__).args
        if 'hidden_units' in cls_args:
            estimator_kw_args['hidden_units'] = [2, 2]
        elif 'dnn_hidden_units' in cls_args:
            estimator_kw_args['dnn_hidden_units'] = [2, 2]

        if 'optimizer' in cls_args:
            estimator_kw_args['optimizer'] = 'SGD'
        else:
            estimator_kw_args['linear_optimizer'] = 'SGD'
            estimator_kw_args['dnn_optimizer'] = 'SGD'

        if 'feature_columns' in cls_args:
            estimator_kw_args['feature_columns'] = [fc]
        else:
            estimator_kw_args['linear_feature_columns'] = [fc]
            estimator_kw_args['dnn_feature_columns'] = [fc]

        def my_metrics(features):
            metric = metrics.Mean()
            metric.update_state(features['x'])
            return {'mean_x': metric}

        # Create a canned estimator and train to save a checkpoint.
        input_fn = self.dataset_input_fn(x={'x': data},
                                         y=data,
                                         batch_size=batch_size,
                                         shuffle=False)
        canned_est = estimator_cls(**estimator_kw_args)
        canned_est.train(input_fn=input_fn)

        # Create a second canned estimator, warm-started from the first.
        del estimator_kw_args['model_dir']
        estimator_kw_args['warm_start_from'] = canned_est.model_dir
        warm_started_canned_est = estimator_cls(**estimator_kw_args)
        warm_started_canned_est.train(input_fn=input_fn)

        # Create a third canned estimator, warm-started from the first.
        input_fn = self.dataset_input_fn(x={'x': data},
                                         y=data,
                                         batch_size=batch_size //
                                         distribution.num_replicas_in_sync,
                                         shuffle=False)
        estimator_kw_args['config'] = run_config.RunConfig(
            train_distribute=distribution, eval_distribute=distribution)
        warm_started_canned_est_with_ds = estimator_cls(**estimator_kw_args)
        warm_started_canned_est_with_ds.train(input_fn=input_fn)

        for variable_name in warm_started_canned_est.get_variable_names():
            self.assertAllClose(
                warm_started_canned_est_with_ds.get_variable_value(
                    variable_name),
                warm_started_canned_est.get_variable_value(variable_name))

        warm_started_canned_est = add_metrics(warm_started_canned_est,
                                              my_metrics)
        warm_started_canned_est_with_ds = add_metrics(
            warm_started_canned_est_with_ds, my_metrics)

        scores = warm_started_canned_est.evaluate(input_fn)
        scores_with_ds = warm_started_canned_est_with_ds.evaluate(input_fn)
        self.assertAlmostEqual(scores['loss'], scores_with_ds['loss'], 5)
        self.assertAlmostEqual(scores['mean_x'], scores_with_ds['mean_x'], 5)
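The dataset_input_fn helper used above is not part of the excerpt (in the test it is a method on the test class). A plausible sketch, written here as a free function and assuming it simply wraps the in-memory arrays in a batched tf.data pipeline; the shuffle buffer size is an illustrative assumption:

import tensorflow as tf


def dataset_input_fn(x, y, batch_size, shuffle):
    # Hypothetical helper: builds an input_fn over the in-memory feature dict
    # and labels. With 11 rows and batch_size=10, the final batch is partial,
    # which is the case the comment at the top of the test exercises.
    def input_fn():
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        if shuffle:
            dataset = dataset.shuffle(buffer_size=batch_size)
        return dataset.batch(batch_size)
    return input_fn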