Example #1
def BuildDiagnosticTable(
        # pylint: disable=invalid-name
        examples,
        eval_saved_model_path,
        desired_batch_size=None):
    """Build diagnostics for the spacified EvalSavedModel and example collection.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_saved_model_path: Path to EvalSavedModel. This directory should contain
      the saved_model.pb file.
    desired_batch_size: Optional batch size for batching in Predict and
      Aggregate.

  Returns:
    PCollection of ExampleAndExtracts
  """
    return (
        examples
        | 'ToExampleAndExtracts' >>
        beam.Map(lambda x: types.ExampleAndExtracts(example=x, extracts={}))
        | 'Predict' >> predict_extractor.TFMAPredict(
            eval_saved_model_path,
            add_metrics_callbacks=None,
            shared_handle=shared.Shared(),
            desired_batch_size=desired_batch_size)
        | 'ExtractFeatures' >> feature_extractor.ExtractFeatures())
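
A minimal usage sketch follows. As written here, BuildDiagnosticTable is a plain function, so it is called with the examples PCollection as its first argument (in the TFMA source such helpers are typically wrapped with @beam.ptransform_fn, in which case they would appear on the right-hand side of a pipe instead). The model path and the input element below are placeholders, and the TFMA modules used above (types, predict_extractor, feature_extractor, shared) are assumed to be imported.

import apache_beam as beam

# Hypothetical driver code; the path and input below are placeholders.
eval_saved_model_path = '/tmp/eval_saved_model'
serialized_examples = ['REPLACE_WITH_A_SERIALIZED_EXAMPLE']

with beam.Pipeline() as pipeline:
  examples = pipeline | 'CreateExamples' >> beam.Create(serialized_examples)
  diagnostics = BuildDiagnosticTable(
      examples,
      eval_saved_model_path=eval_saved_model_path,
      desired_batch_size=100)
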
Example #2
    def testPredict(self):
        temp_eval_export_dir = self._getEvalExportDir()
        _, eval_export_dir = linear_classifier.simple_linear_classifier(
            None, temp_eval_export_dir)

        with beam.Pipeline() as pipeline:
            example1 = self._makeExample(age=3.0,
                                         language='english',
                                         label=1.0)
            example2 = self._makeExample(age=3.0,
                                         language='chinese',
                                         label=0.0)
            example3 = self._makeExample(age=4.0,
                                         language='english',
                                         label=1.0)
            example4 = self._makeExample(age=5.0,
                                         language='chinese',
                                         label=0.0)

            predict_extracts = (
                pipeline
                | beam.Create([
                    example1.SerializeToString(),
                    example2.SerializeToString(),
                    example3.SerializeToString(),
                    example4.SerializeToString()
                ])
                # Our diagnostic outputs pass types.ExampleAndExtracts
                # throughout; however, our aggregating functions do not use
                # this interface.
                | beam.Map(
                    lambda x: types.ExampleAndExtracts(example=x, extracts={}))
                | 'Predict' >> predict_extractor.TFMAPredict(
                    eval_saved_model_path=eval_export_dir,
                    add_metrics_callbacks=None,
                    shared_handle=shared.Shared(),
                    desired_batch_size=3))

            def check_result(got):
                try:
                    self.assertEqual(4, len(got), 'got: %s' % got)
                    for item in got:
                        extracts_dict = item.extracts
                        self.assertIn('fpl', extracts_dict)
                        fpl = extracts_dict['fpl']
                        # Verify fpl contains features, probabilities, and correct labels.
                        self.assertIn('language', fpl.features)
                        self.assertIn('age', fpl.features)
                        self.assertIn('label', fpl.features)
                        self.assertIn('probabilities', fpl.predictions)
                        self.assertAlmostEqual(fpl.features['label'],
                                               fpl.labels['__labels'])
                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(predict_extracts, check_result)
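
The assertions above read values out of the FeaturesPredictionsLabels ('fpl') extract attached by TFMAPredict. Below is a rough, hypothetical helper illustrating how those values are reached; the key names are taken from the assertions above, while the exact layout of the stored values depends on the model's eval_input_receiver_fn and is an assumption here.

def summarize_fpl(example_and_extracts):
  # Hypothetical helper, not part of the test: gather the values that the
  # assertions above inspect.
  fpl = example_and_extracts.extracts['fpl']
  return {
      'age': fpl.features['age'],
      'language': fpl.features['language'],
      'probabilities': fpl.predictions['probabilities'],
      'label': fpl.labels['__labels'],
  }
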
Example #3
def PredictExtractor(eval_saved_model_path, add_metrics_callbacks,
                     shared_handle, desired_batch_size):
    # Map function which loads and runs the eval_saved_model against every
    # example, yielding a types.ExampleAndExtracts containing a
    # FeaturesPredictionsLabels value (where key is 'fpl').
    return types.Extractor(stage_name='Predict',
                           ptransform=predict_extractor.TFMAPredict(
                               eval_saved_model_path=eval_saved_model_path,
                               add_metrics_callbacks=add_metrics_callbacks,
                               shared_handle=shared_handle,
                               desired_batch_size=desired_batch_size))
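
A short sketch of how such an Extractor value might be combined with the feature-extraction stage from the first example. The list-of-extractors pattern shown here is illustrative (a downstream pipeline consuming types.Extractor values is assumed), and the model path is a placeholder.

# Illustrative composition; module names match the snippets above.
extractors = [
    PredictExtractor(
        eval_saved_model_path='/tmp/eval_saved_model',  # placeholder path
        add_metrics_callbacks=None,
        shared_handle=shared.Shared(),
        desired_batch_size=100),
    types.Extractor(
        stage_name='ExtractFeatures',
        ptransform=feature_extractor.ExtractFeatures()),
]
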
Example #4
  def testPredictMultipleExampleRefPerRawExampleBytes(self):
    temp_eval_export_dir = self._getEvalExportDir()
    _, eval_export_dir = (
        fake_multi_examples_per_input_estimator
        .fake_multi_examples_per_input_estimator(None, temp_eval_export_dir))
    eval_shared_model = types.EvalSharedModel(model_path=eval_export_dir)

    # The trailing zeros make an "empty" output batch.
    raw_example_bytes = ['0', '3', '1', '0', '2', '0', '0', '0', '0']

    def check_result(got):
      try:
        self.assertEqual(6, len(got), 'got: %s' % got)
        self.assertEqual(
            ['3', '3', '3', '1', '2', '2'],
            [example_and_extracts.example for example_and_extracts in got])

        for item in got:
          extracts_dict = item.extracts
          self.assertTrue('fpl' in extracts_dict)
          fpl = extracts_dict['fpl']
          self.assertIn('input_index', fpl.features)
          self.assertIn('example_count', fpl.features)
          self.assertIn('intra_input_index', fpl.features)

      except AssertionError as err:
        raise util.BeamAssertException(err)

    with beam.Pipeline() as pipeline:
      predict_extracts = (
          pipeline
          | beam.Create(raw_example_bytes)
          # Our diagnostic outputs pass types.ExampleAndExtracts throughout;
          # however, our aggregating functions do not use this interface.
          | beam.Map(lambda x: types.ExampleAndExtracts(example=x, extracts={}))
          | 'Predict' >> predict_extractor.TFMAPredict(
              eval_shared_model=eval_shared_model, desired_batch_size=3))

      util.assert_that(predict_extracts, check_result)
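
To make the expected output above easier to follow: as the fake estimator's name suggests, each raw input string 'N' fans out into N output examples, so the six expected examples can be derived directly from raw_example_bytes. A small standalone sketch of that arithmetic (not part of the test):

# Each raw input 'N' yields N output examples; '0' inputs yield none.
raw_example_bytes = ['0', '3', '1', '0', '2', '0', '0', '0', '0']
expected = [b for b in raw_example_bytes for _ in range(int(b))]
assert expected == ['3', '3', '3', '1', '2', '2']
assert len(expected) == 6
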
Example #5
def Evaluate(
    # pylint: disable=invalid-name
    examples,
    eval_saved_model_path,
    add_metrics_callbacks=None,
    slice_spec=None,
    desired_batch_size=None,
):
    """Evaluate the given EvalSavedModel on the given examples.

  This is for TFMA use only. Users should call tfma.EvaluateAndWriteResults
  instead of this function.

  Args:
    examples: PCollection of input examples. Can be any format the model accepts
      (e.g. string containing CSV row, TensorFlow.Example, etc).
    eval_saved_model_path: Path to EvalSavedModel. This directory should contain
      the saved_model.pb file.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph. The names of the metrics added by the callbacks
      should not conflict with existing metrics, or metrics added by other
      callbacks. See below for more details about what each callback should do.
    slice_spec: Optional list of SingleSliceSpec specifying the slices to slice
      the data into. If None, defaults to the overall slice.
    desired_batch_size: Optional batch size for batching in Predict and
      Aggregate.

  More details on add_metrics_callbacks:

    Each add_metrics_callback should have the following prototype:
      def add_metrics_callback(features_dict, predictions_dict, labels_dict):

    Note that features_dict, predictions_dict and labels_dict are not
    necessarily dictionaries - they might also be Tensors, depending on what the
    model's eval_input_receiver_fn returns.

    It should create and return a metric_ops dictionary, such that
    metric_ops['metric_name'] = (value_op, update_op), just as in the Trainer.

    Short example:

    def add_metrics_callback(features_dict, predictions_dict, labels):
      metrics_ops = {}
      metric_ops['mean_label'] = tf.metrics.mean(labels)
      metric_ops['mean_probability'] = tf.metrics.mean(tf.slice(
        predictions_dict['probabilities'], [0, 1], [2, 1]))
      return metric_ops

  Returns:
    DoOutputsTuple. The tuple entries are
    PCollection of (slice key, metrics) and
    PCollection of (slice key, plot metrics).
  """
    if slice_spec is None:
        slice_spec = [slicer.SingleSliceSpec()]

    shared_handle = shared.Shared()

    # pylint: disable=no-value-for-parameter
    return (
        examples
        # Our diagnostic outputs pass types.ExampleAndExtracts throughout;
        # however, our aggregating functions do not use this interface.
        | 'ToExampleAndExtracts' >>
        beam.Map(lambda x: types.ExampleAndExtracts(example=x, extracts={}))

        # Map function which loads and runs the eval_saved_model against every
        # example, yielding a types.ExampleAndExtracts containing a
        # FeaturesPredictionsLabels value (where key is 'fpl').
        | 'Predict' >> predict_extractor.TFMAPredict(
            eval_saved_model_path=eval_saved_model_path,
            add_metrics_callbacks=add_metrics_callbacks,
            shared_handle=shared_handle,
            desired_batch_size=desired_batch_size)

        # Input: one example fpl at a time
        # Output: one fpl example per slice key (notice that the example turns
        #         into n, replicated once per applicable slice key)
        | 'Slice' >> slice_api.Slice(slice_spec)

        # Each slice key lands on one shard where metrics are computed for all
        # examples in that shard -- the "map" and "reduce" parts of the
        # computation happen within this shard.
        # Output: Tuple[slicer.SliceKeyType, MetricVariablesType]
        | 'Aggregate' >> _Aggregate(
            eval_saved_model_path=eval_saved_model_path,
            add_metrics_callbacks=add_metrics_callbacks,
            shared_handle=shared_handle,
            desired_batch_size=desired_batch_size)

        # Different metrics for a given slice key are brought together.
        | 'ExtractOutput' >> _ExtractOutput(
            eval_saved_model_path=eval_saved_model_path,
            add_metrics_callbacks=add_metrics_callbacks,
            shared_handle=shared_handle))
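
Finally, a minimal sketch of driving Evaluate from a pipeline. Per the docstring, end users should call tfma.EvaluateAndWriteResults instead; the model path and input element here are placeholders, and the TFMA modules used inside Evaluate are assumed to be imported as above.

import apache_beam as beam

# Hypothetical driver code; the path and input below are placeholders.
with beam.Pipeline() as pipeline:
  examples = pipeline | 'CreateExamples' >> beam.Create(
      ['REPLACE_WITH_A_SERIALIZED_EXAMPLE'])
  results = Evaluate(
      examples,
      eval_saved_model_path='/tmp/eval_saved_model',
      desired_batch_size=100)
  # Per the docstring, `results` is a DoOutputsTuple whose entries are the
  # (slice key, metrics) and (slice key, plot metrics) PCollections.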