Example #1
 def testGetModelAndOutputNamesMultiModel(self):
     eval_config = config_pb2.EvalConfig(model_specs=[
         config_pb2.ModelSpec(name=constants.BASELINE_KEY),
         config_pb2.ModelSpec(name=constants.CANDIDATE_KEY)
     ])
     self.assertEqual([(constants.BASELINE_KEY, None),
                       (constants.CANDIDATE_KEY, None)],
                      util.StandardExtracts({
                          constants.PREDICTIONS_KEY: {
                              constants.BASELINE_KEY: np.array([]),
                              constants.CANDIDATE_KEY: np.array([])
                          }
                      }).get_model_and_output_names(eval_config))
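For contrast, here is a minimal sketch of the single-model, single-output case (the import paths are assumptions and may differ across TFMA versions): with a bare prediction array and a default ModelSpec, get_model_and_output_names is expected to report no explicit model or output name.

import numpy as np
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.utils import util

# Default ModelSpec: no model name, no output names.
eval_config = config_pb2.EvalConfig(model_specs=[config_pb2.ModelSpec()])
extracts = util.StandardExtracts(
    {constants.PREDICTIONS_KEY: np.array([0.5, 0.8])})
# Expected to yield [(None, None)] for a single unnamed model and output.
print(extracts.get_model_and_output_names(eval_config))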
Example #2
    def _data_spec_to_model_spec(
            self,
            me_data_spec: me_proto.DataSpec,
            model_name: Optional[str] = None) -> Optional[config_pb2.ModelSpec]:
        """Convert ME DataSpec into TFMA ModelSpec.

    Args:
      me_data_spec: Input ME DataSpec.
      model_name: Input model name.

    Returns:
      TFMA ModelSpec.
    """
        if not me_data_spec:
            return None
        tfma_model_spec = config_pb2.ModelSpec()
        if model_name:
            tfma_model_spec.name = model_name
        if me_data_spec.HasField('label_key_spec'):
            tfma_model_spec.label_key = ColumnSpec(
                me_data_spec.label_key_spec).as_string() + (
                    constants.Data.K_HOT_LABEL_KEY_SUFFIX
                    if self._class_name_list else '')
        if me_data_spec.HasField('predicted_score_key_spec'):
            tfma_model_spec.prediction_key = ColumnSpec(
                me_data_spec.predicted_score_key_spec).as_string() + (
                    constants.Data.POINT_KEY_SUFFIX if model_name
                    and model_name.endswith(constants.Data.POINT_KEY_SUFFIX)
                    else '')
        if me_data_spec.HasField('example_weight_key_spec'):
            tfma_model_spec.example_weight_key = ColumnSpec(
                me_data_spec.example_weight_key_spec).as_string()
        return tfma_model_spec
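The converter above ultimately just populates a TFMA ModelSpec proto. As a point of reference, a hand-built equivalent looks like this (the column names are hypothetical placeholders standing in for the ColumnSpec-derived strings):

from tensorflow_model_analysis.proto import config_pb2

tfma_model_spec = config_pb2.ModelSpec(
    name='candidate_model',            # set only when model_name is given
    label_key='ground_truth',          # from label_key_spec
    prediction_key='predicted_score',  # from predicted_score_key_spec
    example_weight_key='weight')       # from example_weight_key_spec
print(tfma_model_spec)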
Example #3
 def testGetModelAndOutputNamesEmptyPredictions(self):
     eval_config = config_pb2.EvalConfig(
         model_specs=[config_pb2.ModelSpec()])
     self.assertEmpty(
         util.StandardExtracts({
             constants.PREDICTIONS_KEY: {}
         }).get_model_and_output_names(eval_config))
Example #4
    def testStandardMetricInputsWithCustomLabelKeys(self):
        example = metric_types.StandardMetricInputs(
            labels={
                'custom_label': np.array([2]),
                'other_label': np.array([0])
            },
            predictions={'custom_prediction': np.array([0, 0.5, 0.3, 0.9])},
            example_weights=np.array([1.0]))
        eval_config = config_pb2.EvalConfig(model_specs=[
            config_pb2.ModelSpec(label_key='custom_label',
                                 prediction_key='custom_prediction')
        ])
        iterator = metric_util.to_label_prediction_example_weight(
            example, eval_config=eval_config)

        for expected_label, expected_prediction in zip((0.0, 0.0, 1.0, 0.0),
                                                       (0.0, 0.5, 0.3, 0.9)):
            got_label, got_pred, got_example_weight = next(iterator)
            self.assertAllClose(got_label,
                                np.array([expected_label]),
                                atol=0,
                                rtol=0)
            self.assertAllClose(got_pred,
                                np.array([expected_prediction]),
                                atol=0,
                                rtol=0)
            self.assertAllClose(got_example_weight,
                                np.array([1.0]),
                                atol=0,
                                rtol=0)
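For comparison, a minimal sketch of the default-key path (modules assumed to match the test above): when labels and predictions are passed as plain arrays and the ModelSpec names no custom keys, the iterator is expected to yield one (label, prediction, example_weight) triple per prediction value.

example = metric_types.StandardMetricInputs(
    labels=np.array([1.0]),
    predictions=np.array([0.7]),
    example_weights=np.array([1.0]))
eval_config = config_pb2.EvalConfig(model_specs=[config_pb2.ModelSpec()])
for label, prediction, weight in (
        metric_util.to_label_prediction_example_weight(
            example, eval_config=eval_config)):
    # Expected roughly: label=[1.], prediction=[0.7], weight=[1.]
    print(label, prediction, weight)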
Example #5
 def testGetFeatureValuesForModelSpecFieldNoValues(self):
     model_spec = config_pb2.ModelSpec(name='model1',
                                       example_weight_key='feature2')
     extracts = {}
     got = model_util.get_feature_values_for_model_spec_field(
         [model_spec], 'example_weight', 'example_weights', extracts)
     self.assertIsNone(got)
Example #6
    def test_features_extractor_no_features(self):
        model_spec = config_pb2.ModelSpec()
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        tfx_io = tf_example_record.TFExampleBeamRecord(
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            physical_format='inmem',
            telemetry_descriptors=['testing'])

        with beam.Pipeline() as pipeline:
            result = (
                pipeline | 'Create' >> beam.Create([b''] * 3)
                | 'DecodeToRecordBatch' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform)

            def check_result(got):
                self.assertLen(got, 1)
                self.assertIn(constants.FEATURES_KEY, got[0])
                self.assertEmpty(got[0][constants.FEATURES_KEY])
                self.assertIn(constants.INPUT_KEY, got[0])
                self.assertLen(got[0][constants.INPUT_KEY], 3)

            util.assert_that(result, check_result, label='CheckResult')
Example #7
    def testSliceKeys(self, model_names, extracts, slice_specs,
                      expected_slices):
        eval_config = config_pb2.EvalConfig(model_specs=[
            config_pb2.ModelSpec(name=name) for name in model_names
        ])
        with beam.Pipeline() as pipeline:
            slice_keys_extracts = (
                pipeline
                | 'CreateTestInput' >> beam.Create(extracts)
                | 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
                    slice_spec=slice_specs, eval_config=eval_config))

            def check_result(got):
                try:
                    self.assertLen(got, 2)
                    got_results = []
                    for item in got:
                        self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
                        got_results.append(
                            sorted(item[constants.SLICE_KEY_TYPES_KEY]))
                    self.assertCountEqual(got_results, expected_slices)
                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(slice_keys_extracts, check_result)
Example #8
    def testModelSignaturesDoFn(self, save_as_keras, signature_names,
                                default_signature_names, prefer_dict_outputs,
                                use_schema, expected_num_outputs):
        export_path = self.createModelWithMultipleDenseInputs(save_as_keras)
        eval_shared_models = {}
        model_specs = []
        for sigs in signature_names.values():
            for model_name in sigs:
                if model_name not in eval_shared_models:
                    eval_shared_models[
                        model_name] = self.createTestEvalSharedModel(
                            eval_saved_model_path=export_path,
                            model_name=model_name,
                            tags=[tf.saved_model.SERVING])
                    model_specs.append(config_pb2.ModelSpec(name=model_name))
        eval_config = config_pb2.EvalConfig(model_specs=model_specs)
        schema = self.createDenseInputsSchema() if use_schema else None
        tfx_io = tf_example_record.TFExampleBeamRecord(
            physical_format='text',
            schema=schema,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(input_1=1.0, input_2=2.0),
            self._makeExample(input_1=3.0, input_2=4.0),
            self._makeExample(input_1=5.0, input_2=6.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (pipeline
                      | 'Create' >> beam.Create(
                          [e.SerializeToString() for e in examples])
                      | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                      | 'ToExtracts' >> beam.Map(_record_batch_to_extracts)
                      | 'ModelSignatures' >> beam.ParDo(
                          model_util.ModelSignaturesDoFn(
                              eval_config=eval_config,
                              eval_shared_models=eval_shared_models,
                              signature_names=signature_names,
                              default_signature_names=default_signature_names,
                              prefer_dict_outputs=prefer_dict_outputs)))

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for key in signature_names:
                        self.assertIn(key, got[0])
                        if prefer_dict_outputs:
                            self.assertIsInstance(got[0][key], dict)
                            self.assertEqual(tfma_util.batch_size(got[0][key]),
                                             expected_num_outputs)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Example #9
  def testBatchSizeLimit(self):
    temp_export_dir = self._getExportDir()
    _, export_dir = batch_size_limited_classifier.simple_batch_size_limited_classifier(
        None, temp_export_dir)
    eval_shared_model = self.createTestEvalSharedModel(
        eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
    eval_config = config_pb2.EvalConfig(model_specs=[config_pb2.ModelSpec()])
    schema = text_format.Parse(
        """
        feature {
          name: "classes"
          type: BYTES
        }
        feature {
          name: "scores"
          type: FLOAT
        }
        feature {
          name: "labels"
          type: BYTES
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
        arrow_schema=tfx_io.ArrowSchema(),
        tensor_representations=tfx_io.TensorRepresentations())
    feature_extractor = features_extractor.FeaturesExtractor(
        eval_config=eval_config,
        tensor_representations=tensor_adapter_config.tensor_representations)
    prediction_extractor = predictions_extractor.PredictionsExtractor(
        eval_config=eval_config, eval_shared_model=eval_shared_model)

    examples = []
    for _ in range(4):
      examples.append(
          self._makeExample(classes='first', scores=0.0, labels='third'))

    with beam.Pipeline() as pipeline:
      predict_extracts = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=1)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | feature_extractor.stage_name >> feature_extractor.ptransform
          | prediction_extractor.stage_name >> prediction_extractor.ptransform)

      def check_result(got):
        try:
          self.assertLen(got, 4)
          # We can't verify the actual predictions, but we can verify the keys.
          for item in got:
            self.assertIn(constants.PREDICTIONS_KEY, item)

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(predict_extracts, check_result, label='result')
Example #10
    def testCustomTFMetricWithPadding(self, example_indices, expected):
        computation = tf_metric_wrapper.tf_metric_computations(
            [
                _CustomMetric(name='custom_label', update_y_pred=False),
                _CustomMetric(name='custom_pred', update_y_pred=True),
            ],
            eval_config=config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(padding_options=config_pb2.PaddingOptions(
                    label_int_padding=-1,
                    prediction_float_padding=-1.0,
                ))
            ]),
            example_weighted=True)[0]

        examples = [{
            'labels': np.array([1], dtype=np.int64),
            'predictions': np.array([0.1, 0.2, 0.3, 0.0]),
            'example_weights': np.array([1.0])
        }, {
            'labels': np.array([1, 2], dtype=np.int64),
            'predictions': np.array([0.1, 0.2, 0.0]),
            'example_weights': np.array([1.0])
        }, {
            'labels': np.array([1, 2, 3], dtype=np.int64),
            'predictions': np.array([0.1, 0.2, 0.3]),
            'example_weights': np.array([2.0])
        }]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                |
                'Create' >> beam.Create([examples[i] for i in example_indices])
                | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
                | 'AddSlice' >> beam.Map(lambda x: ((), x))
                | 'Combine' >> beam.CombinePerKey(computation.combiner))

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    got_slice_key, got_metrics = got[0]
                    self.assertEqual(got_slice_key, ())

                    custom_label_key = metric_types.MetricKey(
                        name='custom_label', example_weighted=True)
                    custom_pred_key = metric_types.MetricKey(
                        name='custom_pred', example_weighted=True)
                    self.assertDictElementsAlmostEqual(got_metrics, expected)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Example #11
 def testGetFeatureValuesForModelSpecFieldNoValues(self):
     model_spec = config_pb2.ModelSpec(name='model1',
                                       example_weight_key='feature2')
     extracts = {
         constants.ARROW_RECORD_BATCH_KEY:
         pa.RecordBatch.from_arrays([pa.array([1])], ['dummy']),
     }
     got = model_util.get_feature_values_for_model_spec_field(
         [model_spec], 'example_weight', 'example_weights', extracts)
     self.assertIsNone(got)
Example #12
 def testGetModelAndOutputNamesMultiOutput(self):
     eval_config = config_pb2.EvalConfig(
         model_specs=[config_pb2.ModelSpec()])
     self.assertEqual([(None, 'output1'), (None, 'output2')],
                      util.StandardExtracts({
                          constants.PREDICTIONS_KEY: {
                              'output1': np.array([]),
                              'output2': np.array([])
                          }
                      }).get_model_and_output_names(eval_config))
Example #13
def get_evaluation_config(
    problem_type: constants.ProblemType,
    evaluation_column_specs: EvaluationColumnSpecs,
    slice_features: List[List[ColumnSpec]],
    class_names: Optional[List[Text]] = None,
    positive_class_names: Optional[List[Text]] = None,
    top_k_list: Optional[List[int]] = None
) -> model_evaluation_pb2.EvaluationConfig:
  """Build a Model Evaluation Configuration.

  Args:
    problem_type: One of the ProblemType enum.
    evaluation_column_specs: column specs necessary for parsing evaluation data.
    slice_features: List of slice specs, each a list of keys to slice. The
      default slice over all values will automatically be added.
    class_names: For classification-type problems, a list of string names for
      classes.
    positive_class_names: For classification-type problems, a list of string
      names for classes to be treated as positively valued.
    top_k_list: For classification-type problems, if specified, a list of top-k
      aggregations.

  Returns:
    An EvaluationConfig.
  """
  tfma_eval_config = config_pb2.EvalConfig()

  tfma_eval_config.model_specs.append(
      config_pb2.ModelSpec(
          prediction_key=evaluation_column_specs.predicted_score_column_spec
          .as_string(),
          prediction_keys=None,
          label_key=evaluation_column_specs.ground_truth_column_spec.as_string(
          ),
          label_keys=None))

  metric_specs = _get_metric_specs(problem_type, class_names,
                                   positive_class_names, top_k_list)
  assert metric_specs, 'At least one metric_spec must be defined %r' % metric_specs
  tfma_eval_config.metrics_specs.extend(metric_specs)

  slicing_specs = _get_tfma_slicing_specs(slice_features)
  assert slicing_specs, 'At least one slicing_spec must be defined %r' % slicing_specs
  tfma_eval_config.slicing_specs.extend(slicing_specs)

  adapter = tfma_adapter.TFMAToME(
      class_name_list=class_names,
      predicted_label_column_spec=evaluation_column_specs
      .predicted_label_column_spec,
      predicted_label_id_column_spec=evaluation_column_specs
      .predicted_label_id_column_spec)
  return adapter.eval_config(tfma_eval_config)
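The TFMA side of the configuration built above can also be expressed as a text proto. A rough sketch (with hypothetical column names and a single placeholder metric) of what tfma_eval_config amounts to before it is adapted to the ME representation:

from google.protobuf import text_format
from tensorflow_model_analysis.proto import config_pb2

# Hypothetical keys; the real ones come from evaluation_column_specs.
tfma_eval_config = text_format.Parse(
    """
    model_specs {
      label_key: "ground_truth"
      prediction_key: "predicted_score"
    }
    metrics_specs {
      metrics { class_name: "ExampleCount" }
    }
    slicing_specs {}
    """, config_pb2.EvalConfig())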
Example #14
    def testBatchedPredict(self):
        temp_eval_export_dir = self._getEvalExportDir()
        _, eval_export_dir = linear_classifier.simple_linear_classifier(
            None, temp_eval_export_dir)
        eval_shared_model = model_eval_lib.default_eval_shared_model(
            eval_saved_model_path=eval_export_dir)
        eval_config = config_pb2.EvalConfig(
            model_specs=[config_pb2.ModelSpec()])
        with beam.Pipeline() as pipeline:
            examples = [
                self._makeExample(age=3.0, language='english', label=1.0),
                self._makeExample(age=3.0, language='chinese', label=0.0),
                self._makeExample(age=4.0, language='english', label=1.0),
                self._makeExample(age=5.0, language='chinese', label=0.0),
            ]
            serialized_examples = [e.SerializeToString() for e in examples]

            tfx_io = raw_tf_record.RawBeamRecordTFXIO(
                physical_format='inmemory',
                raw_record_column_name=constants.ARROW_INPUT_COLUMN,
                telemetry_descriptors=['TFMATest'])
            extractor = predict_extractor.PredictExtractor(
                eval_shared_model, eval_config=eval_config)
            predict_extracts = (
                pipeline
                | 'Create' >> beam.Create(serialized_examples, reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | 'Predict' >> extractor.ptransform)

            def check_result(got):
                try:
                    self.assertLen(got, 2)
                    for item in got:
                        self.assertIn(constants.FEATURES_KEY, item)
                        for feature in ('language', 'age'):
                            for features_dict in item[constants.FEATURES_KEY]:
                                self.assertIn(feature, features_dict)
                        self.assertIn(constants.LABELS_KEY, item)
                        self.assertIn(constants.PREDICTIONS_KEY, item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(predict_extracts, check_result, label='result')
Example #15
    def testModelSignaturesDoFnError(self):
        export_path = self.createModelWithInvalidOutputShape()
        signature_names = {constants.PREDICTIONS_KEY: {'': [None]}}
        eval_shared_models = {
            '':
            self.createTestEvalSharedModel(eval_saved_model_path=export_path,
                                           tags=[tf.saved_model.SERVING])
        }
        model_specs = [config_pb2.ModelSpec()]
        eval_config = config_pb2.EvalConfig(model_specs=model_specs)
        schema = self.createDenseInputsSchema()
        tfx_io = tf_example_record.TFExampleBeamRecord(
            physical_format='text',
            schema=schema,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())

        examples = [
            self._makeExample(input_1=1.0, input_2=2.0),
            self._makeExample(input_1=3.0, input_2=4.0),
            self._makeExample(input_1=5.0, input_2=6.0),
        ]

        with self.assertRaisesRegex(
                ValueError,
                'First dimension does not correspond with batch size.'):
            with beam.Pipeline() as pipeline:
                # pylint: disable=no-value-for-parameter
                _ = (pipeline
                     | 'Create' >> beam.Create(
                         [e.SerializeToString() for e in examples])
                     | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                     | 'ToExtracts' >> beam.Map(_record_batch_to_extracts)
                     | 'ModelSignatures' >> beam.ParDo(
                         model_util.ModelSignaturesDoFn(
                             eval_config=eval_config,
                             eval_shared_models=eval_shared_models,
                             signature_names=signature_names,
                             default_signature_names=None,
                             prefer_dict_outputs=False,
                             tensor_adapter_config=tensor_adapter_config)))
Example #16
class CalibrationPlotTest(testutil.TensorflowModelAnalysisTest,
                          parameterized.TestCase):
    def testCalibrationPlot(self):
        computations = calibration_plot.CalibrationPlot(
            num_buckets=10).computations(example_weighted=True)
        histogram = computations[0]
        plot = computations[1]

        example1 = {
            'labels': np.array([0.0]),
            'predictions': np.array([0.2]),
            'example_weights': np.array([1.0])
        }
        example2 = {
            'labels': np.array([1.0]),
            'predictions': np.array([0.8]),
            'example_weights': np.array([2.0])
        }
        example3 = {
            'labels': np.array([0.0]),
            'predictions': np.array([0.5]),
            'example_weights': np.array([3.0])
        }
        example4 = {
            'labels': np.array([1.0]),
            'predictions': np.array([-0.1]),
            'example_weights': np.array([4.0])
        }
        example5 = {
            'labels': np.array([1.0]),
            'predictions': np.array([0.5]),
            'example_weights': np.array([5.0])
        }
        example6 = {
            'labels': np.array([1.0]),
            'predictions': np.array([0.8]),
            'example_weights': np.array([6.0])
        }
        example7 = {
            'labels': np.array([0.0]),
            'predictions': np.array([0.2]),
            'example_weights': np.array([7.0])
        }
        example8 = {
            'labels': np.array([1.0]),
            'predictions': np.array([1.1]),
            'example_weights': np.array([8.0])
        }

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create([
                    example1, example2, example3, example4, example5, example6,
                    example7, example8
                ])
                | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
                | 'AddSlice' >> beam.Map(lambda x: ((), x))
                | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
                |
                'ComputePlot' >> beam.Map(lambda x: (x[0], plot.result(x[1]))))

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    got_slice_key, got_plots = got[0]
                    self.assertEqual(got_slice_key, ())
                    self.assertLen(got_plots, 1)
                    key = metric_types.PlotKey(name='calibration_plot',
                                               example_weighted=True)
                    self.assertIn(key, got_plots)
                    got_plot = got_plots[key]
                    self.assertProtoEquals(
                        """
              buckets {
                lower_threshold_inclusive: -inf
                upper_threshold_exclusive: 0.0
                total_weighted_label {
                  value: 4.0
                }
                total_weighted_refined_prediction {
                  value: -0.4
                }
                num_weighted_examples {
                  value: 4.0
                }
              }
              buckets {
                lower_threshold_inclusive: 0.0
                upper_threshold_exclusive: 0.1
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 0.1
                upper_threshold_exclusive: 0.2
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 0.2
                upper_threshold_exclusive: 0.3
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                  value: 1.6
                }
                num_weighted_examples {
                  value: 8.0
                }
              }
              buckets {
                lower_threshold_inclusive: 0.3
                upper_threshold_exclusive: 0.4
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 0.4
                upper_threshold_exclusive: 0.5
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 0.5
                upper_threshold_exclusive: 0.6
                total_weighted_label {
                  value: 5.0
                }
                total_weighted_refined_prediction {
                  value: 4.0
                }
                num_weighted_examples {
                  value: 8.0
                }
              }
              buckets {
                lower_threshold_inclusive: 0.6
                upper_threshold_exclusive: 0.7
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 0.7
                upper_threshold_exclusive: 0.8
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 0.8
                upper_threshold_exclusive: 0.9
                total_weighted_label {
                  value: 8.0
                }
                total_weighted_refined_prediction {
                  value: 6.4
                }
                num_weighted_examples {
                  value: 8.0
                }
              }
              buckets {
                lower_threshold_inclusive: 0.9
                upper_threshold_exclusive: 1.0
                total_weighted_label {
                }
                total_weighted_refined_prediction {
                }
                num_weighted_examples {
                }
              }
              buckets {
                lower_threshold_inclusive: 1.0
                upper_threshold_exclusive: inf
                total_weighted_label {
                  value: 8.0
                }
                total_weighted_refined_prediction {
                  value: 8.8
                }
                num_weighted_examples {
                  value: 8.0
                }
              }
          """, got_plot)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')

    @parameterized.named_parameters(
        {
            'testcase_name':
            'int_single_model',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1', label_key='label'),
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "label"
                type: INT
                int_domain {
                  min: 5
                  max: 15
                }
              }
              """, schema_pb2.Schema()),
            'model_names': [''],
            'output_names': [''],
            'expected_left':
            5.0,
            'expected_range':
            10.0,
        }, {
            'testcase_name':
            'int_single_model_right_only',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1', label_key='label'),
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "label"
                type: INT
                int_domain {
                  max: 15
                }
              }
              """, schema_pb2.Schema()),
            'model_names': [''],
            'output_names': [''],
            'expected_left':
            0.0,
            'expected_range':
            15.0,
        }, {
            'testcase_name':
            'int_single_model_schema_missing_domain',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1', label_key='label'),
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "label"
                type: FLOAT
              }
              """, schema_pb2.Schema()),
            'model_names': [''],
            'output_names': [''],
            'expected_left':
            0.0,
            'expected_range':
            1.0,
        }, {
            'testcase_name':
            'int_single_model_schema_missing_label',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1', label_key='label'),
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "other_feature"
                type: BYTES
              }
              """, schema_pb2.Schema()),
            'model_names': [''],
            'output_names': [''],
            'expected_left':
            0.0,
            'expected_range':
            1.0,
        }, {
            'testcase_name':
            'float_single_model',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1', label_key='label'),
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "label"
                type: FLOAT
                float_domain {
                  min: 5.0
                  max: 15.0
                }
              }
              """, schema_pb2.Schema()),
            'model_names': [''],
            'output_names': [''],
            'expected_left':
            5.0,
            'expected_range':
            10.0
        }, {
            'testcase_name':
            'float_single_model_multiple_outputs',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1',
                                     label_keys={
                                         'output1': 'label1',
                                         'output2': 'label2'
                                     },
                                     signature_name='default'),
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "label2"
                type: FLOAT
                float_domain {
                  min: 5.0
                  max: 15.0
                }
              }
              """, schema_pb2.Schema()),
            'model_names': [''],
            'output_names': ['output2'],
            'expected_left':
            5.0,
            'expected_range':
            10.0
        }, {
            'testcase_name':
            'float_multiple_models',
            'eval_config':
            config_pb2.EvalConfig(model_specs=[
                config_pb2.ModelSpec(name='model1', label_key='label1'),
                config_pb2.ModelSpec(name='model2', label_key='label2')
            ]),
            'schema':
            text_format.Parse(
                """
              feature {
                name: "label2"
                type: FLOAT
                float_domain {
                  min: 5.0
                  max: 15.0
                }
              }
              """, schema_pb2.Schema()),
            'model_names': ['model2'],
            'output_names': [''],
            'expected_left':
            5.0,
            'expected_range':
            10.0
        })
    def testCalibrationPlotWithSchema(self, eval_config, schema, model_names,
                                      output_names, expected_left,
                                      expected_range):
        computations = calibration_plot.CalibrationPlot(
            num_buckets=10).computations(eval_config=eval_config,
                                         schema=schema,
                                         model_names=model_names,
                                         output_names=output_names)
        histogram = computations[0]
        self.assertEqual(expected_left, histogram.combiner._left)
        self.assertEqual(expected_range, histogram.combiner._range)
Example #17
    def testUnbatchExtractor(self):
        model_spec = config_pb2.ModelSpec(label_key='label',
                                          example_weight_key='example_weight')
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        label_extractor = labels_extractor.LabelsExtractor(eval_config)
        example_weight_extractor = (
            example_weights_extractor.ExampleWeightsExtractor(eval_config))
        predict_extractor = predictions_extractor.PredictionsExtractor(
            eval_config)
        unbatch_inputs_extractor = unbatch_extractor.UnbatchExtractor()

        schema = text_format.Parse(
            """
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        examples = [
            self._makeExample(label=1.0,
                              example_weight=0.5,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(label=0.0,
                              example_weight=0.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string2'),
            self._makeExample(label=0.0,
                              example_weight=1.0,
                              fixed_int=2,
                              fixed_float=0.0,
                              fixed_string='fixed_string3')
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | label_extractor.stage_name >> label_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform
                | predict_extractor.stage_name >> predict_extractor.ptransform
                | unbatch_inputs_extractor.stage_name >>
                unbatch_inputs_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 3)
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                            'fixed_float': np.array([1.0]),
                        })
                    self.assertEqual(
                        got[0][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string1']))
                    self.assertAlmostEqual(got[0][constants.LABELS_KEY],
                                           np.array([1.0]))
                    self.assertAlmostEqual(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY], np.array([0.5]))
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                            'fixed_float': np.array([1.0]),
                        })
                    self.assertEqual(
                        got[1][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string2']))
                    self.assertAlmostEqual(got[1][constants.LABELS_KEY],
                                           np.array([0.0]))
                    self.assertAlmostEqual(
                        got[1][constants.EXAMPLE_WEIGHTS_KEY], np.array([0.0]))
                    self.assertDictElementsAlmostEqual(
                        got[2][constants.FEATURES_KEY], {
                            'fixed_int': np.array([2]),
                            'fixed_float': np.array([0.0]),
                        })
                    self.assertEqual(
                        got[2][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string3']))
                    self.assertAlmostEqual(got[2][constants.LABELS_KEY],
                                           np.array([0.0]))
                    self.assertAlmostEqual(
                        got[2][constants.EXAMPLE_WEIGHTS_KEY], np.array([1.0]))

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Example #18
    def testPredictionsExtractorWithMultiModels(self):
        temp_export_dir = self._getExportDir()
        export_dir1, _ = multi_head.simple_multi_head(temp_export_dir, None)
        export_dir2, _ = multi_head.simple_multi_head(temp_export_dir, None)

        eval_config = config_pb2.EvalConfig(model_specs=[
            config_pb2.ModelSpec(name='model1'),
            config_pb2.ModelSpec(name='model2')
        ])
        eval_shared_model1 = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir1, tags=[tf.saved_model.SERVING])
        eval_shared_model2 = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir2, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        feature {
          name: "age"
          type: FLOAT
        }
        feature {
          name: "langauge"
          type: BYTES
        }
        feature {
          name: "english_label"
          type: FLOAT
        }
        feature {
          name: "chinese_label"
          type: FLOAT
        }
        feature {
          name: "other_label"
          type: FLOAT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        prediction_extractor = predictions_extractor.PredictionsExtractor(
            eval_config=eval_config,
            eval_shared_model={
                'model1': eval_shared_model1,
                'model2': eval_shared_model2
            },
            tensor_adapter_config=tensor_adapter_config)

        examples = [
            self._makeExample(age=1.0,
                              language='english',
                              english_label=1.0,
                              chinese_label=0.0,
                              other_label=0.0),
            self._makeExample(age=1.0,
                              language='chinese',
                              english_label=0.0,
                              chinese_label=1.0,
                              other_label=0.0),
            self._makeExample(age=2.0,
                              language='english',
                              english_label=1.0,
                              chinese_label=0.0,
                              other_label=0.0),
            self._makeExample(age=2.0,
                              language='other',
                              english_label=0.0,
                              chinese_label=1.0,
                              other_label=1.0)
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=4)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for item in got:
                        # We can't verify the actual predictions, but we can verify the keys
                        self.assertIn(constants.PREDICTIONS_KEY, item)
                        for pred in item[constants.PREDICTIONS_KEY]:
                            for model_name in ('model1', 'model2'):
                                self.assertIn(model_name, pred)
                                for output_name in ('chinese_head',
                                                    'english_head',
                                                    'other_head'):
                                    for pred_key in ('logistic',
                                                     'probabilities',
                                                     'all_classes'):
                                        self.assertIn(
                                            output_name + '/' + pred_key,
                                            pred[model_name])

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Example #19
 def testGetModelAndOutputNamesEmptyExtracts(self):
     eval_config = config_pb2.EvalConfig(
         model_specs=[config_pb2.ModelSpec()])
     self.assertEmpty(
         util.StandardExtracts({}).get_model_and_output_names(eval_config))
Example #20
    def testPredictionsExtractorWithRegressionModel(self):
        temp_export_dir = self._getExportDir()
        export_dir, _ = (fixed_prediction_estimator_extra_fields.
                         simple_fixed_prediction_estimator_extra_fields(
                             temp_export_dir, None))

        eval_config = config_pb2.EvalConfig(
            model_specs=[config_pb2.ModelSpec()])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        feature {
          name: "prediction"
          type: FLOAT
        }
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        prediction_extractor = predictions_extractor.PredictionsExtractor(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            tensor_adapter_config=tensor_adapter_config)

        examples = [
            self._makeExample(prediction=0.2,
                              label=1.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(prediction=0.8,
                              label=0.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string2'),
            self._makeExample(prediction=0.5,
                              label=0.0,
                              fixed_int=2,
                              fixed_float=1.0,
                              fixed_string='fixed_string3')
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertIn(constants.PREDICTIONS_KEY, got[0])
                    expected_preds = [0.2, 0.8, 0.5]
                    self.assertAlmostEqual(got[0][constants.PREDICTIONS_KEY],
                                           expected_preds)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Example #21
    def testPredictionsExtractorWithSequentialKerasModel(self):
        # Note that the input will be called 'test_input'
        model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(1,
                                  activation=tf.nn.sigmoid,
                                  input_shape=(2, ),
                                  name='test')
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(lr=.001),
                      loss=tf.keras.losses.binary_crossentropy,
                      metrics=['accuracy'])

        train_features = {'test_input': [[0.0, 0.0], [1.0, 1.0]]}
        labels = [[1], [0]]
        example_weights = [1.0, 0.5]
        dataset = tf.data.Dataset.from_tensor_slices(
            (train_features, labels, example_weights))
        dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
        model.fit(dataset, steps_per_epoch=1)

        export_dir = self._getExportDir()
        model.save(export_dir, save_format='tf')

        eval_config = config_pb2.EvalConfig(
            model_specs=[config_pb2.ModelSpec()])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "test"
              value {
                dense_tensor {
                  column_name: "test"
                  shape { dim { size: 2 } }
                }
              }
            }
          }
        }
        feature {
          name: "test"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        prediction_extractor = predictions_extractor.PredictionsExtractor(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            tensor_adapter_config=tensor_adapter_config)

        # Notice that the features are 'test' but the model expects 'test_input'.
        # This tests that the PredictExtractor properly handles this case.
        examples = [
            self._makeExample(
                test=[0.0,
                      0.0], non_model_feature=0),  # should be ignored by model
            self._makeExample(
                test=[1.0,
                      1.0], non_model_feature=1),  # should be ignored by model
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    # We can't verify the actual predictions, but we can verify the keys.
                    for item in got:
                        self.assertIn(constants.PREDICTIONS_KEY, item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
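As an aside (an illustrative sketch, not part of the original test): Keras derives a Sequential model's input name from its first layer's name, which is why the comments above refer to 'test_input' even though the schema feature is called 'test'.

import tensorflow as tf

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                          input_shape=(2,), name='test')
])
# The auto-created InputLayer is named '<first_layer_name>_input'.
print(model.input_names)  # expected: ['test_input']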
Example #22
    def testExampleWeightsExtractorMultiModel(self):
        model_spec1 = config_pb2.ModelSpec(name='model1',
                                           example_weight_key='example_weight')
        model_spec2 = config_pb2.ModelSpec(name='model2',
                                           example_weight_keys={
                                               'output1': 'example_weight1',
                                               'output2': 'example_weight2'
                                           })
        eval_config = config_pb2.EvalConfig(
            model_specs=[model_spec1, model_spec2])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        example_weight_extractor = example_weights_extractor.ExampleWeightsExtractor(
            eval_config)

        schema = text_format.Parse(
            """
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "example_weight1"
          type: FLOAT
        }
        feature {
          name: "example_weight2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(example_weight=0.5,
                              example_weight1=0.5,
                              example_weight2=0.5,
                              fixed_int=1),
            self._makeExample(example_weight=0.0,
                              example_weight1=0.0,
                              example_weight2=1.0,
                              fixed_int=1)
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.EXAMPLE_WEIGHTS_KEY])
                    self.assertAllClose(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY]['model1'],
                        np.array([0.5, 0.0]))
                    self.assertAllClose(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY]['model2'], {
                            'output1': np.array([0.5, 0.0]),
                            'output2': np.array([0.5, 1.0])
                        })

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Example #23
class ModelUtilTest(testutil.TensorflowModelAnalysisTest,
                    parameterized.TestCase):
    def createDenseInputsSchema(self):
        return text_format.Parse(
            """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "input_1"
              value {
                dense_tensor {
                  column_name: "input_1"
                  shape { dim { size: 1 } }
                }
              }
            }
            tensor_representation {
              key: "input_2"
              value {
                dense_tensor {
                  column_name: "input_2"
                  shape { dim { size: 1 } }
                }
              }
            }
          }
        }
        feature {
          name: "input_1"
          type: FLOAT
        }
        feature {
          name: "input_2"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())

    def createModelWithSingleInput(self, save_as_keras):
        input_layer = tf.keras.layers.Input(shape=(1, ), name='input')
        output_layer = tf.keras.layers.Dense(
            1, activation=tf.nn.sigmoid)(input_layer)
        model = tf.keras.models.Model(input_layer, output_layer)

        @tf.function
        def serving_default(s):
            return model(s)

        input_spec = {
            'input': tf.TensorSpec(shape=(None, 1),
                                   dtype=tf.string,
                                   name='input'),
        }
        signatures = {
            'serving_default':
            serving_default.get_concrete_function(input_spec),
            'custom_signature':
            serving_default.get_concrete_function(input_spec),
        }

        export_path = tempfile.mkdtemp()
        if save_as_keras:
            model.save(export_path, save_format='tf', signatures=signatures)
        else:
            tf.saved_model.save(model, export_path, signatures=signatures)
        return export_path

    def createModelWithMultipleDenseInputs(self, save_as_keras):
        input1 = tf.keras.layers.Input(shape=(1, ), name='input_1')
        input2 = tf.keras.layers.Input(shape=(1, ), name='input_2')
        inputs = [input1, input2]
        input_layer = tf.keras.layers.concatenate(inputs)
        output_layer = tf.keras.layers.Dense(1,
                                             activation=tf.nn.sigmoid,
                                             name='output')(input_layer)
        model = tf.keras.models.Model(inputs, output_layer)

        # Add custom attribute to model to test callables stored as attributes
        model.custom_attribute = tf.keras.models.Model(inputs, output_layer)

        @tf.function
        def serving_default(serialized_tf_examples):
            parsed_features = tf.io.parse_example(
                serialized_tf_examples, {
                    'input_1': tf.io.FixedLenFeature([1], dtype=tf.float32),
                    'input_2': tf.io.FixedLenFeature([1], dtype=tf.float32)
                })
            return model(parsed_features)

        @tf.function
        def custom_single_output(features):
            return model(features)

        @tf.function
        def custom_multi_output(features):
            return {'output1': model(features), 'output2': model(features)}

        input_spec = tf.TensorSpec(shape=(None, ),
                                   dtype=tf.string,
                                   name='examples')
        custom_input_spec = {
            'input_1':
            tf.TensorSpec(shape=(None, 1), dtype=tf.float32, name='input_1'),
            'input_2':
            tf.TensorSpec(shape=(None, 1), dtype=tf.float32, name='input_2')
        }
        signatures = {
            'serving_default':
            serving_default.get_concrete_function(input_spec),
            'custom_single_output':
            custom_single_output.get_concrete_function(custom_input_spec),
            'custom_multi_output':
            custom_multi_output.get_concrete_function(custom_input_spec)
        }

        export_path = tempfile.mkdtemp()
        if save_as_keras:
            model.save(export_path, save_format='tf', signatures=signatures)
        else:
            tf.saved_model.save(model, export_path, signatures=signatures)
        return export_path

    def createModelWithInvalidOutputShape(self):
        input1 = tf.keras.layers.Input(shape=(1, ), name='input_1')
        input2 = tf.keras.layers.Input(shape=(1, ), name='input_2')
        inputs = [input1, input2]
        input_layer = tf.keras.layers.concatenate(inputs)
        output_layer = tf.keras.layers.Dense(2,
                                             activation=tf.nn.sigmoid,
                                             name='output')(input_layer)
        # Flatten the layer such that the first dimension no longer corresponds
        # with the batch size.
        reshape_layer = tf.keras.layers.Lambda(lambda x: tf.reshape(x, [-1]),
                                               name='reshape')(output_layer)
        model = tf.keras.models.Model(inputs, reshape_layer)

        @tf.function
        def serving_default(serialized_tf_examples):
            parsed_features = tf.io.parse_example(
                serialized_tf_examples, {
                    'input_1': tf.io.FixedLenFeature([1], dtype=tf.float32),
                    'input_2': tf.io.FixedLenFeature([1], dtype=tf.float32)
                })
            return model(parsed_features)

        input_spec = tf.TensorSpec(shape=(None, ),
                                   dtype=tf.string,
                                   name='examples')
        signatures = {
            'serving_default':
            serving_default.get_concrete_function(input_spec),
        }

        export_path = tempfile.mkdtemp()
        model.save(export_path, save_format='tf', signatures=signatures)
        return export_path

    def createModelWithMultipleMixedInputs(self, save_as_keras):
        dense_input = tf.keras.layers.Input(shape=(2, ),
                                            name='input_1',
                                            dtype=tf.int64)
        dense_float_input = tf.cast(dense_input, tf.float32)
        sparse_input = tf.keras.layers.Input(shape=(1, ),
                                             name='input_2',
                                             sparse=True)
        dense_sparse_input = tf.keras.layers.Dense(
            1, name='dense_input2')(sparse_input)
        ragged_input = tf.keras.layers.Input(shape=(None, ),
                                             name='input_3',
                                             ragged=True)
        dense_ragged_input = tf.keras.layers.Lambda(lambda x: x.to_tensor())(
            ragged_input)
        dense_ragged_input.set_shape((None, 1))
        inputs = [dense_input, sparse_input, ragged_input]
        input_layer = tf.keras.layers.concatenate(
            [dense_float_input, dense_sparse_input, dense_ragged_input])
        output_layer = tf.keras.layers.Dense(
            1, activation=tf.nn.sigmoid)(input_layer)
        model = tf.keras.models.Model(inputs, output_layer)

        @tf.function
        def serving_default(features):
            return model(features)

        input_spec = {
            'input_1':
            tf.TensorSpec(shape=(None, 2), dtype=tf.int64, name='input_1'),
            'input_2':
            tf.SparseTensorSpec(shape=(None, 1), dtype=tf.float32),
            'input_3':
            tf.RaggedTensorSpec(shape=(None, 1), dtype=tf.float32)
        }
        signatures = {
            'serving_default':
            serving_default.get_concrete_function(input_spec),
            'custom_signature':
            serving_default.get_concrete_function(input_spec),
        }

        export_path = tempfile.mkdtemp()
        if save_as_keras:
            model.save(export_path, save_format='tf', signatures=signatures)
        else:
            tf.saved_model.save(model, export_path, signatures=signatures)
        return export_path

    def testFilterByInputNames(self):
        tensors = {
            'f1': tf.constant([[1.1], [2.1]], dtype=tf.float32),
            'f2': tf.constant([[1], [2]], dtype=tf.int64),
            'f3': tf.constant([['hello'], ['world']], dtype=tf.string)
        }
        filtered_tensors = model_util.filter_by_input_names(
            tensors, ['f1', 'f3'])
        self.assertLen(filtered_tensors, 2)
        self.assertAllEqual(tf.constant([[1.1], [2.1]], dtype=tf.float32),
                            filtered_tensors['f1'])
        self.assertAllEqual(
            tf.constant([['hello'], ['world']], dtype=tf.string),
            filtered_tensors['f3'])

    @parameterized.named_parameters(
        ('one_baseline',
         text_format.Parse(
             """
             model_specs {
               name: "candidate"
             }
             model_specs {
               name: "baseline"
               is_baseline: true
             }
           """, config_pb2.EvalConfig()),
         text_format.Parse(
             """
             name: "baseline"
             is_baseline: true
           """, config_pb2.ModelSpec())),
        ('no_baseline',
         text_format.Parse(
             """
             model_specs {
               name: "candidate"
             }
           """, config_pb2.EvalConfig()), None),
    )
    def test_get_baseline_model(self, eval_config,
                                expected_baseline_model_spec):
        self.assertEqual(expected_baseline_model_spec,
                         model_util.get_baseline_model_spec(eval_config))

    @parameterized.named_parameters(
        ('one_non_baseline',
         text_format.Parse(
             """
             model_specs {
               name: "candidate"
             }
             model_specs {
               name: "baseline"
               is_baseline: true
             }
           """, config_pb2.EvalConfig()), [
                 text_format.Parse(
                     """
             name: "candidate"
           """, config_pb2.ModelSpec())
             ]),
        ('no_non_baseline',
         text_format.Parse(
             """
             model_specs {
               name: "baseline"
               is_baseline: true
             }
           """, config_pb2.EvalConfig()), []),
    )
    def test_get_non_baseline_model(self, eval_config,
                                    expected_non_baseline_model_specs):
        self.assertCountEqual(
            expected_non_baseline_model_specs,
            model_util.get_non_baseline_model_specs(eval_config))

    def testFilterByInputNamesKeras(self):
        tensors = {
            'f1': tf.constant([[1.1], [2.1]], dtype=tf.float32),
            'f2': tf.constant([[1], [2]], dtype=tf.int64),
            'f3': tf.constant([['hello'], ['world']], dtype=tf.string)
        }
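        # Keras may append model_util.KERAS_INPUT_SUFFIX to input names; the
        # assertions below check that filtering by the suffixed names still
        # yields the original 'f1' and 'f3' values, keyed by the suffixed
        # names.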
        filtered_tensors = model_util.filter_by_input_names(
            tensors, [
                'f1' + model_util.KERAS_INPUT_SUFFIX,
                'f3' + model_util.KERAS_INPUT_SUFFIX
            ])
        self.assertLen(filtered_tensors, 2)
        self.assertAllEqual(
            tf.constant([[1.1], [2.1]], dtype=tf.float32),
            filtered_tensors['f1' + model_util.KERAS_INPUT_SUFFIX])
        self.assertAllEqual(
            tf.constant([['hello'], ['world']], dtype=tf.string),
            filtered_tensors['f3' + model_util.KERAS_INPUT_SUFFIX])

    @parameterized.named_parameters(
        ('output_name_and_label_key', config_pb2.ModelSpec(label_key='label'),
         'output', 'label'),
        ('output_name_and_label_keys',
         config_pb2.ModelSpec(label_keys={'output': 'label'}),
         'output', 'label'), ('output_name_and_no_label_keys',
                              config_pb2.ModelSpec(), 'output', None),
        ('no_output_name_and_label_key',
         config_pb2.ModelSpec(label_key='label'), '', 'label'),
        ('no_output_name_and_no_label_keys', config_pb2.ModelSpec(), '', None))
    def testGetLabelKey(self, model_spec, output_name, expected_label_key):
        self.assertEqual(expected_label_key,
                         model_util.get_label_key(model_spec, output_name))

    def testGetLabelKeyNoOutputAndLabelKeys(self):
        with self.assertRaises(ValueError):
            model_util.get_label_key(
                config_pb2.ModelSpec(label_keys={'output1': 'label'}), '')

    @parameterized.named_parameters(
        {
            'testcase_name': 'single_model_single_key',
            'model_specs': [config_pb2.ModelSpec(label_key='feature1')],
            'field': 'label_key',
            'multi_output_field': 'label_keys',
            'expected_values': [
                [1.0, 1.1, 1.2],
            ]
        },
        {
            'testcase_name':
            'single_model_multi_key',
            'model_specs': [
                config_pb2.ModelSpec(label_keys={
                    'output1': 'feature1',
                    'output2': 'feature2'
                })
            ],
            'field':
            'label_key',
            'multi_output_field':
            'label_keys',
            'expected_values': [
                {
                    'output1': [1.0, 1.1, 1.2],
                    'output2': [2.0, 2.1, 2.2]
                },
            ]
        },
        {
            'testcase_name':
            'multi_model_single_key',
            'model_specs': [
                config_pb2.ModelSpec(name='model1',
                                     example_weight_key='feature2'),
                config_pb2.ModelSpec(name='model2',
                                     example_weight_key='feature3')
            ],
            'field':
            'example_weight_key',
            'multi_output_field':
            'example_weight_keys',
            'expected_values': [
                {
                    'model1': [2.0, 2.1, 2.2],
                    'model2': [3.0, 3.1, 3.2]
                },
            ]
        },
        {
            'testcase_name':
            'multi_model_multi_key',
            'model_specs': [
                config_pb2.ModelSpec(name='model1',
                                     prediction_keys={
                                         'output1': 'feature1',
                                         'output2': 'feature2'
                                     }),
                config_pb2.ModelSpec(name='model2',
                                     prediction_keys={
                                         'output1': 'feature1',
                                         'output3': 'feature3'
                                     })
            ],
            'field':
            'prediction_key',
            'multi_output_field':
            'prediction_keys',
            'expected_values': [
                {
                    'model1': {
                        'output1': [1.0, 1.1, 1.2],
                        'output2': [2.0, 2.1, 2.2]
                    },
                    'model2': {
                        'output1': [1.0, 1.1, 1.2],
                        'output3': [3.0, 3.1, 3.2]
                    }
                },
            ]
        },
    )
    def testGetFeatureValuesForModelSpecField(self, model_specs, field,
                                              multi_output_field,
                                              expected_values):
        extracts = {
            # Only need the num_rows from RecordBatch so use fake array of same len
            # as features.
            constants.ARROW_RECORD_BATCH_KEY:
            pa.RecordBatch.from_arrays([pa.array([1])], ['dummy']),
            constants.FEATURES_KEY: [
                {
                    'feature1': [1.0, 1.1, 1.2],
                    'feature2': [2.0, 2.1, 2.2],
                    'feature3': [3.0, 3.1, 3.2],
                },
            ]
        }
        got = model_util.get_feature_values_for_model_spec_field(
            model_specs, field, multi_output_field, extracts)
        self.assertAlmostEqual(expected_values, got)

    @parameterized.named_parameters(
        {
            'testcase_name': 'single_model_single_key',
            'model_specs': [config_pb2.ModelSpec(label_key='feature2')],
            'field': 'label_key',
            'multi_output_field': 'label_keys',
            'expected_values': [
                [4.0, 4.1, 4.2],
            ]
        },
        {
            'testcase_name':
            'single_model_multi_key',
            'model_specs': [
                config_pb2.ModelSpec(label_keys={
                    'output1': 'feature1',
                    'output2': 'feature2'
                })
            ],
            'field':
            'label_key',
            'multi_output_field':
            'label_keys',
            'expected_values': [
                {
                    'output1': [1.0, 1.1, 1.2],
                    'output2': [4.0, 4.1, 4.2]
                },
            ]
        },
    )
    def testGetFeatureValuesForModelSpecFieldWithSingleModelTransformedFeatures(
            self, model_specs, field, multi_output_field, expected_values):
        extracts = {
            # Only need the num_rows from RecordBatch so use fake array of same len
            # as features.
            constants.ARROW_RECORD_BATCH_KEY:
            pa.RecordBatch.from_arrays([pa.array([1])], ['dummy']),
            constants.FEATURES_KEY: [
                {
                    'feature1': [1.0, 1.1, 1.2],
                    'feature2': [2.0, 2.1, 2.2],
                },
            ],
            constants.TRANSFORMED_FEATURES_KEY: [
                {
                    'feature2': [4.0, 4.1, 4.2],
                },
            ]
        }
        got = model_util.get_feature_values_for_model_spec_field(
            model_specs, field, multi_output_field, extracts)
        self.assertAlmostEqual(expected_values, got)

    @parameterized.named_parameters(
        {
            'testcase_name':
            'multi_model_single_key',
            'model_specs': [
                config_pb2.ModelSpec(name='model1',
                                     example_weight_key='feature2'),
                config_pb2.ModelSpec(name='model2',
                                     example_weight_key='feature3')
            ],
            'field':
            'example_weight_key',
            'multi_output_field':
            'example_weight_keys',
            'expected_values': [
                {
                    'model1': [4.0, 4.1, 4.2],
                    'model2': [7.0, 7.1, 7.2]
                },
            ]
        },
        {
            'testcase_name':
            'multi_model_multi_key',
            'model_specs': [
                config_pb2.ModelSpec(name='model1',
                                     example_weight_keys={
                                         'output1': 'feature1',
                                         'output2': 'feature2'
                                     }),
                config_pb2.ModelSpec(name='model2',
                                     example_weight_keys={
                                         'output1': 'feature1',
                                         'output3': 'feature3'
                                     })
            ],
            'field':
            'example_weight_key',
            'multi_output_field':
            'example_weight_keys',
            'expected_values': [
                {
                    'model1': {
                        'output1': [1.0, 1.1, 1.2],
                        'output2': [4.0, 4.1, 4.2]
                    },
                    'model2': {
                        'output1': [1.0, 1.1, 1.2],
                        'output3': [7.0, 7.1, 7.2]
                    }
                },
            ]
        },
    )
    def testGetFeatureValuesForModelSpecFieldWithMultiModelTransformedFeatures(
            self, model_specs, field, multi_output_field, expected_values):
        extracts = {
            # Only need the num_rows from RecordBatch so use fake array of same len
            # as features.
            constants.ARROW_RECORD_BATCH_KEY:
            pa.RecordBatch.from_arrays([pa.array([1])], ['dummy']),
            constants.FEATURES_KEY: [
                {
                    'feature1': [1.0, 1.1, 1.2],
                    'feature2': [2.0, 2.1, 2.2],
                },
            ],
            constants.TRANSFORMED_FEATURES_KEY: [
                {
                    'model1': {
                        'feature2': [4.0, 4.1, 4.2],
                        'feature3': [5.0, 5.1, 5.2]
                    },
                    'model2': {
                        'feature2': [6.0, 6.1, 6.2],
                        'feature3': [7.0, 7.1, 7.2]
                    }
                },
            ]
        }
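        # Transformed features are keyed per model here, so feature2/feature3
        # resolve from each model's transformed values while feature1 falls
        # back to the shared FEATURES_KEY values.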
        got = model_util.get_feature_values_for_model_spec_field(
            model_specs, field, multi_output_field, extracts)
        self.assertAlmostEqual(expected_values, got)

    def testGetFeatureValuesForModelSpecFieldNoValues(self):
        model_spec = config_pb2.ModelSpec(name='model1',
                                          example_weight_key='feature2')
        extracts = {
            constants.ARROW_RECORD_BATCH_KEY:
            pa.RecordBatch.from_arrays([pa.array([1])], ['dummy']),
        }
        got = model_util.get_feature_values_for_model_spec_field(
            [model_spec], 'example_weight', 'example_weights', extracts)
        self.assertIsNone(got)

    @parameterized.named_parameters(
        ('keras_serving_default', True, 'serving_default'),
        ('keras_custom_signature', True, 'custom_signature'),
        ('tf2_serving_default', False, 'serving_default'),
        ('tf2_custom_signature', False, 'custom_signature'))
    def testGetCallableWithSignatures(self, save_as_keras, signature_name):
        export_path = self.createModelWithSingleInput(save_as_keras)
        if save_as_keras:
            model = tf.keras.models.load_model(export_path)
        else:
            model = tf.compat.v1.saved_model.load_v2(export_path)
        self.assertIsNotNone(model_util.get_callable(model, signature_name))

    @parameterized.named_parameters(('keras', True), ('tf2', False))
    def testGetCallableWithMissingSignatures(self, save_as_keras):
        export_path = self.createModelWithSingleInput(save_as_keras)
        if save_as_keras:
            model = tf.keras.models.load_model(export_path)
        else:
            model = tf.compat.v1.saved_model.load_v2(export_path)
        with self.assertRaises(ValueError):
            model_util.get_callable(model, 'non_existent')

    @unittest.skipIf(_TF_MAJOR_VERSION < 2,
                     'not all input types supported for TF1')
    def testGetCallableWithKerasModel(self):
        export_path = self.createModelWithMultipleMixedInputs(True)
        model = tf.keras.models.load_model(export_path)
        self.assertEqual(model, model_util.get_callable(model))

    @parameterized.named_parameters(
        ('keras_serving_default', True, 'serving_default'),
        ('keras_custom_signature', True, 'custom_signature'),
        ('tf2_serving_default', False, None),
        ('tf2_custom_signature', False, 'custom_signature'))
    def testGetInputSpecsWithSignatures(self, save_as_keras, signature_name):
        export_path = self.createModelWithSingleInput(save_as_keras)
        if save_as_keras:
            model = tf.keras.models.load_model(export_path)
        else:
            model = tf.compat.v1.saved_model.load_v2(export_path)
        self.assertEqual(
            {
                'input':
                tf.TensorSpec(name='input', shape=(None, 1), dtype=tf.string),
            }, model_util.get_input_specs(model, signature_name))

    @parameterized.named_parameters(('keras', True), ('tf2', False))
    def testGetInputSpecsWithMissingSignatures(self, save_as_keras):
        export_path = self.createModelWithSingleInput(save_as_keras)
        if save_as_keras:
            model = tf.keras.models.load_model(export_path)
        else:
            model = tf.compat.v1.saved_model.load_v2(export_path)
        with self.assertRaises(ValueError):
            model_util.get_callable(model, 'non_existent')

    @unittest.skipIf(_TF_MAJOR_VERSION < 2,
                     'not all input types supported for TF1')
    def testGetInputSpecsWithKerasModel(self):
        export_path = self.createModelWithMultipleMixedInputs(True)
        model = tf.keras.models.load_model(export_path)

        # Some versions of TF set the TensorSpec.name and others do not. Since we
        # don't care about the name, clear it from the output for testing purposes
        specs = model_util.get_input_specs(model)
        for k, v in specs.items():
            if isinstance(v, tf.TensorSpec):
                specs[k] = tf.TensorSpec(shape=v.shape, dtype=v.dtype)
        self.assertEqual(
            {
                'input_1':
                tf.TensorSpec(shape=(None, 2), dtype=tf.int64),
                'input_2':
                tf.SparseTensorSpec(shape=(None, 1), dtype=tf.float32),
                'input_3':
                tf.RaggedTensorSpec(shape=(None, None), dtype=tf.float32),
            }, specs)

    def testInputSpecsToTensorRepresentations(self):
        tensor_representations = model_util.input_specs_to_tensor_representations(
            {
                'input_1':
                tf.TensorSpec(shape=(None, 2), dtype=tf.int64),
                'input_2':
                tf.SparseTensorSpec(shape=(None, 1), dtype=tf.float32),
                'input_3':
                tf.RaggedTensorSpec(shape=(None, None), dtype=tf.float32),
            })
        dense_tensor_representation = text_format.Parse(
            """
        dense_tensor {
          column_name: "input_1"
          shape { dim { size: 2 } }
        }
        """, schema_pb2.TensorRepresentation())
        sparse_tensor_representation = text_format.Parse(
            """
        varlen_sparse_tensor {
          column_name: "input_2"
        }
        """, schema_pb2.TensorRepresentation())
        ragged_tensor_representation = text_format.Parse(
            """
        ragged_tensor {
          feature_path {
            step: "input_3"
          }
        }
        """, schema_pb2.TensorRepresentation())
        self.assertEqual(
            {
                'input_1': dense_tensor_representation,
                'input_2': sparse_tensor_representation,
                'input_3': ragged_tensor_representation
            }, tensor_representations)

    def testInputSpecsToTensorRepresentationsRaisesWithUnknownDims(self):
        with self.assertRaises(ValueError):
            model_util.input_specs_to_tensor_representations({
                'input_1':
                tf.TensorSpec(shape=(None, None), dtype=tf.int64),
            })
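
    # Each named parameter below supplies, in order: save_as_keras,
    # signature_names, default_signature_names, prefer_dict_outputs,
    # use_schema, and expected_num_outputs for testModelSignaturesDoFn.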

    @parameterized.named_parameters(
        ('keras_default', True, {
            constants.PREDICTIONS_KEY: {
                '': [None]
            }
        }, None, False, True, 1),
        ('tf_default', False, {
            constants.PREDICTIONS_KEY: {
                '': [None]
            }
        }, None, False, True, 1),
        ('keras_serving_default', True, {
            constants.PREDICTIONS_KEY: {
                '': ['serving_default']
            }
        }, None, False, True, 1),
        ('tf_serving_default', False, {
            constants.PREDICTIONS_KEY: {
                '': ['serving_default']
            }
        }, None, False, True, 1),
        ('keras_custom_single_output', True, {
            constants.PREDICTIONS_KEY: {
                '': ['custom_single_output']
            }
        }, None, False, True, 1),
        ('tf_custom_single_output', False, {
            constants.PREDICTIONS_KEY: {
                '': ['custom_single_output']
            }
        }, None, False, True, 1),
        ('keras_custom_multi_output', True, {
            constants.PREDICTIONS_KEY: {
                '': ['custom_multi_output']
            }
        }, None, False, True, 2),
        ('tf_custom_multi_output', False, {
            constants.PREDICTIONS_KEY: {
                '': ['custom_multi_output']
            }
        }, None, False, True, 2),
        ('multi_model', True, {
            constants.PREDICTIONS_KEY: {
                'model1': ['custom_multi_output'],
                'model2': ['custom_multi_output']
            }
        }, None, False, True, 2),
        ('default_signatures', True, {
            constants.PREDICTIONS_KEY: {
                '': [],
            }
        }, ['unknown', 'custom_single_output'], False, True, 1),
        ('keras_prefer_dict_outputs', True, {
            constants.FEATURES_KEY: {
                '': [],
            }
        }, ['unknown', 'custom_single_output', 'custom_multi_output'
            ], True, True, 3),
        ('tf_prefer_dict_outputs', False, {
            constants.FEATURES_KEY: {
                '': [],
            }
        }, ['unknown', 'custom_single_output', 'custom_multi_output'
            ], True, True, 3),
        ('custom_attribute', True, {
            constants.FEATURES_KEY: {
                '': ['custom_attribute'],
            }
        }, None, True, True, 1),
        ('keras_no_schema', True, {
            constants.PREDICTIONS_KEY: {
                '': [None]
            }
        }, None, False, False, 1),
        ('tf_no_schema', False, {
            constants.PREDICTIONS_KEY: {
                '': [None]
            }
        }, None, False, False, 1),
        ('preprocessing_function', True, {
            constants.TRANSFORMED_FEATURES_KEY: {
                '': ['_plus_one@input_1']
            }
        }, None, False, False, 1),
    )
    @unittest.skipIf(_TF_MAJOR_VERSION < 2,
                     'not all signatures supported for TF1')
    def testModelSignaturesDoFn(self, save_as_keras, signature_names,
                                default_signature_names, prefer_dict_outputs,
                                use_schema, expected_num_outputs):
        export_path = self.createModelWithMultipleDenseInputs(save_as_keras)
        eval_shared_models = {}
        model_specs = []
        for sigs in signature_names.values():
            for model_name in sigs:
                if model_name not in eval_shared_models:
                    eval_shared_models[
                        model_name] = self.createTestEvalSharedModel(
                            eval_saved_model_path=export_path,
                            model_name=model_name,
                            tags=[tf.saved_model.SERVING])
                    model_specs.append(config_pb2.ModelSpec(name=model_name))
        eval_config = config_pb2.EvalConfig(model_specs=model_specs)
        schema = self.createDenseInputsSchema() if use_schema else None
        tfx_io = tf_example_record.TFExampleBeamRecord(
            physical_format='text',
            schema=schema,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = None
        if use_schema:
            tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
                arrow_schema=tfx_io.ArrowSchema(),
                tensor_representations=tfx_io.TensorRepresentations())

        examples = [
            self._makeExample(input_1=1.0, input_2=2.0),
            self._makeExample(input_1=3.0, input_2=4.0),
            self._makeExample(input_1=5.0, input_2=6.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (pipeline
                      | 'Create' >> beam.Create(
                          [e.SerializeToString() for e in examples])
                      | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                      | 'ToExtracts' >> beam.Map(_record_batch_to_extracts)
                      | 'ModelSignatures' >> beam.ParDo(
                          model_util.ModelSignaturesDoFn(
                              eval_config=eval_config,
                              eval_shared_models=eval_shared_models,
                              signature_names=signature_names,
                              default_signature_names=default_signature_names,
                              prefer_dict_outputs=prefer_dict_outputs,
                              tensor_adapter_config=tensor_adapter_config)))

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for key in signature_names:
                        self.assertIn(key, got[0])
                        if prefer_dict_outputs:
                            for entry in got[0][key]:
                                self.assertIsInstance(entry, dict)
                                self.assertLen(entry, expected_num_outputs)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')

    def testModelSignaturesDoFnError(self):
        export_path = self.createModelWithInvalidOutputShape()
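        # The model created above flattens its output, so the first dimension
        # no longer matches the batch size and the DoFn is expected to raise
        # the ValueError asserted below.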
        signature_names = {constants.PREDICTIONS_KEY: {'': [None]}}
        eval_shared_models = {
            '':
            self.createTestEvalSharedModel(eval_saved_model_path=export_path,
                                           tags=[tf.saved_model.SERVING])
        }
        model_specs = [config_pb2.ModelSpec()]
        eval_config = config_pb2.EvalConfig(model_specs=model_specs)
        schema = self.createDenseInputsSchema()
        tfx_io = tf_example_record.TFExampleBeamRecord(
            physical_format='text',
            schema=schema,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())

        examples = [
            self._makeExample(input_1=1.0, input_2=2.0),
            self._makeExample(input_1=3.0, input_2=4.0),
            self._makeExample(input_1=5.0, input_2=6.0),
        ]

        with self.assertRaisesRegex(
                ValueError,
                'First dimension does not correspond with batch size.'):
            with beam.Pipeline() as pipeline:
                # pylint: disable=no-value-for-parameter
                _ = (pipeline
                     | 'Create' >> beam.Create(
                         [e.SerializeToString() for e in examples])
                     | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                     | 'ToExtracts' >> beam.Map(_record_batch_to_extracts)
                     | 'ModelSignatures' >> beam.ParDo(
                         model_util.ModelSignaturesDoFn(
                             eval_config=eval_config,
                             eval_shared_models=eval_shared_models,
                             signature_names=signature_names,
                             default_signature_names=None,
                             prefer_dict_outputs=False,
                             tensor_adapter_config=tensor_adapter_config)))

    def testHasRubberStamp(self):
        # Model agnostic.
        self.assertFalse(model_util.has_rubber_stamp(None))

        # All non-baseline models have a rubber stamp.
        baseline = self.createTestEvalSharedModel(
            model_name=constants.BASELINE_KEY, is_baseline=True)
        candidate = self.createTestEvalSharedModel(
            model_name=constants.CANDIDATE_KEY, rubber_stamp=True)
        self.assertTrue(model_util.has_rubber_stamp([baseline, candidate]))

        # Not all non-baseline models have a rubber stamp.
        candidate_nr = self.createTestEvalSharedModel(
            model_name=constants.CANDIDATE_KEY)
        self.assertFalse(model_util.has_rubber_stamp([candidate_nr]))
        self.assertFalse(
            model_util.has_rubber_stamp([baseline, candidate, candidate_nr]))
Exemplo n.º 25
0
    def testTFJSPredictExtractorWithKerasModel(self, multi_model,
                                               multi_output):
        if not _TFJS_IMPORTED:
            self.skipTest('This test requires TensorFlow JS.')

        input1 = tf.keras.layers.Input(shape=(1, ), name='input1')
        input2 = tf.keras.layers.Input(shape=(1, ), name='input2')
        inputs = [input1, input2]
        input_layer = tf.keras.layers.concatenate(inputs)
        output_layers = {}
        output_layers['output1'] = (tf.keras.layers.Dense(
            1, activation=tf.nn.sigmoid, name='output1')(input_layer))
        if multi_output:
            output_layers['output2'] = (tf.keras.layers.Dense(
                1, activation=tf.nn.sigmoid, name='output2')(input_layer))

        model = tf.keras.models.Model(inputs, output_layers)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                      loss=tf.keras.losses.binary_crossentropy,
                      metrics=['accuracy'])

        train_features = {'input1': [[0.0], [1.0]], 'input2': [[1.0], [0.0]]}
        labels = {'output1': [[1], [0]]}
        if multi_output:
            labels['output2'] = [[1], [0]]

        example_weights = {'output1': [1.0, 0.5]}
        if multi_output:
            example_weights['output2'] = [1.0, 0.5]
        dataset = tf.data.Dataset.from_tensor_slices(
            (train_features, labels, example_weights))
        dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
        model.fit(dataset, steps_per_epoch=1)

        src_model_path = tempfile.mkdtemp()
        model.save(src_model_path)

        dst_model_path = tempfile.mkdtemp()
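        # Convert the Keras SavedModel at src_model_path into a TF.js model
        # under dst_model_path so that the TFJS predict extractor can load it.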
        converter.convert([
            '--input_format=tf_saved_model',
            '--saved_model_tags=serve',
            '--signature_name=serving_default',
            src_model_path,
            dst_model_path,
        ])

        model_specs = [config_pb2.ModelSpec(name='model1', model_type='tf_js')]
        if multi_model:
            model_specs.append(
                config_pb2.ModelSpec(name='model2', model_type='tf_js'))

        eval_config = config_pb2.EvalConfig(model_specs=model_specs)
        eval_shared_models = [
            self.createTestEvalSharedModel(
                model_name='model1',
                eval_saved_model_path=dst_model_path,
                model_type='tf_js')
        ]
        if multi_model:
            eval_shared_models.append(
                self.createTestEvalSharedModel(
                    model_name='model2',
                    eval_saved_model_path=dst_model_path,
                    model_type='tf_js'))

        schema = text_format.Parse(
            """
        feature {
          name: "input1"
          type: FLOAT
        }
        feature {
          name: "input2"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        predictor = tfjs_predict_extractor.TFJSPredictExtractor(
            eval_config=eval_config, eval_shared_model=eval_shared_models)

        examples = [
            self._makeExample(input1=0.0, input2=1.0, non_model_feature=0),
            self._makeExample(input1=1.0, input2=0.0, non_model_feature=1),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | predictor.stage_name >> predictor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    got = got[0]
                    self.assertIn(constants.PREDICTIONS_KEY, got)
                    self.assertLen(got[constants.PREDICTIONS_KEY], 2)

                    for item in got[constants.PREDICTIONS_KEY]:
                        if multi_model:
                            self.assertIn('model1', item)
                            self.assertIn('model2', item)
                            if multi_output:
                                self.assertIn('Identity', item['model1'])
                                self.assertIn('Identity_1', item['model1'])

                        elif multi_output:
                            self.assertIn('Identity', item)
                            self.assertIn('Identity_1', item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Exemplo n.º 26
0
    def test_features_extractor(self):
        model_spec = config_pb2.ModelSpec()
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)

        schema = text_format.Parse(
            """
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = tf_example_record.TFExampleBeamRecord(
            schema=schema,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            physical_format='inmem',
            telemetry_descriptors=['testing'])

        example_kwargs = [
            {
                'fixed_int': 1,
                'fixed_float': 1.0,
                'fixed_string': 'fixed_string1'
            },
            {
                'fixed_int': 1,
                'fixed_float': 1.0,
                'fixed_string': 'fixed_string2'
            },
            {
                'fixed_int': 2,
                'fixed_float': 0.0,
                'fixed_string': 'fixed_string3'
            },
        ]
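        # None of the examples set 'example_weight', so that column should
        # decode as all None values (verified in check_result below).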

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create([
                    self._makeExample(**kwargs).SerializeToString()
                    for kwargs in example_kwargs
                ],
                                          reshuffle=False)
                | 'DecodeToRecordBatch' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertIn(constants.FEATURES_KEY, got[0])
                    self.assertLen(got[0][constants.FEATURES_KEY],
                                   4)  # 4 features
                    self.assertIn('example_weight',
                                  got[0][constants.FEATURES_KEY])
                    # Arrays of type np.object won't compare with assertAllClose
                    self.assertEqual(
                        got[0][constants.FEATURES_KEY]
                        ['example_weight'].tolist(), [None, None, None])
                    self.assertIn('fixed_int', got[0][constants.FEATURES_KEY])
                    self.assertAllClose(
                        got[0][constants.FEATURES_KEY]['fixed_int'],
                        np.array([1, 1, 2]))
                    self.assertIn('fixed_float',
                                  got[0][constants.FEATURES_KEY])
                    self.assertAllClose(
                        got[0][constants.FEATURES_KEY]['fixed_float'],
                        np.array([1.0, 1.0, 0.0]))
                    self.assertIn('fixed_string',
                                  got[0][constants.FEATURES_KEY])
                    # Arrays of type np.object won't compare with assertAllClose
                    self.assertEqual(
                        got[0][
                            constants.FEATURES_KEY]['fixed_string'].tolist(),
                        [b'fixed_string1', b'fixed_string2', b'fixed_string3'])
                    self.assertIn(constants.INPUT_KEY, got[0])
                    self.assertLen(got[0][constants.INPUT_KEY],
                                   3)  # 3 examples

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Exemplo n.º 27
0
    def testPredictionsExtractorWithoutEvalSharedModel(self):
        model_spec1 = config_pb2.ModelSpec(name='model1',
                                           prediction_key='prediction')
        model_spec2 = config_pb2.ModelSpec(name='model2',
                                           prediction_keys={
                                               'output1': 'prediction1',
                                               'output2': 'prediction2'
                                           })
        eval_config = config_pb2.EvalConfig(
            model_specs=[model_spec1, model_spec2])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        prediction_extractor = predictions_extractor.PredictionsExtractor(
            eval_config)
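        # No EvalSharedModel is supplied, so predictions are read directly from
        # the feature columns named by each spec: 'prediction' for model1 and
        # 'prediction1'/'prediction2' per output for model2.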

        schema = text_format.Parse(
            """
        feature {
          name: "prediction"
          type: FLOAT
        }
        feature {
          name: "prediction1"
          type: FLOAT
        }
        feature {
          name: "prediction2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(prediction=1.0,
                              prediction1=1.0,
                              prediction2=0.0,
                              fixed_int=1),
            self._makeExample(prediction=1.0,
                              prediction1=1.0,
                              prediction2=1.0,
                              fixed_int=1)
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.PREDICTIONS_KEY][0])
                    self.assertAlmostEqual(
                        got[0][constants.PREDICTIONS_KEY][0]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.PREDICTIONS_KEY][0]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([0.0])
                        })

                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.PREDICTIONS_KEY][1])
                    self.assertAlmostEqual(
                        got[0][constants.PREDICTIONS_KEY][1]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.PREDICTIONS_KEY][1]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Exemplo n.º 28
0
    def testPreprocessedFeaturesExtractor(self, save_as_keras,
                                          preprocessing_function_names,
                                          expected_extract_keys):
        export_path = self.createModelWithMultipleDenseInputs(save_as_keras)

        eval_config = config_pb2.EvalConfig(model_specs=[
            config_pb2.ModelSpec(
                preprocessing_function_names=preprocessing_function_names)
        ])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_path, tags=[tf.saved_model.SERVING])
        schema = self.createDenseInputsSchema()
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        transformation_extractor = (
            transformed_features_extractor.TransformedFeaturesExtractor(
                eval_config=eval_config,
                eval_shared_model=eval_shared_model,
                tensor_adapter_config=tensor_adapter_config))

        examples = [
            self._makeExample(input_1=1.0, input_2=2.0),
            self._makeExample(input_1=3.0, input_2=4.0),
            self._makeExample(input_1=5.0, input_2=6.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | transformation_extractor.stage_name >>
                transformation_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 2)
                    for item in got:
                        for extracts_key, feature_keys in expected_extract_keys.items(
                        ):
                            self.assertIn(extracts_key, item)
                            for value in item[extracts_key]:
                                self.assertEqual(set(feature_keys),
                                                 set(value.keys()),
                                                 msg='got={}'.format(item))

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Exemplo n.º 29
0
    def testExampleWeightsExtractor(self, example_weight):
        model_spec = config_pb2.ModelSpec(example_weight_key=example_weight)
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        example_weight_extractor = (
            example_weights_extractor.ExampleWeightsExtractor(eval_config))

        example_weight_feature = ''
        if example_weight is not None:
            example_weight_feature = """
          feature {
            name: "%s"
            type: FLOAT
          }
          """ % example_weight
        schema = text_format.Parse(
            example_weight_feature + """
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        def maybe_add_key(d, key, value):
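            # Attach the weight feature only when a key is configured; in the
            # example_weight=None case the examples carry no weight feature.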
            if key is not None:
                d[key] = value
            return d

        example_kwargs = [
            maybe_add_key({
                'fixed_int': 1,
            }, example_weight, 0.5),
            maybe_add_key({
                'fixed_int': 1,
            }, example_weight, 0.0),
            maybe_add_key({
                'fixed_int': 2,
            }, example_weight, 1.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create([
                    self._makeExample(**kwargs).SerializeToString()
                    for kwargs in example_kwargs
                ],
                                          reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    if example_weight:
                        self.assertAllClose(
                            got[0][constants.EXAMPLE_WEIGHTS_KEY],
                            np.array([0.5, 0.0, 1.0]))
                    else:
                        self.assertNotIn(constants.EXAMPLE_WEIGHTS_KEY, got[0])

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Exemplo n.º 30
0
    def testBatchSizeLimitWithKerasModel(self):
        input1 = tf.keras.layers.Input(shape=(1, ),
                                       batch_size=1,
                                       name='input1')
        input2 = tf.keras.layers.Input(shape=(1, ),
                                       batch_size=1,
                                       name='input2')

        inputs = [input1, input2]
        input_layer = tf.keras.layers.concatenate(inputs)

        def add_1(tensor):
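            # tf.add_n requires identical shapes and the constant below has
            # static shape (1, 2), so the model only accepts batches of size 1
            # (the batch-size limit this test exercises).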
            return tf.add_n([tensor, tf.constant(1.0, shape=(1, 2))])

        assert_layer = tf.keras.layers.Lambda(add_1)(input_layer)

        model = tf.keras.models.Model(inputs, assert_layer)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                      loss=tf.keras.losses.binary_crossentropy,
                      metrics=['accuracy'])

        export_dir = self._getExportDir()
        model.save(export_dir, save_format='tf')

        eval_config = config_pb2.EvalConfig(
            model_specs=[config_pb2.ModelSpec()])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "input1"
              value {
                dense_tensor {
                  column_name: "input1"
                  shape { dim { size: 1 } }
                }
              }
            }
            tensor_representation {
              key: "input2"
              value {
                dense_tensor {
                  column_name: "input2"
                  shape { dim { size: 1 } }
                }
              }
            }
          }
        }
        feature {
          name: "input1"
          type: FLOAT
        }
        feature {
          name: "input2"
          type: FLOAT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        prediction_extractor = predictions_extractor.PredictionsExtractor(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            tensor_adapter_config=tensor_adapter_config)

        examples = []
        for _ in range(4):
            examples.append(self._makeExample(input1=0.0, input2=1.0))

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            predict_extracts = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=1)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            # pylint: enable=no-value-for-parameter
            def check_result(got):
                try:
                    self.assertLen(got, 4)
                    # We can't verify the actual predictions, but we can verify the keys.
                    for item in got:
                        self.assertIn(constants.PREDICTIONS_KEY, item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(predict_extracts, check_result, label='result')