Code Example #1
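Single-model sklearn prediction: the chain is a FeaturesExtractor followed by the sklearn predict extractor, and the check compares the shapes of the extracted labels and predictions.
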
    def testMakeSklearnPredictExtractor(self):
        """Tests that predictions are made from extracts for a single model."""
        feature_extractor = features_extractor.FeaturesExtractor(
            self._eval_config)
        prediction_extractor = (
            sklearn_predict_extractor._make_sklearn_predict_extractor(
                self._eval_shared_model))
        with beam.Pipeline() as pipeline:
            predict_extracts = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in self._examples])
                | 'BatchExamples' >> self._tfx_io.BeamSource()
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()  # pylint: disable=no-value-for-parameter
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            def check_result(actual):
                try:
                    for item in actual:
                        self.assertEqual(item['labels'].shape,
                                         item['predictions'].shape)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(predict_extracts, check_result)
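
Every example in this collection follows the same pipeline skeleton: create serialized tf.Examples, batch them with a TFXIO BeamSource, convert the batches to extracts, and chain extractor PTransforms. A minimal sketch of that skeleton as a reusable helper follows; run_extract_pipeline is a hypothetical name, and the model_eval_lib import path is an assumption that may differ across TFMA versions.

# Sketch of the shared extract-pipeline skeleton. Each test supplies concrete
# `tfx_io` and extractor objects; the only TFMA calls used here are the ones
# already shown in the tests above.
import apache_beam as beam
from apache_beam.testing import util
from tensorflow_model_analysis import model_eval_lib  # assumed import path


def run_extract_pipeline(serialized_examples, tfx_io, extractors, check_fn):
  with beam.Pipeline() as pipeline:
    extracts = (
        pipeline
        | 'Create' >> beam.Create(serialized_examples, reshuffle=False)
        | 'BatchExamples' >> tfx_io.BeamSource()
        | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts())
    # Chain each extractor under its own stage name.
    for extractor in extractors:
      extracts = extracts | extractor.stage_name >> extractor.ptransform
    util.assert_that(extracts, check_fn)
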
Code Example #2
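Two linear classifiers are exported and evaluated together through one PredictExtractor; the check verifies that each extract carries features, labels, per-model predictions, and example weights equal to the 'age' feature.
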
  def testMultiModelPredict(self):
    temp_eval_export_dir = self._getEvalExportDir()
    _, model1_dir = linear_classifier.simple_linear_classifier(
        None, temp_eval_export_dir)
    model1 = model_eval_lib.default_eval_shared_model(
        eval_saved_model_path=model1_dir)
    _, model2_dir = linear_classifier.simple_linear_classifier(
        None, temp_eval_export_dir)
    model2 = model_eval_lib.default_eval_shared_model(
        eval_saved_model_path=model2_dir)
    eval_shared_model = {'model1': model1, 'model2': model2}
    eval_config = config.EvalConfig(model_specs=[
        config.ModelSpec(name='model1', example_weight_key='age'),
        config.ModelSpec(name='model2', example_weight_key='age')
    ])

    tfx_io = raw_tf_record.RawBeamRecordTFXIO(
        physical_format='inmemory',
        raw_record_column_name=constants.ARROW_INPUT_COLUMN,
        telemetry_descriptors=['TFMATest'])
    extractor = predict_extractor.PredictExtractor(
        eval_shared_model, eval_config=eval_config)
    with beam.Pipeline() as pipeline:
      examples = [
          self._makeExample(age=3.0, language='english', label=1.0),
          self._makeExample(age=3.0, language='chinese', label=0.0),
          self._makeExample(age=4.0, language='english', label=1.0),
          self._makeExample(age=5.0, language='chinese', label=0.0),
      ]
      serialized_examples = [e.SerializeToString() for e in examples]

      predict_extracts = (
          pipeline
          | beam.Create(serialized_examples, reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | 'Predict' >> extractor.ptransform)

      def check_result(got):
        try:
          self.assertLen(got, 2)
          for item in got:
            self.assertIn(constants.FEATURES_KEY, item)
            for feature in ('language', 'age'):
              for features_dict in item[constants.FEATURES_KEY]:
                self.assertIn(feature, features_dict)
            self.assertIn(constants.LABELS_KEY, item)
            self.assertIn(constants.PREDICTIONS_KEY, item)
            for model in ('model1', 'model2'):
              for predictions_dict in item[constants.PREDICTIONS_KEY]:
                self.assertIn(model, predictions_dict)
            self.assertIn(constants.EXAMPLE_WEIGHTS_KEY, item)
            for i in range(len(item[constants.FEATURES_KEY])):
              self.assertAlmostEqual(item[constants.FEATURES_KEY][i]['age'],
                                     item[constants.EXAMPLE_WEIGHTS_KEY][i])

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(predict_extracts, check_result)
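
These tests construct inputs with a _makeExample helper inherited from their TFMA test base class. A rough standalone equivalent is sketched below; make_example is a hypothetical stand-in, not the real helper.

# Hypothetical stand-in for the test base class's _makeExample: builds a
# tf.train.Example with one value per feature, inferring the feature type
# from the Python value type.
import tensorflow as tf


def make_example(**features):
  feature_map = {}
  for name, value in features.items():
    if isinstance(value, float):
      feature = tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
    elif isinstance(value, int):
      feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    else:
      if isinstance(value, str):
        value = value.encode('utf-8')
      feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    feature_map[name] = feature
  return tf.train.Example(features=tf.train.Features(feature=feature_map))


# Usage mirrors the tests above:
serialized = make_example(age=3.0, language='english', label=1.0).SerializeToString()
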
Code Example #3
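With a bare ModelSpec and empty input records, the FeaturesExtractor should emit an empty features dict while still passing the three raw inputs through under INPUT_KEY.
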
    def test_features_extractor_no_features(self):
        model_spec = config_pb2.ModelSpec()
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        tfx_io = tf_example_record.TFExampleBeamRecord(
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            physical_format='inmem',
            telemetry_descriptors=['testing'])

        with beam.Pipeline() as pipeline:
            result = (
                pipeline | 'Create' >> beam.Create([b''] * 3)
                | 'DecodeToRecordBatch' >> tfx_io.BeamSource(batch_size=3)
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform)

            def check_result(got):
                self.assertLen(got, 1)
                self.assertIn(constants.FEATURES_KEY, got[0])
                self.assertEmpty(got[0][constants.FEATURES_KEY])
                self.assertIn(constants.INPUT_KEY, got[0])
                self.assertLen(got[0][constants.INPUT_KEY], 3)

            util.assert_that(result, check_result, label='CheckResult')
Code Example #4
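Exercises a model that only accepts batches of size 1: four examples are batched one at a time, and each resulting extract must contain a predictions key.
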
  def testBatchSizeLimit(self):
    temp_export_dir = self._getExportDir()
    _, export_dir = batch_size_limited_classifier.simple_batch_size_limited_classifier(
        None, temp_export_dir)
    eval_shared_model = self.createTestEvalSharedModel(
        eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
    eval_config = config.EvalConfig(model_specs=[config.ModelSpec()])
    schema = text_format.Parse(
        """
        feature {
          name: "classes"
          type: BYTES
        }
        feature {
          name: "scores"
          type: FLOAT
        }
        feature {
          name: "labels"
          type: BYTES
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
        arrow_schema=tfx_io.ArrowSchema(),
        tensor_representations=tfx_io.TensorRepresentations())
    feature_extractor = features_extractor.FeaturesExtractor(eval_config)
    prediction_extractor = predictions_extractor.PredictionsExtractor(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        tensor_adapter_config=tensor_adapter_config)

    examples = []
    for _ in range(4):
      examples.append(
          self._makeExample(classes='first', scores=0.0, labels='third'))

    with beam.Pipeline() as pipeline:
      predict_extracts = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=1)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | feature_extractor.stage_name >> feature_extractor.ptransform
          | prediction_extractor.stage_name >> prediction_extractor.ptransform)

      def check_result(got):
        try:
          self.assertLen(got, 4)
          # We can't verify the actual predictions, but we can verify the keys.
          for item in got:
            self.assertIn(constants.PREDICTIONS_KEY, item)

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(predict_extracts, check_result, label='result')
Code Example #5
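Applies a SQL-based slice key extractor to two record batches with different schemas (the second has an extra column); both batches should yield slice keys only for the rows where fixed_int = 1.
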
  def testSqlSliceKeyExtractorWithMultipleSchema(self):
    eval_config = config_pb2.EvalConfig(slicing_specs=[
        config_pb2.SlicingSpec(slice_keys_sql="""
        SELECT
          STRUCT(fixed_string)
        FROM
          example.fixed_string,
          example.fixed_int
        WHERE fixed_int = 1
        """)
    ])
    slice_key_extractor = sql_slice_key_extractor.SqlSliceKeyExtractor(
        eval_config)

    record_batch_1 = pa.RecordBatch.from_arrays([
        pa.array([[1], [1], [2]], type=pa.list_(pa.int64())),
        pa.array([[1.0], [1.0], [2.0]], type=pa.list_(pa.float64())),
        pa.array([['fixed_string1'], ['fixed_string2'], ['fixed_string3']],
                 type=pa.list_(pa.string())),
    ], ['fixed_int', 'fixed_float', 'fixed_string'])
    record_batch_2 = pa.RecordBatch.from_arrays([
        pa.array([[1], [1], [2]], type=pa.list_(pa.int64())),
        pa.array([[1.0], [1.0], [2.0]], type=pa.list_(pa.float64())),
        pa.array([['fixed_string1'], ['fixed_string2'], ['fixed_string3']],
                 type=pa.list_(pa.string())),
        pa.array([['extra_field1'], ['extra_field2'], ['extra_field3']],
                 type=pa.list_(pa.string())),
    ], ['fixed_int', 'fixed_float', 'fixed_string', 'extra_field'])

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([record_batch_1, record_batch_2],
                                    reshuffle=False)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | slice_key_extractor.stage_name >> slice_key_extractor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 2)
          self.assertEqual(got[0][constants.SLICE_KEY_TYPES_KEY],
                           [[(('fixed_string', 'fixed_string1'),)],
                            [(('fixed_string', 'fixed_string2'),)], []])
          self.assertEqual(got[1][constants.SLICE_KEY_TYPES_KEY],
                           [[(('fixed_string', 'fixed_string1'),)],
                            [(('fixed_string', 'fixed_string2'),)], []])

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result)
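
The record batches above use list-valued Arrow columns, one list per example. Independent of TFMA, the construction can be exercised with pyarrow alone; the column names below are illustrative.

# Standalone pyarrow illustration of the RecordBatch construction used above.
import pyarrow as pa

batch = pa.RecordBatch.from_arrays([
    pa.array([[1], [1], [2]], type=pa.list_(pa.int64())),
    pa.array([['a'], ['b'], ['c']], type=pa.list_(pa.string())),
], ['fixed_int', 'fixed_string'])

print(batch.num_rows)               # 3
print(batch.column(0).to_pylist())  # [[1], [1], [2]]
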
Code Example #6
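With no slicing specs configured, the SQL slice key extractor should emit an empty slice-key row for every example.
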
    def testSqlSliceKeyExtractorWithEmptySqlConfig(self):
        eval_config = config_pb2.EvalConfig()
        feature_extractor = features_extractor.FeaturesExtractor(
            eval_config=eval_config)
        slice_key_extractor = sql_slice_key_extractor.SqlSliceKeyExtractor(
            eval_config)

        tfx_io = tf_example_record.TFExampleBeamRecord(
            physical_format='inmem',
            telemetry_descriptors=['test', 'component'],
            schema=_SCHEMA,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        examples = [
            self._makeExample(fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string2'),
            self._makeExample(fixed_int=2,
                              fixed_float=0.0,
                              fixed_string='fixed_string3')
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | slice_key_extractor.stage_name >>
                slice_key_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    np.testing.assert_equal(
                        got[0][constants.SLICE_KEY_TYPES_KEY],
                        types.VarLenTensorValue.from_dense_rows(
                            [np.array([]),
                             np.array([]),
                             np.array([])]))

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result)
Code Example #7
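The single-batch SQL slice key case: the two rows with fixed_int = 1 each get a ('fixed_string', value) slice key, and the third row gets none.
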
  def testSqlSliceKeyExtractor(self):
    eval_config = config_pb2.EvalConfig(slicing_specs=[
        config_pb2.SlicingSpec(slice_keys_sql="""
        SELECT
          STRUCT(fixed_string)
        FROM
          example.fixed_string,
          example.fixed_int
        WHERE fixed_int = 1
        """)
    ])
    slice_key_extractor = sql_slice_key_extractor.SqlSliceKeyExtractor(
        eval_config)

    tfx_io = tf_example_record.TFExampleBeamRecord(
        physical_format='inmem',
        telemetry_descriptors=['test', 'component'],
        schema=_SCHEMA,
        raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    examples = [
        self._makeExample(
            fixed_int=1, fixed_float=1.0, fixed_string='fixed_string1'),
        self._makeExample(
            fixed_int=1, fixed_float=1.0, fixed_string='fixed_string2'),
        self._makeExample(
            fixed_int=2, fixed_float=0.0, fixed_string='fixed_string3')
    ]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | slice_key_extractor.stage_name >> slice_key_extractor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 1)
          self.assertEqual(got[0][constants.SLICE_KEY_TYPES_KEY],
                           [[(('fixed_string', 'fixed_string1'),)],
                            [(('fixed_string', 'fixed_string2'),)], []])

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result)
Code Example #8
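Like Code Example #1, but with two sklearn models registered as 'model1' and 'model2'; each extract's predictions dict must contain an entry per model.
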
    def testMakeSklearnPredictExtractorWithMultiModels(self):
        """Tests that predictions are made from extracts for multiple models."""
        eval_config = config.EvalConfig(model_specs=[
            config.ModelSpec(name='model1'),
            config.ModelSpec(name='model2'),
        ])
        eval_export_dir_1 = os.path.join(self._eval_export_dir, '1')
        self._create_sklearn_model(eval_export_dir_1)
        eval_shared_model_1 = sklearn_predict_extractor.custom_eval_shared_model(
            eval_saved_model_path=eval_export_dir_1,
            model_name='model1',
            eval_config=eval_config)
        eval_export_dir_2 = os.path.join(self._eval_export_dir, '2')
        self._create_sklearn_model(eval_export_dir_2)
        eval_shared_model_2 = sklearn_predict_extractor.custom_eval_shared_model(
            eval_saved_model_path=eval_export_dir_2,
            model_name='model2',
            eval_config=eval_config)

        feature_extractor = features_extractor.FeaturesExtractor(
            self._eval_config)
        prediction_extractor = (
            sklearn_predict_extractor._make_sklearn_predict_extractor(
                eval_shared_model={
                    'model1': eval_shared_model_1,
                    'model2': eval_shared_model_2,
                }))
        with beam.Pipeline() as pipeline:
            predict_extracts = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in self._examples])
                | 'BatchExamples' >> self._tfx_io.BeamSource()
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()  # pylint: disable=no-value-for-parameter
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | prediction_extractor.stage_name >>
                prediction_extractor.ptransform)

            def check_result(actual):
                try:
                    for item in actual:
                        self.assertEqual(item['labels'].shape,
                                         item['predictions'].shape)
                        self.assertIn('model1', item['predictions'][0])
                        self.assertIn('model2', item['predictions'][0])

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(predict_extracts, check_result)
Code Example #9
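A reusable harness rather than a single test: it assembles the default extractors, computes metrics and plots with the legacy evaluator, and applies whichever custom metrics, plots, or result checks the caller supplies.
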
    def _runTestWithCustomCheck(self,
                                examples,
                                eval_export_dir,
                                metrics_callbacks,
                                slice_spec=None,
                                custom_metrics_check=None,
                                custom_plots_check=None,
                                custom_result_check=None):
        # make sure we are doing some checks
        self.assertTrue(custom_metrics_check is not None
                        or custom_plots_check is not None
                        or custom_result_check is not None)
        serialized_examples = [ex.SerializeToString() for ex in examples]
        slicing_specs = None
        if slice_spec:
            slicing_specs = [s.to_proto() for s in slice_spec]
        eval_config = config.EvalConfig(slicing_specs=slicing_specs)
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=eval_export_dir,
            add_metrics_callbacks=metrics_callbacks)
        extractors = model_eval_lib.default_extractors(
            eval_config=eval_config, eval_shared_model=eval_shared_model)
        tfx_io = raw_tf_record.RawBeamRecordTFXIO(
            physical_format='inmemory',
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            telemetry_descriptors=['TFMATest'])
        with beam.Pipeline() as pipeline:
            (metrics, plots), _ = (
                pipeline
                | 'Create' >> beam.Create(serialized_examples)
                | 'BatchExamples' >> tfx_io.BeamSource()
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | 'Extract' >> tfma_unit.Extract(extractors=extractors)  # pylint: disable=no-value-for-parameter
                | 'ComputeMetricsAndPlots' >>
                legacy_metrics_and_plots_evaluator._ComputeMetricsAndPlots(  # pylint: disable=protected-access
                    eval_shared_model=eval_shared_model,
                    compute_confidence_intervals=self.compute_confidence_intervals,
                    random_seed_for_testing=self.deterministic_test_seed))
            if custom_metrics_check is not None:
                util.assert_that(metrics,
                                 custom_metrics_check,
                                 label='metrics')
            if custom_plots_check is not None:
                util.assert_that(plots, custom_plots_check, label='plot')

        result = pipeline.run()
        if custom_result_check is not None:
            custom_result_check(result)
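Code Example #10

Runs three examples through the BatchedInputExtractor and asserts the exact features, labels, and example weights extracted for each row of the batch.
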
  def testBatchedInputExtractor(self):
    model_spec = config.ModelSpec(
        label_key='label', example_weight_key='example_weight')
    eval_config = config.EvalConfig(model_specs=[model_spec])
    input_extractor = batched_input_extractor.BatchedInputExtractor(eval_config)

    schema = text_format.Parse(
        """
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    examples = [
        self._makeExample(
            label=1.0,
            example_weight=0.5,
            fixed_int=1,
            fixed_float=1.0,
            fixed_string='fixed_string1'),
        self._makeExample(
            label=0.0,
            example_weight=0.0,
            fixed_int=1,
            fixed_float=1.0,
            fixed_string='fixed_string2'),
        self._makeExample(
            label=0.0,
            example_weight=1.0,
            fixed_int=2,
            fixed_float=0.0,
            fixed_string='fixed_string3')
    ]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | input_extractor.stage_name >> input_extractor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 1)
          self.assertDictElementsAlmostEqual(got[0][constants.FEATURES_KEY][0],
                                             {
                                                 'fixed_int': np.array([1]),
                                                 'fixed_float': np.array([1.0]),
                                             })
          self.assertEqual(got[0][constants.FEATURES_KEY][0]['fixed_string'],
                           np.array([b'fixed_string1']))
          self.assertAlmostEqual(got[0][constants.LABELS_KEY][0],
                                 np.array([1.0]))
          self.assertAlmostEqual(got[0][constants.EXAMPLE_WEIGHTS_KEY][0],
                                 np.array([0.5]))
          self.assertDictElementsAlmostEqual(got[0][constants.FEATURES_KEY][1],
                                             {
                                                 'fixed_int': np.array([1]),
                                                 'fixed_float': np.array([1.0]),
                                             })
          self.assertEqual(got[0][constants.FEATURES_KEY][1]['fixed_string'],
                           np.array([b'fixed_string2']))
          self.assertAlmostEqual(got[0][constants.LABELS_KEY][1],
                                 np.array([0.0]))
          self.assertAlmostEqual(got[0][constants.EXAMPLE_WEIGHTS_KEY][1],
                                 np.array([0.0]))
          self.assertDictElementsAlmostEqual(got[0][constants.FEATURES_KEY][2],
                                             {
                                                 'fixed_int': np.array([2]),
                                                 'fixed_float': np.array([0.0]),
                                             })
          self.assertEqual(got[0][constants.FEATURES_KEY][2]['fixed_string'],
                           np.array([b'fixed_string3']))
          self.assertAlmostEqual(got[0][constants.LABELS_KEY][2],
                                 np.array([0.0]))
          self.assertAlmostEqual(got[0][constants.EXAMPLE_WEIGHTS_KEY][2],
                                 np.array([1.0]))

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result, label='result')
Code Example #11
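The multi-model variant of the batched input extraction test: labels, example weights, and predictions are keyed per model, and model2 additionally fans out per output head.
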
    def testBatchedInputExtractorMultiModel(self):
        model_spec1 = config.ModelSpec(name='model1',
                                       label_key='label',
                                       example_weight_key='example_weight',
                                       prediction_key='fixed_float')
        model_spec2 = config.ModelSpec(name='model2',
                                       label_keys={
                                           'output1': 'label1',
                                           'output2': 'label2'
                                       },
                                       example_weight_keys={
                                           'output1': 'example_weight1',
                                           'output2': 'example_weight2'
                                       },
                                       prediction_keys={
                                           'output1': 'fixed_float',
                                           'output2': 'fixed_float'
                                       })
        eval_config = config.EvalConfig(model_specs=[model_spec1, model_spec2])
        input_extractor = batched_input_extractor.BatchedInputExtractor(
            eval_config)

        schema = text_format.Parse(
            """
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "label1"
          type: FLOAT
        }
        feature {
          name: "label2"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "example_weight1"
          type: FLOAT
        }
        feature {
          name: "example_weight2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.BATCHED_INPUT_KEY)

        examples = [
            self._makeExample(label=1.0,
                              label1=1.0,
                              label2=0.0,
                              example_weight=0.5,
                              example_weight1=0.5,
                              example_weight2=0.5,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(label=1.0,
                              label1=1.0,
                              label2=1.0,
                              example_weight=0.0,
                              example_weight1=0.0,
                              example_weight2=1.0,
                              fixed_int=1,
                              fixed_float=2.0,
                              fixed_string='fixed_string2'),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | input_extractor.stage_name >> input_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_FEATURES_KEY][0], {
                            'fixed_int': np.array([1]),
                        })
                    self.assertEqual(
                        got[0][constants.BATCHED_FEATURES_KEY][0]
                        ['fixed_string'], np.array([b'fixed_string1']))
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.BATCHED_LABELS_KEY][0])
                        self.assertIn(
                            model_name,
                            got[0][constants.BATCHED_EXAMPLE_WEIGHTS_KEY][0])
                        self.assertIn(
                            model_name,
                            got[0][constants.BATCHED_PREDICTIONS_KEY][0])
                    self.assertAlmostEqual(
                        got[0][constants.BATCHED_LABELS_KEY][0]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_LABELS_KEY][0]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([0.0])
                        })
                    self.assertAlmostEqual(
                        got[0][constants.BATCHED_EXAMPLE_WEIGHTS_KEY][0]
                        ['model1'], np.array([0.5]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_EXAMPLE_WEIGHTS_KEY][0]
                        ['model2'], {
                            'output1': np.array([0.5]),
                            'output2': np.array([0.5])
                        })
                    self.assertAlmostEqual(
                        got[0][constants.BATCHED_PREDICTIONS_KEY][0]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_PREDICTIONS_KEY][0]['model2'],
                        {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })

                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_FEATURES_KEY][1], {
                            'fixed_int': np.array([1]),
                        })
                    self.assertEqual(
                        got[0][constants.BATCHED_FEATURES_KEY][1]
                        ['fixed_string'], np.array([b'fixed_string2']))
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.BATCHED_LABELS_KEY][1])
                        self.assertIn(
                            model_name,
                            got[0][constants.BATCHED_EXAMPLE_WEIGHTS_KEY][1])
                        self.assertIn(
                            model_name,
                            got[0][constants.BATCHED_PREDICTIONS_KEY][1])
                    self.assertAlmostEqual(
                        got[0][constants.BATCHED_LABELS_KEY][1]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_LABELS_KEY][1]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })
                    self.assertAlmostEqual(
                        got[0][constants.BATCHED_EXAMPLE_WEIGHTS_KEY][1]
                        ['model1'], np.array([0.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_EXAMPLE_WEIGHTS_KEY][1]
                        ['model2'], {
                            'output1': np.array([0.0]),
                            'output2': np.array([1.0])
                        })
                    self.assertAlmostEqual(
                        got[0][constants.BATCHED_PREDICTIONS_KEY][1]['model1'],
                        np.array([2.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.BATCHED_PREDICTIONS_KEY][1]['model2'],
                        {
                            'output1': np.array([2.0]),
                            'output2': np.array([2.0])
                        })

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #12
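When no EvalSharedModel is provided, the PredictionsExtractor reads precomputed predictions straight from the input features, keyed by model name and, for model2, by output name.
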
  def testPredictionsExtractorWithoutEvalSharedModel(self):
    model_spec1 = config_pb2.ModelSpec(
        name='model1', prediction_key='prediction')
    model_spec2 = config_pb2.ModelSpec(
        name='model2',
        prediction_keys={
            'output1': 'prediction1',
            'output2': 'prediction2'
        })
    eval_config = config_pb2.EvalConfig(model_specs=[model_spec1, model_spec2])
    schema = text_format.Parse(
        """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "fixed_int"
              value {
                dense_tensor {
                  column_name: "fixed_int"
                }
              }
            }
          }
        }
        feature {
          name: "prediction"
          type: FLOAT
        }
        feature {
          name: "prediction1"
          type: FLOAT
        }
        feature {
          name: "prediction2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
        arrow_schema=tfx_io.ArrowSchema(),
        tensor_representations=tfx_io.TensorRepresentations())
    feature_extractor = features_extractor.FeaturesExtractor(
        eval_config=eval_config,
        tensor_representations=tensor_adapter_config.tensor_representations)
    prediction_extractor = predictions_extractor.PredictionsExtractor(
        eval_config)

    examples = [
        self._makeExample(
            prediction=1.0, prediction1=1.0, prediction2=0.0, fixed_int=1),
        self._makeExample(
            prediction=1.0, prediction1=1.0, prediction2=1.0, fixed_int=1)
    ]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | feature_extractor.stage_name >> feature_extractor.ptransform
          | prediction_extractor.stage_name >> prediction_extractor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 1)
          for model_name in ('model1', 'model2'):
            self.assertIn(model_name, got[0][constants.PREDICTIONS_KEY])
          self.assertAllClose(got[0][constants.PREDICTIONS_KEY]['model1'],
                              np.array([1.0, 1.0]))
          self.assertAllClose(got[0][constants.PREDICTIONS_KEY]['model2'], {
              'output1': np.array([1.0, 1.0]),
              'output2': np.array([0.0, 1.0])
          })

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result, label='result')
Code Example #13
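Trains a small Keras model, converts it with the TensorFlow JS converter, and runs the TFJS prediction extractor; the test is parameterized over multi-model and multi-output configurations.
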
  def testTFJSPredictExtractorWithKerasModel(self, multi_model, multi_output):
    if not _TFJS_IMPORTED:
      self.skipTest('This test requires TensorFlow JS.')

    input1 = tf.keras.layers.Input(shape=(1,), name='input1')
    input2 = tf.keras.layers.Input(shape=(1,), name='input2')
    inputs = [input1, input2]
    input_layer = tf.keras.layers.concatenate(inputs)
    output_layers = {}
    output_layers['output1'] = (
        tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                              name='output1')(input_layer))
    if multi_output:
      output_layers['output2'] = (
          tf.keras.layers.Dense(1, activation=tf.nn.sigmoid,
                                name='output2')(input_layer))

    model = tf.keras.models.Model(inputs, output_layers)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
        loss=tf.keras.losses.binary_crossentropy,
        metrics=['accuracy'])

    train_features = {'input1': [[0.0], [1.0]], 'input2': [[1.0], [0.0]]}
    labels = {'output1': [[1], [0]]}
    if multi_output:
      labels['output2'] = [[1], [0]]

    example_weights = {'output1': [1.0, 0.5]}
    if multi_output:
      example_weights['output2'] = [1.0, 0.5]
    dataset = tf.data.Dataset.from_tensor_slices(
        (train_features, labels, example_weights))
    dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
    model.fit(dataset, steps_per_epoch=1)

    src_model_path = tempfile.mkdtemp()
    model.save(src_model_path)

    dst_model_path = tempfile.mkdtemp()
    converter.convert([
        '--input_format=tf_saved_model',
        '--saved_model_tags=serve',
        '--signature_name=serving_default',
        src_model_path,
        dst_model_path,
    ])

    model_specs = [config_pb2.ModelSpec(name='model1', model_type='tf_js')]
    if multi_model:
      model_specs.append(
          config_pb2.ModelSpec(name='model2', model_type='tf_js'))

    eval_config = config_pb2.EvalConfig(model_specs=model_specs)
    eval_shared_models = [
        self.createTestEvalSharedModel(
            model_name='model1',
            eval_saved_model_path=dst_model_path,
            model_type='tf_js')
    ]
    if multi_model:
      eval_shared_models.append(
          self.createTestEvalSharedModel(
              model_name='model2',
              eval_saved_model_path=dst_model_path,
              model_type='tf_js'))

    schema = text_format.Parse(
        """
        feature {
          name: "input1"
          type: FLOAT
        }
        feature {
          name: "input2"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    feature_extractor = features_extractor.FeaturesExtractor(eval_config)
    predictor = tfjs_predict_extractor.TFJSPredictExtractor(
        eval_config=eval_config, eval_shared_model=eval_shared_models)

    examples = [
        self._makeExample(input1=0.0, input2=1.0, non_model_feature=0),
        self._makeExample(input1=1.0, input2=0.0, non_model_feature=1),
    ]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | feature_extractor.stage_name >> feature_extractor.ptransform
          | predictor.stage_name >> predictor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 1)
          got = got[0]
          self.assertIn(constants.PREDICTIONS_KEY, got)
          for model in ('model1', 'model2') if multi_model else ('',):
            per_model_result = got[constants.PREDICTIONS_KEY]
            if model:
              self.assertIn(model, per_model_result)
              per_model_result = per_model_result[model]
            for output in ('Identity', 'Identity_1') if multi_output else ('',):
              per_output_result = per_model_result
              if output:
                self.assertIn(output, per_output_result)
                per_output_result = per_output_result[output]
              self.assertLen(per_output_result, 2)

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result, label='result')
Code Example #14
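Checks that the LabelsExtractor resolves per-model label keys: a scalar label for model1 and a dict of per-output labels for model2.
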
    def testLabelsExtractorMultiModel(self):
        model_spec1 = config.ModelSpec(name='model1', label_key='label')
        model_spec2 = config.ModelSpec(name='model2',
                                       label_keys={
                                           'output1': 'label1',
                                           'output2': 'label2'
                                       })
        eval_config = config.EvalConfig(model_specs=[model_spec1, model_spec2])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        label_extractor = labels_extractor.LabelsExtractor(eval_config)

        schema = text_format.Parse(
            """
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "label1"
          type: FLOAT
        }
        feature {
          name: "label2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(label=1.0, label1=1.0, label2=0.0, fixed_int=1),
            self._makeExample(label=1.0, label1=1.0, label2=1.0, fixed_int=1)
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | label_extractor.stage_name >> label_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.LABELS_KEY][0])
                    self.assertAlmostEqual(
                        got[0][constants.LABELS_KEY][0]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.LABELS_KEY][0]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([0.0])
                        })

                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name,
                                      got[0][constants.LABELS_KEY][1])
                    self.assertAlmostEqual(
                        got[0][constants.LABELS_KEY][1]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.LABELS_KEY][1]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #15
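Per-output example-weight extraction: 'example_weight3' is configured but absent from the schema, so only output1 and output2 appear in the result.
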
    def testExampleWeightsExtractorMultiOutput(self):
        model_spec = config_pb2.ModelSpec(
            example_weight_keys={
                'output1': 'example_weight1',
                'output2': 'example_weight2',
                'output3': 'example_weight3',
            })
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        example_weight_extractor = example_weights_extractor.ExampleWeightsExtractor(
            eval_config)

        schema = text_format.Parse(
            """
        feature {
          name: "example_weight1"
          type: FLOAT
        }
        feature {
          name: "example_weight2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(example_weight1=0.5,
                              example_weight2=0.5,
                              fixed_int=1),
            self._makeExample(example_weight1=0.0,
                              example_weight2=1.0,
                              fixed_int=1)
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertAllClose(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY], {
                            'output1': np.array([0.5, 0.0]),
                            'output2': np.array([0.5, 1.0]),
                        })

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #16
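Runs a fixed-prediction regression estimator through the batched predict extractor and compares the extracted predictions against the values baked into the examples.
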
    def testPredictExtractorWithRegressionModel(self):
        temp_export_dir = self._getExportDir()
        export_dir, _ = (fixed_prediction_estimator_extra_fields.
                         simple_fixed_prediction_estimator_extra_fields(
                             temp_export_dir, None))

        eval_config = config.EvalConfig(model_specs=[config.ModelSpec()])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        feature {
          name: "prediction"
          type: FLOAT
        }
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.BATCHED_INPUT_KEY)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        input_extractor = batched_input_extractor.BatchedInputExtractor(
            eval_config)
        predict_extractor = batched_predict_extractor_v2.BatchedPredictExtractor(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            tensor_adapter_config=tensor_adapter_config)

        examples = [
            self._makeExample(prediction=0.2,
                              label=1.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(prediction=0.8,
                              label=0.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string2'),
            self._makeExample(prediction=0.5,
                              label=0.0,
                              fixed_int=2,
                              fixed_float=1.0,
                              fixed_string='fixed_string3')
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | input_extractor.stage_name >> input_extractor.ptransform
                | predict_extractor.stage_name >> predict_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertIn(constants.BATCHED_PREDICTIONS_KEY, got[0])
                    expected_preds = [0.2, 0.8, 0.5]
                    self.assertAllClose(
                        got[0][constants.BATCHED_PREDICTIONS_KEY],
                        expected_preds)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
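Code Example #17

An end-to-end validation test: a candidate Keras model is compared against a baseline under several metric thresholds, the validation results are written to disk, and the test asserts on the exact set of expected failures.
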
    def testWriteValidationResults(self):
        model_dir, baseline_dir = self._getExportDir(), self._getBaselineDir()
        eval_shared_model = self._build_keras_model(model_dir, mul=0)
        baseline_eval_shared_model = self._build_keras_model(baseline_dir,
                                                             mul=1)
        validations_file = os.path.join(self._getTempDir(),
                                        constants.VALIDATIONS_KEY)
        schema = text_format.Parse(
            """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "input"
              value {
                dense_tensor {
                  column_name: "input"
                  shape { dim { size: 1 } }
                }
              }
            }
          }
        }
        feature {
          name: "input"
          type: FLOAT
        }
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "extra_feature"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        examples = [
            self._makeExample(input=0.0,
                              label=1.0,
                              example_weight=1.0,
                              extra_feature='non_model_feature'),
            self._makeExample(input=1.0,
                              label=0.0,
                              example_weight=0.5,
                              extra_feature='non_model_feature'),
        ]

        eval_config = config.EvalConfig(
            model_specs=[
                config.ModelSpec(name='candidate',
                                 label_key='label',
                                 example_weight_key='example_weight'),
                config.ModelSpec(name='baseline',
                                 label_key='label',
                                 example_weight_key='example_weight',
                                 is_baseline=True)
            ],
            slicing_specs=[config.SlicingSpec()],
            metrics_specs=[
                config.MetricsSpec(
                    metrics=[
                        config.MetricConfig(
                            class_name='WeightedExampleCount',
                            # 1.5 < 1, NOT OK.
                            threshold=config.MetricThreshold(
                                value_threshold=config.GenericValueThreshold(
                                    upper_bound={'value': 1}))),
                        config.MetricConfig(
                            class_name='ExampleCount',
                            # 2 > 10, NOT OK.
                            threshold=config.MetricThreshold(
                                value_threshold=config.GenericValueThreshold(
                                    lower_bound={'value': 10}))),
                        config.MetricConfig(
                            class_name='MeanLabel',
                            # 0 > 0 and 0 > 0%?: NOT OK.
                            threshold=config.MetricThreshold(
                                change_threshold=config.GenericChangeThreshold(
                                    direction=config.MetricDirection.HIGHER_IS_BETTER,
                                    relative={'value': 0},
                                    absolute={'value': 0}))),
                        config.MetricConfig(
                            # MeanPrediction = (0+0)/(1+0.5) = 0
                            class_name='MeanPrediction',
                            # -.01 < 0 < .01, OK.
                            # Diff% = -.333/.333 = -100% < -99%, OK.
                            # Diff = 0 - .333 = -.333 < 0, OK.
                            threshold=config.MetricThreshold(
                                value_threshold=config.GenericValueThreshold(
                                    upper_bound={'value': .01},
                                    lower_bound={'value': -.01}),
                                change_threshold=config.GenericChangeThreshold(
                                    direction=config.MetricDirection.LOWER_IS_BETTER,
                                    relative={'value': -.99},
                                    absolute={'value': 0})))
                    ],
                    model_names=['candidate', 'baseline']),
            ],
            options=config.Options(
                disabled_outputs={'values': ['eval_config.json']}),
        )
        slice_spec = [
            slicer.SingleSliceSpec(spec=s) for s in eval_config.slicing_specs
        ]
        eval_shared_models = {
            'candidate': eval_shared_model,
            'baseline': baseline_eval_shared_model
        }
        extractors = [
            batched_input_extractor.BatchedInputExtractor(eval_config),
            batched_predict_extractor_v2.BatchedPredictExtractor(
                eval_shared_model=eval_shared_models,
                eval_config=eval_config,
                tensor_adapter_config=tensor_adapter_config),
            unbatch_extractor.UnbatchExtractor(),
            slice_key_extractor.SliceKeyExtractor(slice_spec=slice_spec)
        ]
        evaluators = [
            metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
                eval_config=eval_config, eval_shared_model=eval_shared_models)
        ]
        output_paths = {
            constants.VALIDATIONS_KEY: validations_file,
        }
        writers = [
            metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
                output_paths, add_metrics_callbacks=[])
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            _ = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples])
                | 'BatchExamples' >> tfx_io.BeamSource()
                | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | 'ExtractEvaluate' >> model_eval_lib.ExtractAndEvaluate(
                    extractors=extractors, evaluators=evaluators)
                | 'WriteResults' >> model_eval_lib.WriteResults(writers=writers))
            # pylint: enable=no-value-for-parameter

        validation_result = model_eval_lib.load_validation_result(
            os.path.dirname(validations_file))

        expected_validations = [
            text_format.Parse(
                """
            metric_key {
              name: "weighted_example_count"
              model_name: "candidate"
            }
            metric_threshold {
              value_threshold {
                upper_bound {
                  value: 1.0
                }
              }
            }
            metric_value {
              double_value {
                value: 1.5
              }
            }
            """, validation_result_pb2.ValidationFailure()),
            text_format.Parse(
                """
            metric_key {
              name: "example_count"
            }
            metric_threshold {
              value_threshold {
                lower_bound {
                  value: 10.0
                }
              }
            }
            metric_value {
              double_value {
                value: 2.0
              }
            }
            """, validation_result_pb2.ValidationFailure()),
            text_format.Parse(
                """
            metric_key {
              name: "mean_label"
              model_name: "candidate"
              is_diff: true
            }
            metric_threshold {
              change_threshold {
                absolute {
                  value: 0.0
                }
                relative {
                  value: 0.0
                }
                direction: HIGHER_IS_BETTER
              }
            }
            metric_value {
              double_value {
                value: 0.0
              }
            }
            """, validation_result_pb2.ValidationFailure()),
        ]
        self.assertFalse(validation_result.validation_ok)
        self.assertLen(validation_result.metric_validations_per_slice, 1)
        self.assertCountEqual(
            expected_validations,
            validation_result.metric_validations_per_slice[0].failures)
Code Example #18
    def testTFlitePredictExtractorWithKerasModel(self, multi_model,
                                                 multi_output):
        input1 = tf.keras.layers.Input(shape=(1, ), name='input1')
        input2 = tf.keras.layers.Input(shape=(1, ), name='input2')
        inputs = [input1, input2]
        input_layer = tf.keras.layers.concatenate(inputs)
        output_layers = {}
        output_layers['output1'] = (tf.keras.layers.Dense(
            1, activation=tf.nn.sigmoid, name='output1')(input_layer))
        if multi_output:
            output_layers['output2'] = (tf.keras.layers.Dense(
                1, activation=tf.nn.sigmoid, name='output2')(input_layer))

        model = tf.keras.models.Model(inputs, output_layers)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
                      loss=tf.keras.losses.binary_crossentropy,
                      metrics=['accuracy'])

        train_features = {'input1': [[0.0], [1.0]], 'input2': [[1.0], [0.0]]}
        labels = {'output1': [[1], [0]]}
        if multi_output:
            labels['output2'] = [[1], [0]]

        example_weights = {'output1': [1.0, 0.5]}
        if multi_output:
            example_weights['output2'] = [1.0, 0.5]
        dataset = tf.data.Dataset.from_tensor_slices(
            (train_features, labels, example_weights))
        dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
        model.fit(dataset, steps_per_epoch=1)

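        # Convert the trained Keras model to a TFLite flatbuffer and write it
        # to disk; the eval shared models below load it via model_type='tf_lite'.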
        converter = tf.compat.v2.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()

        tflite_model_dir = tempfile.mkdtemp()
        with tf.io.gfile.GFile(os.path.join(tflite_model_dir, 'tflite'),
                               'wb') as f:
            f.write(tflite_model)

        model_specs = [config.ModelSpec(name='model1', model_type='tf_lite')]
        if multi_model:
            model_specs.append(
                config.ModelSpec(name='model2', model_type='tf_lite'))

        eval_config = config.EvalConfig(model_specs=model_specs)
        eval_shared_models = [
            self.createTestEvalSharedModel(
                model_name='model1',
                eval_saved_model_path=tflite_model_dir,
                model_type='tf_lite')
        ]
        if multi_model:
            eval_shared_models.append(
                self.createTestEvalSharedModel(
                    model_name='model2',
                    eval_saved_model_path=tflite_model_dir,
                    model_type='tf_lite'))

        schema = text_format.Parse(
            """
        feature {
          name: "input1"
          type: FLOAT
        }
        feature {
          name: "input2"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        predictor = tflite_predict_extractor.TFLitePredictExtractor(
            eval_config=eval_config, eval_shared_model=eval_shared_models)

        examples = [
            self._makeExample(input1=0.0, input2=1.0, non_model_feature=0),
            self._makeExample(input1=1.0, input2=0.0, non_model_feature=1),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | predictor.stage_name >> predictor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    got = got[0]
                    self.assertIn(constants.PREDICTIONS_KEY, got)
                    self.assertLen(got[constants.PREDICTIONS_KEY], 2)

                    for item in got[constants.PREDICTIONS_KEY]:
                        if multi_model:
                            self.assertIn('model1', item)
                            self.assertIn('model2', item)
                            if multi_output:
                                self.assertIn('Identity', item['model1'])
                                self.assertIn('Identity_1', item['model1'])

                        elif multi_output:
                            self.assertIn('Identity', item)
                            self.assertIn('Identity_1', item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #19
  def testBatchedInputExtractor(self, label):
    model_spec = config.ModelSpec(
        label_key=label, example_weight_key='example_weight')
    eval_config = config.EvalConfig(model_specs=[model_spec])
    input_extractor = batched_input_extractor.BatchedInputExtractor(eval_config)

    label_feature = ''
    if label is not None:
      label_feature = """
          feature {
            name: "%s"
            type: FLOAT
          }
          """ % label
    schema = text_format.Parse(
        label_feature + """
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

    def maybe_add_key(d, key, value):
      if key is not None:
        d[key] = value
      return d

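    # When label is None, no label feature is added to the examples, and the
    # extractor is expected to surface None labels in the assertions below.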
    example_kwargs = [
        maybe_add_key(
            {
                'example_weight': 0.5,
                'fixed_int': 1,
                'fixed_float': 1.0,
                'fixed_string': 'fixed_string1'
            }, label, 1.0),
        maybe_add_key(
            {
                'example_weight': 0.0,
                'fixed_int': 1,
                'fixed_float': 1.0,
                'fixed_string': 'fixed_string2'
            }, label, 0.0),
        maybe_add_key(
            {
                'example_weight': 1.0,
                'fixed_int': 2,
                'fixed_float': 0.0,
                'fixed_string': 'fixed_string3'
            }, label, 0.0),
    ]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([
              self._makeExample(**kwargs).SerializeToString()
              for kwargs in example_kwargs
          ],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | input_extractor.stage_name >> input_extractor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 1)
          self.assertDictElementsAlmostEqual(
              got[0][constants.FEATURES_KEY][0],
              maybe_add_key(
                  {
                      'fixed_int': np.array([1]),
                      'fixed_float': np.array([1.0]),
                      'example_weight': np.array([0.5]),
                  }, label, np.array([1.0])))
          self.assertEqual(got[0][constants.FEATURES_KEY][0]['fixed_string'],
                           np.array([b'fixed_string1']))
          self.assertAlmostEqual(got[0][constants.LABELS_KEY][0],
                                 np.array([1.0]) if label is not None else None)
          self.assertAlmostEqual(got[0][constants.EXAMPLE_WEIGHTS_KEY][0],
                                 np.array([0.5]))
          self.assertDictElementsAlmostEqual(
              got[0][constants.FEATURES_KEY][1],
              maybe_add_key(
                  {
                      'fixed_int': np.array([1]),
                      'fixed_float': np.array([1.0]),
                      'example_weight': np.array([0.0]),
                  }, label, np.array([0.0])))
          self.assertEqual(got[0][constants.FEATURES_KEY][1]['fixed_string'],
                           np.array([b'fixed_string2']))
          self.assertAlmostEqual(got[0][constants.LABELS_KEY][1],
                                 np.array([0.0]) if label is not None else None)
          self.assertAlmostEqual(got[0][constants.EXAMPLE_WEIGHTS_KEY][1],
                                 np.array([0.0]))
          self.assertDictElementsAlmostEqual(
              got[0][constants.FEATURES_KEY][2],
              maybe_add_key(
                  {
                      'fixed_int': np.array([2]),
                      'fixed_float': np.array([0.0]),
                      'example_weight': np.array([1.0]),
                  }, label, np.array([0.0])))
          self.assertEqual(got[0][constants.FEATURES_KEY][2]['fixed_string'],
                           np.array([b'fixed_string3']))
          self.assertAlmostEqual(got[0][constants.LABELS_KEY][2],
                                 np.array([0.0]) if label is not None else None)
          self.assertAlmostEqual(got[0][constants.EXAMPLE_WEIGHTS_KEY][2],
                                 np.array([1.0]))

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result, label='result')
Code Example #20
    def testUnbatchExtractorMultiOutput(self):
        model_spec = config_pb2.ModelSpec(label_keys={
            'output1': 'label1',
            'output2': 'label2'
        },
                                          example_weight_keys={
                                              'output1': 'example_weight1',
                                              'output2': 'example_weight2'
                                          })
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        label_extractor = labels_extractor.LabelsExtractor(eval_config)
        example_weight_extractor = (
            example_weights_extractor.ExampleWeightsExtractor(eval_config))
        predict_extractor = predictions_extractor.PredictionsExtractor(
            eval_config)
        unbatch_inputs_extractor = unbatch_extractor.UnbatchExtractor()
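        # The extractors are chained in the pipeline below: labels and example
        # weights are keyed per output name (per the model spec above), then
        # UnbatchExtractor splits the batched extracts into per-example extracts.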

        schema = text_format.Parse(
            """
        feature {
          name: "label1"
          type: FLOAT
        }
        feature {
          name: "label2"
          type: FLOAT
        }
        feature {
          name: "example_weight1"
          type: FLOAT
        }
        feature {
          name: "example_weight2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(label1=1.0,
                              label2=0.0,
                              example_weight1=0.5,
                              example_weight2=0.5,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(label1=1.0,
                              label2=1.0,
                              example_weight1=0.0,
                              example_weight2=1.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string2'),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | label_extractor.stage_name >> label_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform
                | predict_extractor.stage_name >> predict_extractor.ptransform
                | unbatch_inputs_extractor.stage_name >>
                unbatch_inputs_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 2)
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                            'fixed_float': np.array([1.0]),
                        })
                    self.assertEqual(
                        got[0][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string1']))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.LABELS_KEY], {
                            'output1': np.array([1.0]),
                            'output2': np.array([0.0])
                        })
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY], {
                            'output1': np.array([0.5]),
                            'output2': np.array([0.5])
                        })
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                            'fixed_float': np.array([1.0]),
                        })
                    self.assertEqual(
                        got[1][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string2']))
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.LABELS_KEY], {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.EXAMPLE_WEIGHTS_KEY], {
                            'output1': np.array([0.0]),
                            'output2': np.array([1.0])
                        })

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #21
    def testPreprocessedFeaturesExtractor(self, save_as_keras,
                                          preprocessing_function_names,
                                          expected_extract_keys):
        export_path = self.createModelWithMultipleDenseInputs(save_as_keras)

        eval_config = config.EvalConfig(model_specs=[
            config.ModelSpec(
                preprocessing_function_names=preprocessing_function_names)
        ])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_path, tags=[tf.saved_model.SERVING])
        schema = self.createDenseInputsSchema()
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        transformation_extractor = (
            transformed_features_extractor.TransformedFeaturesExtractor(
                eval_config=eval_config,
                eval_shared_model=eval_shared_model,
                tensor_adapter_config=tensor_adapter_config))

        examples = [
            self._makeExample(input_1=1.0, input_2=2.0),
            self._makeExample(input_1=3.0, input_2=4.0),
            self._makeExample(input_1=5.0, input_2=6.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | transformation_extractor.stage_name >>
                transformation_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 2)
                    for item in got:
                        for extracts_key, feature_keys in expected_extract_keys.items(
                        ):
                            self.assertIn(extracts_key, item)
                            for value in item[extracts_key]:
                                self.assertEqual(set(feature_keys),
                                                 set(value.keys()),
                                                 msg='got={}'.format(item))

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #22
    def test_features_extractor(self):
        model_spec = config_pb2.ModelSpec()
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)

        schema = text_format.Parse(
            """
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = tf_example_record.TFExampleBeamRecord(
            schema=schema,
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            physical_format='inmem',
            telemetry_descriptors=['testing'])

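        # 'example_weight' is declared in the schema but never set on the
        # examples, so its decoded feature column is expected to be all Nones.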
        example_kwargs = [
            {
                'fixed_int': 1,
                'fixed_float': 1.0,
                'fixed_string': 'fixed_string1'
            },
            {
                'fixed_int': 1,
                'fixed_float': 1.0,
                'fixed_string': 'fixed_string2'
            },
            {
                'fixed_int': 2,
                'fixed_float': 0.0,
                'fixed_string': 'fixed_string3'
            },
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create([
                    self._makeExample(**kwargs).SerializeToString()
                    for kwargs in example_kwargs
                ],
                                          reshuffle=False)
                | 'DecodeToRecordBatch' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertIn(constants.FEATURES_KEY, got[0])
                    self.assertLen(got[0][constants.FEATURES_KEY],
                                   4)  # 4 features
                    self.assertIn('example_weight',
                                  got[0][constants.FEATURES_KEY])
                    # Arrays of type np.object won't compare with assertAllClose
                    self.assertEqual(
                        got[0][constants.FEATURES_KEY]
                        ['example_weight'].tolist(), [None, None, None])
                    self.assertIn('fixed_int', got[0][constants.FEATURES_KEY])
                    self.assertAllClose(
                        got[0][constants.FEATURES_KEY]['fixed_int'],
                        np.array([1, 1, 2]))
                    self.assertIn('fixed_float',
                                  got[0][constants.FEATURES_KEY])
                    self.assertAllClose(
                        got[0][constants.FEATURES_KEY]['fixed_float'],
                        np.array([1.0, 1.0, 0.0]))
                    self.assertIn('fixed_string',
                                  got[0][constants.FEATURES_KEY])
                    # Arrays of type np.object won't compare with assertAllClose
                    self.assertEqual(
                        got[0][
                            constants.FEATURES_KEY]['fixed_string'].tolist(),
                        [b'fixed_string1', b'fixed_string2', b'fixed_string3'])
                    self.assertIn(constants.INPUT_KEY, got[0])
                    self.assertLen(got[0][constants.INPUT_KEY],
                                   3)  # 3 examples

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #23
    def testPredictExtractorWithMultiModels(self):
        temp_export_dir = self._getExportDir()
        export_dir1, _ = multi_head.simple_multi_head(temp_export_dir, None)
        export_dir2, _ = multi_head.simple_multi_head(temp_export_dir, None)

        eval_config = config.EvalConfig(model_specs=[
            config.ModelSpec(name='model1'),
            config.ModelSpec(name='model2')
        ])
        eval_shared_model1 = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir1, tags=[tf.saved_model.SERVING])
        eval_shared_model2 = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir2, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        feature {
          name: "age"
          type: FLOAT
        }
        feature {
          name: "langauge"
          type: BYTES
        }
        feature {
          name: "english_label"
          type: FLOAT
        }
        feature {
          name: "chinese_label"
          type: FLOAT
        }
        feature {
          name: "other_label"
          type: FLOAT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.BATCHED_INPUT_KEY)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        input_extractor = batched_input_extractor.BatchedInputExtractor(
            eval_config)
        predict_extractor = batched_predict_extractor_v2.BatchedPredictExtractor(
            eval_config=eval_config,
            eval_shared_model={
                'model1': eval_shared_model1,
                'model2': eval_shared_model2
            },
            tensor_adapter_config=tensor_adapter_config)
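        # With two models configured, each prediction extract is keyed first by
        # model name and then by '<head>/<prediction key>', as checked below.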

        examples = [
            self._makeExample(age=1.0,
                              language='english',
                              english_label=1.0,
                              chinese_label=0.0,
                              other_label=0.0),
            self._makeExample(age=1.0,
                              language='chinese',
                              english_label=0.0,
                              chinese_label=1.0,
                              other_label=0.0),
            self._makeExample(age=2.0,
                              language='english',
                              english_label=1.0,
                              chinese_label=0.0,
                              other_label=0.0),
            self._makeExample(age=2.0,
                              language='other',
                              english_label=0.0,
                              chinese_label=1.0,
                              other_label=1.0)
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=4)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | input_extractor.stage_name >> input_extractor.ptransform
                | predict_extractor.stage_name >> predict_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    for item in got:
                        # We can't verify the actual predictions, but we can verify the keys
                        self.assertIn(constants.BATCHED_PREDICTIONS_KEY, item)
                        for pred in item[constants.BATCHED_PREDICTIONS_KEY]:
                            for model_name in ('model1', 'model2'):
                                self.assertIn(model_name, pred)
                                for output_name in ('chinese_head',
                                                    'english_head',
                                                    'other_head'):
                                    for pred_key in ('logistic',
                                                     'probabilities',
                                                     'all_classes'):
                                        self.assertIn(
                                            output_name + '/' + pred_key,
                                            pred[model_name])

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #24
    def assertMetricsComputedWithBeamAre(
        self,
        eval_saved_model_path: str,
        serialized_examples: List[bytes],
        expected_metrics: Dict[str, Any],
        add_metrics_callbacks: Optional[List[
            types.AddMetricsCallbackType]] = None):
        """Checks metrics computed using Beam.

    Metrics will be computed over all examples, without any slicing. If you
    want to provide your own PCollection (e.g. read a large number of examples
    from a file), if you want to check metrics over certain slices, or if you
    want to add additional post-export metrics, use the more general
    assertGeneralMetricsComputedWithBeamAre.

    Example usage:
      self.assertMetricsComputedWithBeamAre(
        eval_saved_model_path=path,
        serialized_examples=[self.makeExample(age=5, label=1.0),
                             self.makeExample(age=10, label=0.0)],
        expected_metrics={'average_loss': 0.1})

    Args:
      eval_saved_model_path: Path to the directory containing the
        EvalSavedModel.
      serialized_examples: List of serialized example bytes.
      expected_metrics: Dictionary of expected metric values.
      add_metrics_callbacks: Optional. Callbacks for adding additional metrics.
    """
        def check_metrics(got):
            """Check metrics callback."""
            try:
                self.assertEqual(
                    1, len(got),
                    'expecting metrics for exactly one slice, but got %d '
                    'slices instead. metrics were: %s' % (len(got), got))
                (slice_key, value) = got[0]
                self.assertEqual((), slice_key)
                self.assertDictElementsWithinBounds(
                    got_values_dict=value,
                    expected_values_dict=expected_metrics)
            except AssertionError as err:
                raise beam_util.BeamAssertException(err)

        eval_config = config_pb2.EvalConfig()
        eval_shared_model = model_eval_lib.default_eval_shared_model(
            eval_saved_model_path=eval_saved_model_path,
            add_metrics_callbacks=add_metrics_callbacks)
        extractors = model_eval_lib.default_extractors(
            eval_config=eval_config, eval_shared_model=eval_shared_model)

        tfx_io = raw_tf_record.RawBeamRecordTFXIO(
            physical_format='inmemory',
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            telemetry_descriptors=['TFMATest'])
        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            (metrics, _), _ = (
                pipeline
                | 'CreateExamples' >> beam.Create(serialized_examples)
                | 'BatchExamples' >> tfx_io.BeamSource()
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | 'Extract' >> Extract(extractors=extractors)
                | 'ComputeMetricsAndPlots' >>
                legacy_metrics_and_plots_evaluator._ComputeMetricsAndPlots(  # pylint: disable=protected-access
                    eval_shared_model=eval_shared_model))
            # pylint: enable=no-value-for-parameter

            beam_util.assert_that(metrics, check_metrics)
Code Example #25
    def testPredictExtractorWithSequentialKerasModel(self):
        # Note that the input will be called 'test_input'
        model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(1,
                                  activation=tf.nn.sigmoid,
                                  input_shape=(2, ),
                                  name='test')
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
                      loss=tf.keras.losses.binary_crossentropy,
                      metrics=['accuracy'])

        train_features = {'test_input': [[0.0, 0.0], [1.0, 1.0]]}
        labels = [[1], [0]]
        example_weights = [1.0, 0.5]
        dataset = tf.data.Dataset.from_tensor_slices(
            (train_features, labels, example_weights))
        dataset = dataset.shuffle(buffer_size=1).repeat().batch(2)
        model.fit(dataset, steps_per_epoch=1)

        export_dir = self._getExportDir()
        model.save(export_dir, save_format='tf')

        eval_config = config.EvalConfig(model_specs=[config.ModelSpec()])
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
        schema = text_format.Parse(
            """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "test"
              value {
                dense_tensor {
                  column_name: "test"
                  shape { dim { size: 2 } }
                }
              }
            }
          }
        }
        feature {
          name: "test"
          type: FLOAT
        }
        feature {
          name: "non_model_feature"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.BATCHED_INPUT_KEY)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        input_extractor = batched_input_extractor.BatchedInputExtractor(
            eval_config)
        predict_extractor = batched_predict_extractor_v2.BatchedPredictExtractor(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            tensor_adapter_config=tensor_adapter_config)

        # Notice that the features are 'test' but the model expects 'test_input'.
        # This tests that the PredictExtractor properly handles this case.
        examples = [
            # non_model_feature should be ignored by the model.
            self._makeExample(test=[0.0, 0.0], non_model_feature=0),
            self._makeExample(test=[1.0, 1.0], non_model_feature=1),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | input_extractor.stage_name >> input_extractor.ptransform
                | predict_extractor.stage_name >> predict_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    # We can't verify the actual predictions, but we can verify the keys.
                    for item in got:
                        self.assertIn(constants.BATCHED_PREDICTIONS_KEY, item)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #26
    def assertGeneralMetricsComputedWithBeamAre(
            self, eval_saved_model_path: str,
            examples_pcollection: beam.pvalue.PCollection,
            slice_spec: List[slicer.SingleSliceSpec],
            add_metrics_callbacks: List[types.AddMetricsCallbackType],
            expected_slice_metrics: Dict[Any, Dict[str, Any]]):
        """Checks metrics computed using Beam.

    A more general version of assertMetricsComputedWithBeamAre. Note that the
    caller is responsible for setting up and running the Beam pipeline.

    Example usage:
      def add_metrics(features, predictions, labels):
        metric_ops = {
            'mse': tf.metrics.mean_squared_error(labels, predictions['logits']),
            'mae': tf.metrics.mean_absolute_error(labels, predictions['logits']),
        }
        return metric_ops

      with beam.Pipeline() as pipeline:
        expected_slice_metrics = {
            (): {
              'mae': 0.1,
              'mse': 0.2,
              tfma.post_export_metrics.metric_keys.AUC:
                tfma.test.BoundedValue(lower_bound=0.5)
            },
            (('age', 10),): {
              'mae': 0.2,
              'mse': 0.3,
              tfma.post_export_metrics.metric_keys.AUC:
                tfma.test.BoundedValue(lower_bound=0.5)
            },
        }
        examples = pipeline | 'ReadExamples' >> beam.io.ReadFromTFRecord(path)
        self.assertGeneralMetricsComputedWithBeamAre(
          eval_saved_model_path=path,
          examples_pcollection=examples,
          slice_spec=[tfma.slicer.SingleSliceSpec(),
                      tfma.slicer.SingleSliceSpec(columns=['age'])],
          add_metrics_callbacks=[
            add_metrics, tfma.post_export_metrics.auc()],
          expected_slice_metrics=expected_slice_metrics)

    Args:
      eval_saved_model_path: Path to the directory containing the
        EvalSavedModel.
      examples_pcollection: A PCollection of serialized example bytes.
      slice_spec: List of slice specifications.
      add_metrics_callbacks: Callbacks for adding additional metrics.
      expected_slice_metrics: Dictionary of dictionaries describing the expected
        metrics for each slice. The outer dictionary maps slice keys to the
        expected metrics for that slice.
    """
        def check_metrics(got):
            """Check metrics callback."""
            try:
                slices = {}
                for slice_key, value in got:
                    slices[slice_key] = value
                self.assertCountEqual(list(slices.keys()),
                                      list(expected_slice_metrics.keys()))
                for slice_key, expected_metrics in expected_slice_metrics.items(
                ):
                    self.assertDictElementsWithinBounds(
                        got_values_dict=slices[slice_key],
                        expected_values_dict=expected_metrics)
            except AssertionError as err:
                raise beam_util.BeamAssertException(err)

        slicing_specs = None
        if slice_spec:
            slicing_specs = [s.to_proto() for s in slice_spec]
        eval_config = config_pb2.EvalConfig(slicing_specs=slicing_specs)
        eval_shared_model = self.createTestEvalSharedModel(
            eval_saved_model_path=eval_saved_model_path,
            add_metrics_callbacks=add_metrics_callbacks)
        extractors = model_eval_lib.default_extractors(
            eval_config=eval_config, eval_shared_model=eval_shared_model)

        tfx_io = raw_tf_record.RawBeamRecordTFXIO(
            physical_format='inmemory',
            raw_record_column_name=constants.ARROW_INPUT_COLUMN,
            telemetry_descriptors=['TFMATest'])
        # pylint: disable=no-value-for-parameter
        (metrics, _), _ = (
            examples_pcollection
            | 'BatchExamples' >> tfx_io.BeamSource()
            | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
            | 'Extract' >> Extract(extractors=extractors)
            | 'ComputeMetricsAndPlots' >>
            legacy_metrics_and_plots_evaluator._ComputeMetricsAndPlots(  # pylint: disable=protected-access
                eval_shared_model=eval_shared_model))
        # pylint: enable=no-value-for-parameter

        beam_util.assert_that(metrics, check_metrics)
Code Example #27
    def testExampleWeightsExtractor(self, example_weight):
        model_spec = config_pb2.ModelSpec(example_weight_key=example_weight)
        eval_config = config_pb2.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        example_weight_extractor = (
            example_weights_extractor.ExampleWeightsExtractor(eval_config))

        example_weight_feature = ''
        if example_weight is not None:
            example_weight_feature = """
          feature {
            name: "%s"
            type: FLOAT
          }
          """ % example_weight
        schema = text_format.Parse(
            example_weight_feature + """
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        def maybe_add_key(d, key, value):
            if key is not None:
                d[key] = value
            return d

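        # When example_weight is None, the weight feature is omitted and the
        # extractor should leave EXAMPLE_WEIGHTS_KEY out of the extracts.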
        example_kwargs = [
            maybe_add_key({
                'fixed_int': 1,
            }, example_weight, 0.5),
            maybe_add_key({
                'fixed_int': 1,
            }, example_weight, 0.0),
            maybe_add_key({
                'fixed_int': 2,
            }, example_weight, 1.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create([
                    self._makeExample(**kwargs).SerializeToString()
                    for kwargs in example_kwargs
                ],
                                          reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    if example_weight:
                        self.assertAllClose(
                            got[0][constants.EXAMPLE_WEIGHTS_KEY],
                            np.array([0.5, 0.0, 1.0]))
                    else:
                        self.assertNotIn(constants.EXAMPLE_WEIGHTS_KEY, got[0])

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #28
  def testPredictionsExtractorWithMultiClassModel(self):
    temp_export_dir = self._getExportDir()
    export_dir, _ = dnn_classifier.simple_dnn_classifier(
        temp_export_dir, None, n_classes=3)

    eval_config = config.EvalConfig(model_specs=[config.ModelSpec()])
    eval_shared_model = self.createTestEvalSharedModel(
        eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
    schema = text_format.Parse(
        """
        feature {
          name: "age"
          type: FLOAT
        }
        feature {
          name: "langauge"
          type: BYTES
        }
        feature {
          name: "label"
          type: INT
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
        arrow_schema=tfx_io.ArrowSchema(),
        tensor_representations=tfx_io.TensorRepresentations())
    feature_extractor = features_extractor.FeaturesExtractor(eval_config)
    prediction_extractor = predictions_extractor.PredictionsExtractor(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        tensor_adapter_config=tensor_adapter_config)

    examples = [
        self._makeExample(age=1.0, language='english', label=0),
        self._makeExample(age=2.0, language='chinese', label=1),
        self._makeExample(age=3.0, language='english', label=2),
        self._makeExample(age=4.0, language='chinese', label=1),
    ]

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=4)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | feature_extractor.stage_name >> feature_extractor.ptransform
          | prediction_extractor.stage_name >> prediction_extractor.ptransform)

      # pylint: enable=no-value-for-parameter

      def check_result(got):
        try:
          self.assertLen(got, 1)
          # We can't verify the actual predictions, but we can verify the keys.
          for item in got:
            self.assertIn(constants.PREDICTIONS_KEY, item)
            for pred in item[constants.PREDICTIONS_KEY]:
              for pred_key in ('probabilities', 'all_classes'):
                self.assertIn(pred_key, pred)

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(result, check_result, label='result')
Code Example #29
    def testLabelsExtractor(self, label):
        model_spec = config.ModelSpec(label_key=label)
        eval_config = config.EvalConfig(model_specs=[model_spec])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        label_extractor = labels_extractor.LabelsExtractor(eval_config)

        label_feature = ''
        if label is not None:
            label_feature = """
          feature {
            name: "%s"
            type: FLOAT
          }
          """ % label
        schema = text_format.Parse(
            label_feature + """
        feature {
          name: "fixed_int"
          type: INT
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        def maybe_add_key(d, key, value):
            if key is not None:
                d[key] = value
            return d

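        # When label is None, no label feature is present and LABELS_KEY is
        # expected to hold None for each example.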
        example_kwargs = [
            maybe_add_key({
                'fixed_int': 1,
            }, label, 1.0),
            maybe_add_key({
                'fixed_int': 1,
            }, label, 0.0),
            maybe_add_key({
                'fixed_int': 2,
            }, label, 0.0),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create([
                    self._makeExample(**kwargs).SerializeToString()
                    for kwargs in example_kwargs
                ],
                                          reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | label_extractor.stage_name >> label_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 1)
                    self.assertAlmostEqual(
                        got[0][constants.LABELS_KEY][0],
                        np.array([1.0]) if label is not None else None)
                    self.assertAlmostEqual(
                        got[0][constants.LABELS_KEY][1],
                        np.array([0.0]) if label is not None else None)
                    self.assertAlmostEqual(
                        got[0][constants.LABELS_KEY][2],
                        np.array([0.0]) if label is not None else None)

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
Code Example #30
  def testBatchSizeLimitWithKerasModel(self):
    input1 = tf.keras.layers.Input(shape=(1,), batch_size=1, name='input1')
    input2 = tf.keras.layers.Input(shape=(1,), batch_size=1, name='input2')

    inputs = [input1, input2]
    input_layer = tf.keras.layers.concatenate(inputs)

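    # tf.add_n does not broadcast, so this Lambda layer only works when the
    # batch size is exactly 1; predictions succeed only if the extractor
    # respects the model's declared batch size limit.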
    def add_1(tensor):
      return tf.add_n([tensor, tf.constant(1.0, shape=(1, 2))])

    assert_layer = tf.keras.layers.Lambda(add_1)(input_layer)

    model = tf.keras.models.Model(inputs, assert_layer)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
        loss=tf.keras.losses.binary_crossentropy,
        metrics=['accuracy'])

    export_dir = self._getExportDir()
    model.save(export_dir, save_format='tf')

    eval_config = config.EvalConfig(model_specs=[config.ModelSpec()])
    eval_shared_model = self.createTestEvalSharedModel(
        eval_saved_model_path=export_dir, tags=[tf.saved_model.SERVING])
    schema = text_format.Parse(
        """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "input1"
              value {
                dense_tensor {
                  column_name: "input1"
                  shape { dim { size: 1 } }
                }
              }
            }
            tensor_representation {
              key: "input2"
              value {
                dense_tensor {
                  column_name: "input2"
                  shape { dim { size: 1 } }
                }
              }
            }
          }
        }
        feature {
          name: "input1"
          type: FLOAT
        }
        feature {
          name: "input2"
          type: FLOAT
        }
        """, schema_pb2.Schema())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
        arrow_schema=tfx_io.ArrowSchema(),
        tensor_representations=tfx_io.TensorRepresentations())
    feature_extractor = features_extractor.FeaturesExtractor(eval_config)
    prediction_extractor = predictions_extractor.PredictionsExtractor(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        tensor_adapter_config=tensor_adapter_config)

    examples = []
    for _ in range(4):
      examples.append(self._makeExample(input1=0.0, input2=1.0))

    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      predict_extracts = (
          pipeline
          | 'Create' >> beam.Create([e.SerializeToString() for e in examples],
                                    reshuffle=False)
          | 'BatchExamples' >> tfx_io.BeamSource(batch_size=1)
          | 'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
          | feature_extractor.stage_name >> feature_extractor.ptransform
          | prediction_extractor.stage_name >> prediction_extractor.ptransform)

      # pylint: enable=no-value-for-parameter
      def check_result(got):
        try:
          self.assertLen(got, 4)
          # We can't verify the actual predictions, but we can verify the keys.
          for item in got:
            self.assertIn(constants.PREDICTIONS_KEY, item)

        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(predict_extracts, check_result, label='result')