Example #1
  def benchmarkMiniPipeline(self):
    """Benchmark a "mini" TFMA - predict, slice and compute metrics.

    Runs a "mini" version of TFMA in a Beam pipeline. Records the wall time
    taken for the whole pipeline.
    """
    self._init_model()
    pipeline = self._create_beam_pipeline()
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=benchmark_utils.read_schema(
            self._dataset.tf_metadata_schema_path()),
        raw_record_column_name=constants.ARROW_INPUT_COLUMN)
    raw_data = (
        pipeline
        | "Examples" >> beam.Create(
            self._dataset.read_raw_dataset(
                deserialize=False, limit=MAX_NUM_EXAMPLES))
        | "BatchExamples" >> tfx_io.BeamSource()
        | "InputsToExtracts" >> tfma.BatchedInputsToExtracts())

    _ = (
        raw_data
        | "FeaturesExtractor" >> features_extractor.FeaturesExtractor(
            eval_config=self._eval_config).ptransform
        | "LabelsExtractor" >> labels_extractor.LabelsExtractor(
            eval_config=self._eval_config).ptransform
        | "ExampleWeightsExtractor" >> example_weights_extractor
        .ExampleWeightsExtractor(eval_config=self._eval_config).ptransform
        | "PredictionsExtractor" >> predictions_extractor.PredictionsExtractor(
            eval_config=self._eval_config,
            eval_shared_model=self._eval_shared_model).ptransform
        | "UnbatchExtractor" >> unbatch_extractor.UnbatchExtractor().ptransform
        | "SliceKeyExtractor" >> tfma.extractors.SliceKeyExtractor().ptransform
        | "ComputeMetricsPlotsAndValidations" >>
        metrics_plots_and_validations_evaluator
        .MetricsPlotsAndValidationsEvaluator(
            eval_config=self._eval_config,
            eval_shared_model=self._eval_shared_model).ptransform)

    start = time.time()
    result = pipeline.run()
    result.wait_until_finish()
    end = time.time()
    delta = end - start

    self.report_benchmark(
        iters=1,
        wall_time=delta,
        extras={
            "num_examples": self._dataset.num_examples(limit=MAX_NUM_EXAMPLES)
        })
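
The extractor stages above are chained through each extractor's .ptransform
attribute using Beam's "label" >> transform idiom, and the reported wall time
covers run() plus the blocking wait_until_finish(). A minimal, self-contained
sketch of both patterns, assuming only apache_beam is installed (the "Double"
stage is a stand-in for the TFMA extractors):

import time

import apache_beam as beam

# Build (but do not yet run) a pipeline of labeled stages, mirroring the
# "Label" >> transform chaining used in the benchmark above.
pipeline = beam.Pipeline()
_ = (
    pipeline
    | "Examples" >> beam.Create([1, 2, 3])
    | "Double" >> beam.Map(lambda x: x * 2))

# Same measurement pattern as the benchmark: run() starts execution and
# wait_until_finish() blocks until the whole pipeline completes.
start = time.time()
result = pipeline.run()
result.wait_until_finish()
print("wall time:", time.time() - start)
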
Example #2
  def benchmarkMiniPipelineBatched(self):
    """Benchmark a batched "mini" TFMA - predict, slice and compute metrics.

    Runs a "mini" version of TFMA in a Beam pipeline. Records the wall time
    taken for the whole pipeline.
    """
    self._init_model()
    pipeline = beam.Pipeline(runner=fn_api_runner.FnApiRunner())
    tfx_io = test_util.InMemoryTFExampleRecord(
        schema=benchmark_utils.read_schema(
            self._dataset.tf_metadata_schema_path()),
        raw_record_column_name=tfma.BATCHED_INPUT_KEY)
    raw_data = (
        pipeline
        | "Examples" >> beam.Create(
            self._dataset.read_raw_dataset(
                deserialize=False, limit=MAX_NUM_EXAMPLES))
        | "BatchExamples" >> tfx_io.BeamSource()
        | "InputsToExtracts" >> tfma.BatchedInputsToExtracts())

    _ = (
        raw_data
        | "BatchedInputExtractor" >> batched_input_extractor
        .BatchedInputExtractor(eval_config=self._eval_config).ptransform
        | "V2BatchedPredictExtractor" >>
        batched_predict_extractor_v2.BatchedPredictExtractor(
            eval_config=self._eval_config,
            eval_shared_model=self._eval_shared_model).ptransform
        | "UnbatchExtractor" >> unbatch_extractor.UnbatchExtractor().ptransform
        | "SliceKeyExtractor" >> tfma.extractors.SliceKeyExtractor().ptransform
        | "V2ComputeMetricsAndPlots" >>
        metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
            eval_config=self._eval_config,
            eval_shared_model=self._eval_shared_model).ptransform)

    start = time.time()
    result = pipeline.run()
    result.wait_until_finish()
    end = time.time()
    delta = end - start

    self.report_benchmark(
        iters=1,
        wall_time=delta,
        extras={
            "num_examples": self._dataset.num_examples(limit=MAX_NUM_EXAMPLES)
        })
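
The only material differences from Example #1 are the explicit FnApiRunner and
the batched extractor/evaluator variants. A small sketch of pinning a pipeline
to that runner, assuming a recent apache_beam where fn_api_runner lives under
runners.portability (as the import in the example above implies):

import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner

# Run a trivial pipeline on the portable FnApiRunner, as the batched
# benchmark above does, instead of the default runner.
pipeline = beam.Pipeline(runner=fn_api_runner.FnApiRunner())
_ = pipeline | "Examples" >> beam.Create([b"raw-record"])
pipeline.run().wait_until_finish()
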
Example #3
    def testWriteValidationResults(self):
        model_dir, baseline_dir = self._getExportDir(), self._getBaselineDir()
        eval_shared_model = self._build_keras_model(model_dir, mul=0)
        baseline_eval_shared_model = self._build_keras_model(baseline_dir,
                                                             mul=1)
        validations_file = os.path.join(self._getTempDir(),
                                        constants.VALIDATIONS_KEY)
        schema = text_format.Parse(
            """
        tensor_representation_group {
          key: ""
          value {
            tensor_representation {
              key: "input"
              value {
                dense_tensor {
                  column_name: "input"
                  shape { dim { size: 1 } }
                }
              }
            }
          }
        }
        feature {
          name: "input"
          type: FLOAT
        }
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "extra_feature"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
            arrow_schema=tfx_io.ArrowSchema(),
            tensor_representations=tfx_io.TensorRepresentations())
        examples = [
            self._makeExample(input=0.0,
                              label=1.0,
                              example_weight=1.0,
                              extra_feature='non_model_feature'),
            self._makeExample(input=1.0,
                              label=0.0,
                              example_weight=0.5,
                              extra_feature='non_model_feature'),
        ]

        eval_config = config.EvalConfig(
            model_specs=[
                config.ModelSpec(name='candidate',
                                 label_key='label',
                                 example_weight_key='example_weight'),
                config.ModelSpec(name='baseline',
                                 label_key='label',
                                 example_weight_key='example_weight',
                                 is_baseline=True)
            ],
            slicing_specs=[config.SlicingSpec()],
            metrics_specs=[
                config.MetricsSpec(
                    metrics=[
                        config.MetricConfig(
                            class_name='WeightedExampleCount',
                            # WeightedExampleCount = 1.5 > upper_bound 1: NOT OK.
                            threshold=config.MetricThreshold(
                                value_threshold=config.GenericValueThreshold(
                                    upper_bound={'value': 1}))),
                        config.MetricConfig(
                            class_name='ExampleCount',
                            # ExampleCount = 2 < lower_bound 10: NOT OK.
                            threshold=config.MetricThreshold(
                                value_threshold=config.GenericValueThreshold(
                                    lower_bound={'value': 10}))),
                        config.MetricConfig(
                            class_name='MeanLabel',
                            # MeanLabel diff = 0, but the threshold
                            # requires diff > 0: NOT OK.
                            threshold=config.MetricThreshold(
                                change_threshold=config.GenericChangeThreshold(
                                    direction=config.MetricDirection.
                                    HIGHER_IS_BETTER,
                                    relative={'value': 0},
                                    absolute={'value': 0}))),
                        config.MetricConfig(
                            # MeanPrediction = (0+0)/(1+0.5) = 0
                            class_name='MeanPrediction',
                            # -.01 < 0 < .01, OK.
                            # Diff% = -.333/.333 = -100% < -99%, OK.
                            # Diff = 0 - .333 = -.333 < 0, OK.
                            threshold=config.MetricThreshold(
                                value_threshold=config.GenericValueThreshold(
                                    upper_bound={'value': .01},
                                    lower_bound={'value': -.01}),
                                change_threshold=config.GenericChangeThreshold(
                                    direction=config.MetricDirection.
                                    LOWER_IS_BETTER,
                                    relative={'value': -.99},
                                    absolute={'value': 0})))
                    ],
                    model_names=['candidate', 'baseline']),
            ],
            options=config.Options(
                disabled_outputs={'values': ['eval_config.json']}),
        )
        slice_spec = [
            slicer.SingleSliceSpec(spec=s) for s in eval_config.slicing_specs
        ]
        eval_shared_models = {
            'candidate': eval_shared_model,
            'baseline': baseline_eval_shared_model
        }
        extractors = [
            batched_input_extractor.BatchedInputExtractor(eval_config),
            batched_predict_extractor_v2.BatchedPredictExtractor(
                eval_shared_model=eval_shared_models,
                eval_config=eval_config,
                tensor_adapter_config=tensor_adapter_config),
            unbatch_extractor.UnbatchExtractor(),
            slice_key_extractor.SliceKeyExtractor(slice_spec=slice_spec)
        ]
        evaluators = [
            metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
                eval_config=eval_config, eval_shared_model=eval_shared_models)
        ]
        output_paths = {
            constants.VALIDATIONS_KEY: validations_file,
        }
        writers = [
            metrics_plots_and_validations_writer.
            MetricsPlotsAndValidationsWriter(output_paths,
                                             add_metrics_callbacks=[])
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            _ = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples])
                | 'BatchExamples' >> tfx_io.BeamSource()
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | 'ExtractEvaluate' >> model_eval_lib.ExtractAndEvaluate(
                    extractors=extractors, evaluators=evaluators)
                |
                'WriteResults' >> model_eval_lib.WriteResults(writers=writers))
            # pylint: enable=no-value-for-parameter

        validation_result = model_eval_lib.load_validation_result(
            os.path.dirname(validations_file))

        expected_validations = [
            text_format.Parse(
                """
            metric_key {
              name: "weighted_example_count"
              model_name: "candidate"
            }
            metric_threshold {
              value_threshold {
                upper_bound {
                  value: 1.0
                }
              }
            }
            metric_value {
              double_value {
                value: 1.5
              }
            }
            """, validation_result_pb2.ValidationFailure()),
            text_format.Parse(
                """
            metric_key {
              name: "example_count"
            }
            metric_threshold {
              value_threshold {
                lower_bound {
                  value: 10.0
                }
              }
            }
            metric_value {
              double_value {
                value: 2.0
              }
            }
            """, validation_result_pb2.ValidationFailure()),
            text_format.Parse(
                """
            metric_key {
              name: "mean_label"
              model_name: "candidate"
              is_diff: true
            }
            metric_threshold {
              change_threshold {
                absolute {
                  value: 0.0
                }
                relative {
                  value: 0.0
                }
                direction: HIGHER_IS_BETTER
              }
            }
            metric_value {
              double_value {
                value: 0.0
              }
            }
            """, validation_result_pb2.ValidationFailure()),
        ]
        self.assertFalse(validation_result.validation_ok)
        self.assertLen(validation_result.metric_validations_per_slice, 1)
        self.assertCountEqual(
            expected_validations,
            validation_result.metric_validations_per_slice[0].failures)
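
For intuition, the threshold arithmetic in the comments above can be modeled in
a few lines of plain Python. This is a simplified sketch of the semantics, not
TFMA's actual validation code:

def value_ok(value, lower=None, upper=None):
    """Value threshold: the metric must lie within [lower, upper]."""
    if lower is not None and value < lower:
        return False
    if upper is not None and value > upper:
        return False
    return True

def change_ok(candidate, baseline, absolute, relative, higher_is_better):
    """Change threshold: diff and diff ratio vs. baseline must beat the bounds."""
    diff = candidate - baseline
    ratio = diff / baseline if baseline else float("inf")  # simplification
    if higher_is_better:
        return diff > absolute and ratio > relative
    return diff < absolute and ratio < relative

# WeightedExampleCount = 1.5 with upper_bound 1 -> fails.
assert not value_ok(1.5, upper=1)
# ExampleCount = 2 with lower_bound 10 -> fails.
assert not value_ok(2, lower=10)
# MeanLabel diff = 0 under HIGHER_IS_BETTER with bounds 0 -> fails.
assert not change_ok(0.5, 0.5, absolute=0, relative=0, higher_is_better=True)
# MeanPrediction = 0 within [-.01, .01], and diff -100% < -99% -> passes.
assert value_ok(0.0, lower=-0.01, upper=0.01)
assert change_ok(0.0, 1 / 3, absolute=0, relative=-0.99, higher_is_better=False)
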
Example #4
    def _runMiniPipeline(self, multi_model):
        """Benchmark a "mini" TFMA - predict, slice and compute metrics.

    Runs a "mini" version of TFMA in a Beam pipeline. Records the wall time
    taken for the whole pipeline.

    Args:
      multi_model: True if multiple models should be used in the benchmark.
    """
        self._init_model(multi_model, validation=False)
        pipeline = self._create_beam_pipeline()
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=benchmark_utils.read_schema(
                self._dataset.tf_metadata_schema_path()),
            raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        raw_data = (pipeline
                    | "Examples" >> beam.Create(
                        self._dataset.read_raw_dataset(
                            deserialize=False, limit=self._max_num_examples()))
                    | "BatchExamples" >> tfx_io.BeamSource()
                    | "InputsToExtracts" >> tfma.BatchedInputsToExtracts())

        def rescale_labels(extracts):
            # Transform labels to [0, 1] so we can test metrics that require labels in
            # that range.
            result = copy.copy(extracts)
            result[constants.LABELS_KEY] = self._transform_labels(
                extracts[constants.LABELS_KEY])
            return result

        _ = (raw_data
             | "FeaturesExtractor" >> features_extractor.FeaturesExtractor(
                 eval_config=self._eval_config).ptransform
             | "LabelsExtractor" >> labels_extractor.LabelsExtractor(
                 eval_config=self._eval_config).ptransform
             | "RescaleLabels" >> beam.Map(rescale_labels)
             | "ExampleWeightsExtractor" >> example_weights_extractor.
             ExampleWeightsExtractor(eval_config=self._eval_config).ptransform
             | "PredictionsExtractor" >>
             predictions_extractor.PredictionsExtractor(
                 eval_config=self._eval_config,
                 eval_shared_model=self._eval_shared_models).ptransform
             | "UnbatchExtractor" >>
             unbatch_extractor.UnbatchExtractor().ptransform
             | "SliceKeyExtractor" >>
             tfma.extractors.SliceKeyExtractor().ptransform
             | "ComputeMetricsPlotsAndValidations" >>
             metrics_plots_and_validations_evaluator.
             MetricsPlotsAndValidationsEvaluator(
                 eval_config=self._eval_config,
                 eval_shared_model=self._eval_shared_models).ptransform)

        start = time.time()
        for _ in range(_ITERS):
            result = pipeline.run()
            result.wait_until_finish()
        end = time.time()
        delta = end - start

        self.report_benchmark(
            iters=_ITERS,
            wall_time=delta,
            extras={
                "num_examples":
                self._dataset.num_examples(limit=self._max_num_examples())
            })
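
The rescale_labels helper above follows a common Beam rule: never mutate input
elements in place, so it shallow-copies the extracts dict before replacing the
labels entry. A standalone sketch of the same pattern (the key name and the
[-1, 1] -> [0, 1] scaling are illustrative assumptions):

import copy

import apache_beam as beam

LABELS_KEY = "labels"  # stand-in for tfma's constants.LABELS_KEY

def rescale_labels(extracts):
    # Copy first so the original element stays untouched, then rescale
    # labels from an assumed [-1, 1] range into [0, 1].
    result = copy.copy(extracts)
    result[LABELS_KEY] = [(label + 1) / 2 for label in extracts[LABELS_KEY]]
    return result

with beam.Pipeline() as p:
    _ = (p
         | beam.Create([{LABELS_KEY: [-1.0, 0.0, 1.0]}])
         | beam.Map(rescale_labels)
         | beam.Map(print))
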
Example #5
    def testUnbatchExtractor(self):
        model_spec = config.ModelSpec(label_key='label',
                                      example_weight_key='example_weight')
        eval_config = config.EvalConfig(model_specs=[model_spec])
        input_extractor = batched_input_extractor.BatchedInputExtractor(
            eval_config)
        unbatch_inputs_extractor = unbatch_extractor.UnbatchExtractor()

        schema = text_format.Parse(
            """
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)
        examples = [
            self._makeExample(label=1.0,
                              example_weight=0.5,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(label=0.0,
                              example_weight=0.0,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string2'),
            self._makeExample(label=0.0,
                              example_weight=1.0,
                              fixed_int=2,
                              fixed_float=0.0,
                              fixed_string='fixed_string3')
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=3)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | input_extractor.stage_name >> input_extractor.ptransform
                | unbatch_inputs_extractor.stage_name >>
                unbatch_inputs_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 3)
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                            'fixed_float': np.array([1.0]),
                        })
                    self.assertEqual(
                        got[0][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string1']))
                    self.assertAlmostEqual(got[0][constants.LABELS_KEY],
                                           np.array([1.0]))
                    self.assertAlmostEqual(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY], np.array([0.5]))
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                            'fixed_float': np.array([1.0]),
                        })
                    self.assertEqual(
                        got[1][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string2']))
                    self.assertAlmostEqual(got[1][constants.LABELS_KEY],
                                           np.array([0.0]))
                    self.assertAlmostEqual(
                        got[1][constants.EXAMPLE_WEIGHTS_KEY], np.array([0.0]))
                    self.assertDictElementsAlmostEqual(
                        got[2][constants.FEATURES_KEY], {
                            'fixed_int': np.array([2]),
                            'fixed_float': np.array([0.0]),
                        })
                    self.assertEqual(
                        got[2][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string3']))
                    self.assertAlmostEqual(got[2][constants.LABELS_KEY],
                                           np.array([0.0]))
                    self.assertAlmostEqual(
                        got[2][constants.EXAMPLE_WEIGHTS_KEY], np.array([1.0]))

                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
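
UnbatchExtractor turns one batched extract (equal-length columns, here a batch
of 3) into one extract per example, which is what the three per-row assertions
above check. A rough sketch of the idea in plain Python, not TFMA's
implementation:

def unbatch(batched):
    """Split a dict of equal-length columns into one dict per row."""
    num_rows = len(next(iter(batched.values())))
    return [{key: column[i] for key, column in batched.items()}
            for i in range(num_rows)]

batched = {"label": [1.0, 0.0, 0.0], "example_weight": [0.5, 0.0, 1.0]}
print(unbatch(batched))
# -> [{'label': 1.0, 'example_weight': 0.5}, {'label': 0.0, ...}, ...]
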
Example #6
    def testUnbatchExtractorMultiModel(self):
        model_spec1 = config_pb2.ModelSpec(name='model1',
                                           label_key='label',
                                           example_weight_key='example_weight',
                                           prediction_key='fixed_float')
        model_spec2 = config_pb2.ModelSpec(name='model2',
                                           label_keys={
                                               'output1': 'label1',
                                               'output2': 'label2'
                                           },
                                           example_weight_keys={
                                               'output1': 'example_weight1',
                                               'output2': 'example_weight2'
                                           },
                                           prediction_keys={
                                               'output1': 'fixed_float',
                                               'output2': 'fixed_float'
                                           })
        eval_config = config_pb2.EvalConfig(
            model_specs=[model_spec1, model_spec2])
        feature_extractor = features_extractor.FeaturesExtractor(eval_config)
        label_extractor = labels_extractor.LabelsExtractor(eval_config)
        example_weight_extractor = (
            example_weights_extractor.ExampleWeightsExtractor(eval_config))
        predict_extractor = predictions_extractor.PredictionsExtractor(
            eval_config)
        unbatch_inputs_extractor = unbatch_extractor.UnbatchExtractor()

        schema = text_format.Parse(
            """
        feature {
          name: "label"
          type: FLOAT
        }
        feature {
          name: "label1"
          type: FLOAT
        }
        feature {
          name: "label2"
          type: FLOAT
        }
        feature {
          name: "example_weight"
          type: FLOAT
        }
        feature {
          name: "example_weight1"
          type: FLOAT
        }
        feature {
          name: "example_weight2"
          type: FLOAT
        }
        feature {
          name: "fixed_int"
          type: INT
        }
        feature {
          name: "fixed_float"
          type: FLOAT
        }
        feature {
          name: "fixed_string"
          type: BYTES
        }
        """, schema_pb2.Schema())
        tfx_io = test_util.InMemoryTFExampleRecord(
            schema=schema, raw_record_column_name=constants.ARROW_INPUT_COLUMN)

        examples = [
            self._makeExample(label=1.0,
                              label1=1.0,
                              label2=0.0,
                              example_weight=0.5,
                              example_weight1=0.5,
                              example_weight2=0.5,
                              fixed_int=1,
                              fixed_float=1.0,
                              fixed_string='fixed_string1'),
            self._makeExample(label=1.0,
                              label1=1.0,
                              label2=1.0,
                              example_weight=0.0,
                              example_weight1=0.0,
                              example_weight2=1.0,
                              fixed_int=1,
                              fixed_float=2.0,
                              fixed_string='fixed_string2'),
        ]

        with beam.Pipeline() as pipeline:
            # pylint: disable=no-value-for-parameter
            result = (
                pipeline
                | 'Create' >> beam.Create(
                    [e.SerializeToString() for e in examples], reshuffle=False)
                | 'BatchExamples' >> tfx_io.BeamSource(batch_size=2)
                |
                'InputsToExtracts' >> model_eval_lib.BatchedInputsToExtracts()
                | feature_extractor.stage_name >> feature_extractor.ptransform
                | label_extractor.stage_name >> label_extractor.ptransform
                | example_weight_extractor.stage_name >>
                example_weight_extractor.ptransform
                | predict_extractor.stage_name >> predict_extractor.ptransform
                | unbatch_inputs_extractor.stage_name >>
                unbatch_inputs_extractor.ptransform)

            # pylint: enable=no-value-for-parameter

            def check_result(got):
                try:
                    self.assertLen(got, 2)
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                        })
                    self.assertEqual(
                        got[0][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string1']))
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name, got[0][constants.LABELS_KEY])
                        self.assertIn(model_name,
                                      got[0][constants.EXAMPLE_WEIGHTS_KEY])
                        self.assertIn(model_name,
                                      got[0][constants.PREDICTIONS_KEY])
                    self.assertAlmostEqual(
                        got[0][constants.LABELS_KEY]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.LABELS_KEY]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([0.0])
                        })
                    self.assertAlmostEqual(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY]['model1'],
                        np.array([0.5]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.EXAMPLE_WEIGHTS_KEY]['model2'], {
                            'output1': np.array([0.5]),
                            'output2': np.array([0.5])
                        })
                    self.assertAlmostEqual(
                        got[0][constants.PREDICTIONS_KEY]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[0][constants.PREDICTIONS_KEY]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })

                    self.assertDictElementsAlmostEqual(
                        got[1][constants.FEATURES_KEY], {
                            'fixed_int': np.array([1]),
                        })
                    self.assertEqual(
                        got[1][constants.FEATURES_KEY]['fixed_string'],
                        np.array([b'fixed_string2']))
                    for model_name in ('model1', 'model2'):
                        self.assertIn(model_name, got[1][constants.LABELS_KEY])
                        self.assertIn(model_name,
                                      got[1][constants.EXAMPLE_WEIGHTS_KEY])
                        self.assertIn(model_name,
                                      got[1][constants.PREDICTIONS_KEY])
                    self.assertAlmostEqual(
                        got[1][constants.LABELS_KEY]['model1'],
                        np.array([1.0]))
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.LABELS_KEY]['model2'], {
                            'output1': np.array([1.0]),
                            'output2': np.array([1.0])
                        })
                    self.assertAlmostEqual(
                        got[1][constants.EXAMPLE_WEIGHTS_KEY]['model1'],
                        np.array([0.0]))
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.EXAMPLE_WEIGHTS_KEY]['model2'], {
                            'output1': np.array([0.0]),
                            'output2': np.array([1.0])
                        })
                    self.assertAlmostEqual(
                        got[1][constants.PREDICTIONS_KEY]['model1'],
                        np.array([2.0]))
                    self.assertDictElementsAlmostEqual(
                        got[1][constants.PREDICTIONS_KEY]['model2'], {
                            'output1': np.array([2.0]),
                            'output2': np.array([2.0])
                        })
                except AssertionError as err:
                    raise util.BeamAssertException(err)

            util.assert_that(result, check_result, label='result')
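
With multiple models (and multi-output models), labels, example weights, and
predictions are nested per model name, and per output name below that, which
is the structure the assertions above walk. A hedged sketch of one unbatched
extract in that shape (key names are illustrative, not TFMA's constants):

import numpy as np

extract = {
    "labels": {
        "model1": np.array([1.0]),
        "model2": {"output1": np.array([1.0]), "output2": np.array([0.0])},
    },
    "example_weights": {
        "model1": np.array([0.5]),
        "model2": {"output1": np.array([0.5]), "output2": np.array([0.5])},
    },
    "predictions": {
        "model1": np.array([1.0]),
        "model2": {"output1": np.array([1.0]), "output2": np.array([1.0])},
    },
}
# Per-model entries exist for every key, as the test asserts.
for model_name in ("model1", "model2"):
    assert model_name in extract["labels"]
    assert model_name in extract["example_weights"]
    assert model_name in extract["predictions"]
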