Code Example #1: a helper method that reads serialized tf.train.Example records from a TFRecord file, runs them through RunInference, and writes the resulting PredictionLog protos back out as TFRecord.
    def _run_inference_with_beam(self, example_path, inference_endpoint,
                                 prediction_log_path):
        """Reads examples, runs RunInference, and writes prediction logs."""
        with beam.Pipeline() as pipeline:
            _ = (pipeline
                 | 'ReadExamples' >> beam.io.ReadFromTFRecord(example_path)
                 | 'ParseExamples' >> beam.Map(tf.train.Example.FromString)
                 | 'RunInference' >>
                 run_inference.RunInference(inference_endpoint)
                 | 'WritePredictions' >> beam.io.WriteToTFRecord(
                     prediction_log_path,
                     coder=beam.coders.ProtoCoder(
                         prediction_log_pb2.PredictionLog)))
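Code Example #1 assumes the caller has already built an inference_endpoint proto. A minimal sketch of how a caller in the same class might construct one and invoke the helper, using the model_spec_pb2 API shown in Code Example #2; the model path and file paths below are placeholders, and the signature name simply mirrors the one used in Code Example #2:

    # Sketch only: every path below is a placeholder value.
    inference_endpoint = model_spec_pb2.InferenceEndpoint(
        saved_model_spec=model_spec_pb2.SavedModelSpec(
            model_path='/tmp/exported_model',
            signature_name=['classify_sum']))
    self._run_inference_with_beam(
        example_path='/tmp/examples.tfrecord',
        inference_endpoint=inference_endpoint,
        prediction_log_path='/tmp/prediction_logs')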
Code Example #2: a test that runs RunInference over a multi-head model and then verifies the telemetry counters and distributions that RunInference reports through Beam's metrics API.
    def testTelemetry(self):
        """Verifies the telemetry metrics reported by RunInference."""
        example_path = self._get_output_data_dir('examples')
        self._prepare_multihead_examples(example_path)
        model_path = self._get_output_data_dir('model')
        self._build_multihead_model(model_path)
        inference_endpoint = model_spec_pb2.InferenceEndpoint(
            saved_model_spec=model_spec_pb2.SavedModelSpec(
                model_path=model_path, signature_name=['classify_sum']))
        pipeline = beam.Pipeline()
        _ = (
            pipeline | 'ReadExamples' >> beam.io.ReadFromTFRecord(example_path)
            | 'ParseExamples' >> beam.Map(tf.train.Example.FromString)
            | 'RunInference' >> run_inference.RunInference(inference_endpoint))
        run_result = pipeline.run()
        run_result.wait_until_finish()

        # Query the telemetry metrics reported by RunInference and check them.
        num_inferences = run_result.metrics().query(
            MetricsFilter().with_name('num_inferences'))
        self.assertTrue(num_inferences['counters'])
        self.assertEqual(num_inferences['counters'][0].result, 2)
        num_instances = run_result.metrics().query(
            MetricsFilter().with_name('num_instances'))
        self.assertTrue(num_instances['counters'])
        self.assertEqual(num_instances['counters'][0].result, 2)
        inference_request_batch_size = run_result.metrics().query(
            MetricsFilter().with_name('inference_request_batch_size'))
        self.assertTrue(inference_request_batch_size['distributions'])
        self.assertEqual(
            inference_request_batch_size['distributions'][0].result.sum, 2)
        inference_request_batch_byte_size = run_result.metrics().query(
            MetricsFilter().with_name('inference_request_batch_byte_size'))
        self.assertTrue(inference_request_batch_byte_size['distributions'])
        self.assertEqual(
            inference_request_batch_byte_size['distributions'][0].result.sum,
            sum(element.ByteSize() for element in self._multihead_examples))
        inference_batch_latency_micro_secs = run_result.metrics().query(
            MetricsFilter().with_name('inference_batch_latency_micro_secs'))
        self.assertTrue(inference_batch_latency_micro_secs['distributions'])
        self.assertGreaterEqual(
            inference_batch_latency_micro_secs['distributions'][0].result.sum,
            0)
        load_model_latency_milli_secs = run_result.metrics().query(
            MetricsFilter().with_name('load_model_latency_milli_secs'))
        self.assertTrue(load_model_latency_milli_secs['distributions'])
        self.assertGreaterEqual(
            load_model_latency_milli_secs['distributions'][0].result.sum, 0)
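The same metrics().query() pattern can also be used to simply inspect the RunInference distributions in bulk instead of asserting on them one at a time. A minimal sketch against the run_result above, looping over the metric names this test already queries (the print output is purely illustrative):

    # Sketch only: prints the sum and count of each distribution queried above.
    for name in ('inference_request_batch_size',
                 'inference_request_batch_byte_size',
                 'inference_batch_latency_micro_secs',
                 'load_model_latency_milli_secs'):
        distributions = run_result.metrics().query(
            MetricsFilter().with_name(name))['distributions']
        for dist in distributions:
            print(name, dist.result.sum, dist.result.count)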
Code Example #3: a method that reads example data from multiple splits, flattens them into a single PCollection, runs RunInference, and writes the prediction logs as gzip-compressed TFRecord files.
    def _run_model_inference(self, model_path: Text,
                             example_uris: Mapping[Text, Text],
                             output_path: Text,
                             model_spec: bulk_inferrer_pb2.ModelSpec) -> None:
        """Runs model inference on given example data.

        Args:
          model_path: Path to model.
          example_uris: Mapping of example split name to example uri.
          output_path: Path to output generated prediction logs.
          model_spec: bulk_inferrer_pb2.ModelSpec instance.

        Returns:
          None
        """

        saved_model_spec = model_spec_pb2.SavedModelSpec(
            model_path=model_path,
            tag=model_spec.tag,
            signature_name=model_spec.model_signature_name)
        inference_endpoint = model_spec_pb2.InferenceEndpoint()
        inference_endpoint.saved_model_spec.CopyFrom(saved_model_spec)
        with self._make_beam_pipeline() as pipeline:
            # Read each example split into its own PCollection.
            data_list = []
            for split, example_uri in example_uris.items():
                data = (
                    pipeline
                    | 'ReadData[{}]'.format(split) >> beam.io.ReadFromTFRecord(
                        file_pattern=io_utils.all_files_pattern(example_uri)))
                data_list.append(data)
            # Flatten all splits into one PCollection, run inference, and
            # write the prediction logs as gzip-compressed TFRecord files.
            _ = (data_list
                 | 'FlattenExamples' >> beam.Flatten(pipeline=pipeline)
                 | 'ParseExamples' >> beam.Map(tf.train.Example.FromString)
                 | 'RunInference' >>
                 run_inference.RunInference(inference_endpoint)
                 | 'WritePredictionLogs' >> beam.io.WriteToTFRecord(
                     output_path,
                     file_name_suffix='.gz',
                     coder=beam.coders.ProtoCoder(
                         prediction_log_pb2.PredictionLog)))
        logging.info('Inference result written to %s.', output_path)
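For context, a hedged sketch of how this method might be called from within the same class. All paths and split names below are placeholders, and the ModelSpec is left at its defaults (the method only reads its tag and model_signature_name fields):

    # Sketch only: paths and split names are placeholders; the ModelSpec is
    # left at its defaults (the method reads its tag and model_signature_name).
    model_spec = bulk_inferrer_pb2.ModelSpec()
    self._run_model_inference(
        model_path='/tmp/serving_model_dir',
        example_uris={
            'train': '/tmp/examples/train',
            'eval': '/tmp/examples/eval',
        },
        output_path='/tmp/prediction_logs',
        model_spec=model_spec)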