Code Example #1
File: executor.py Project: suryaavala/tfx
def _convert_to_prediction_log(request: iv_types.Request):
    """Try convert infra validation request to TF-Serving PredictionLog."""
    if isinstance(request, classification_pb2.ClassificationRequest):
        return prediction_log_pb2.PredictionLog(
            classify_log=prediction_log_pb2.ClassifyLog(request=request))
    elif isinstance(request, regression_pb2.RegressionRequest):
        return prediction_log_pb2.PredictionLog(
            regress_log=prediction_log_pb2.RegressLog(request=request))
    elif isinstance(request, predict_pb2.PredictRequest):
        return prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
    else:
        raise NotImplementedError(
            f'Cannot convert {type(request)} to PredictionLog')
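A minimal usage sketch for the converter above, assuming the tensorflow_serving proto packages are importable as in the snippet; the model name is an illustrative assumption:

from tensorflow_serving.apis import predict_pb2

# wrap a bare PredictRequest in a PredictionLog via the helper above
request = predict_pb2.PredictRequest()
request.model_spec.name = 'my_model'  # hypothetical model name
log = _convert_to_prediction_log(request)
assert log.HasField('predict_log')  # the log_type oneof is set to predict_log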
Code Example #2
def create_warmup_records(saved_models_dir: str = 'saved_models'):
    saved_models_path = Path(saved_models_dir)

    for target, shape in TARGETS.items():
        model_path = saved_models_path / target / '1'

        # SavedModel warmup records are placed inside the "assets.extra" directory
        # so we need to create this directory if it doesn't exist yet
        extra_dir = model_path / 'assets.extra'
        extra_dir.mkdir(parents=False, exist_ok=True)

        # extract n_timesteps and n_features from target shape
        _, n_timesteps, n_features = shape

        # create a single inference sample to warm up the model
        # (the path is converted to str since TFRecordWriter doesn't accept Path objects)
        records_path = extra_dir / 'tf_serving_warmup_requests'
        with tf.io.TFRecordWriter(str(records_path)) as writer:
            log = prediction_log_pb2.PredictionLog(
                predict_log=prediction_log_pb2.PredictLog(
                    request=predict_pb2.PredictRequest(
                        inputs={
                            'input_1':
                            tf.make_tensor_proto([[[0] * n_features] *
                                                  n_timesteps],
                                                 dtype=tf.float32,
                                                 shape=shape)
                        })))
            writer.write(log.SerializeToString())
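A hedged verification sketch that reads a warmup file written by create_warmup_records back in and parses each record; the target name in the path is an illustrative assumption:

import tensorflow as tf
from tensorflow_serving.apis import prediction_log_pb2

warmup_path = 'saved_models/my_target/1/assets.extra/tf_serving_warmup_requests'  # hypothetical target
for raw_record in tf.data.TFRecordDataset(warmup_path):
    log = prediction_log_pb2.PredictionLog.FromString(raw_record.numpy())
    print(list(log.predict_log.request.inputs.keys()))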
Code Example #3
    def test_convert_for_regress_invalid_output_example_spec(self):
        prediction_log = text_format.Parse(
            """
      regress_log {
        request {
          input {
            example_list {
              examples {
                features {
                  feature: {
                    key: "regress_input"
                    value: { bytes_list: { value: "feature" } }
                  }
                }
              }
            }
          }
        }
        response {
          result {
            regressions {
              value: 0.7
            }
          }
        }
      }
    """, prediction_log_pb2.PredictionLog())

        output_example_spec = text_format.Parse(
            """
        output_columns_spec {
        }
    """, bulk_inferrer_pb2.OutputExampleSpec())
        with self.assertRaises(ValueError):
            utils.convert(prediction_log, output_example_spec)
Code Example #4
    def test_model_predict(self):
        predictions = [{'output_1': [0.901], 'output_2': [0.997]}]
        builder = http.RequestMockBuilder({
            'ml.projects.predict':
            (None, self._make_response_body(predictions, successful=True))
        })
        resource = discovery.build('ml',
                                   'v1',
                                   http=http.HttpMock(
                                       self._discovery_testdata_dir,
                                       {'status': http_client.OK}),
                                   requestBuilder=builder)
        with mock.patch('googleapiclient.discovery.build') as response_mock:
            response_mock.side_effect = lambda service, version: resource
            inference_spec_type = model_spec_pb2.InferenceSpecType(
                ai_platform_prediction_model_spec=model_spec_pb2.
                AIPlatformPredictionModelSpec(
                    project_id='test-project',
                    model_name='test-model',
                ))

            prediction_log = prediction_log_pb2.PredictionLog()
            prediction_log.predict_log.response.outputs['output_1'].CopyFrom(
                tf.make_tensor_proto(values=[0.901],
                                     dtype=tf.double,
                                     shape=(1, 1)))
            prediction_log.predict_log.response.outputs['output_2'].CopyFrom(
                tf.make_tensor_proto(values=[0.997],
                                     dtype=tf.double,
                                     shape=(1, 1)))

            self._set_up_pipeline(inference_spec_type)
            assert_that(self.pcoll, equal_to([prediction_log]))
            self._run_inference_with_beam()
Code Example #5
 def _post_process(
     self,
     examples: List[_INPUT_TYPE],
     serialize_examples: List[bytes],
     outputs: List[Mapping[Text, Any]]
     ) -> List[prediction_log_pb2.PredictionLog]:
   del examples
   result = []
   for i, serialized_example in enumerate(serialize_examples):
     prediction_log = prediction_log_pb2.PredictionLog()
     predict_log = prediction_log.predict_log
     input_tensor_proto = predict_log.request.inputs[
         tf.saved_model.PREDICT_INPUTS]
     input_tensor_proto.dtype = tf.string.as_datatype_enum
     input_tensor_proto.tensor_shape.dim.add().size = 1
     input_tensor_proto.string_val.append(serialized_example)
     for output_alias, values in outputs[i].items():
       values = np.array(values)
       tensor_proto = tf.make_tensor_proto(
           values=values,
           dtype=tf.as_dtype(values.dtype).as_datatype_enum,
           shape=np.expand_dims(values, axis=0).shape)
       predict_log.response.outputs[output_alias].CopyFrom(tensor_proto)
     result.append(prediction_log)
   return result
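A hedged sketch of inspecting one log produced by the method above; `logs` stands in for a hypothetical return value of _post_process, and everything else follows the field layout the method writes:

import tensorflow as tf

log = logs[0]  # hypothetical: first PredictionLog returned by _post_process
# recover the serialized input example stored under the standard predict key
serialized = log.predict_log.request.inputs[tf.saved_model.PREDICT_INPUTS].string_val[0]
example = tf.train.Example.FromString(serialized)
# each output alias maps to a TensorProto with a leading batch dimension of 1
for alias, tensor_proto in log.predict_log.response.outputs.items():
    print(alias, tf.make_ndarray(tensor_proto).shape)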
Code Example #6
def main():
    with tf.python_io.TFRecordWriter("tf_serving_warmup_requests") as writer:
        # replace <request> with one of:
        # predict_pb2.PredictRequest(..)
        # classification_pb2.ClassificationRequest(..)
        # regression_pb2.RegressionRequest(..)
        # inference_pb2.MultiInferenceRequest(..)

        request = predict_pb2.PredictRequest()
        request.model_spec.name = MODEL_NAME

        request.model_spec.signature_name = 'serving_default'
        input_name = 'inputs'
        example1 = tf.train.Example(features=tf.train.Features(
            feature={
                'user_feature': _bytes_feature(user_feature),
                'ctx_features': _bytes_feature(ctx_features),
                'item_features': _bytes_feature(item_features),
            })).SerializeToString()
        print("example len = {}".format(len(example1)))
        examples = [example1]
        request.inputs[input_name].CopyFrom(
            tf.contrib.util.make_tensor_proto(examples, dtype=tf.string))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #7
 def _serving_warm_up(self):
     _dataset = self.Dataset(self._flags.compression_type,
                             self._flags.label_key,
                             self._flags.schema,
                             self._flags.valid_path,
                             prebatch=self._flags.prebatch,
                             batch_size=1,
                             **self.dataset_args)
     feature, labels = _dataset.eval_set().make_one_shot_iterator(
     ).get_next()
     feature = self._parse_sequence_weight(feature)
     feature = self.sparse2dense(feature, self._dataset.varlen_list)
     feature = {
         name: tensor
         for name, tensor in feature.items() if name in self._features
     }
     with tf.Session(config=self.sess_config) as sess:
         feature_n = sess.run(feature)
     del sess
     del _dataset
     request = predict_pb2.PredictRequest()
     for k, v in feature_n.items():
         request.inputs[k].CopyFrom(tf.make_tensor_proto(v, shape=v.shape))
     log = prediction_log_pb2.PredictionLog(
         predict_log=prediction_log_pb2.PredictLog(request=request))
     filename = 'tf_serving_warmup_requests'
     file_dir = self._flags.checkpoint_dir
     path = os.path.join(file_dir, filename)
     os.makedirs(file_dir, exist_ok=True)
     if os.path.exists(path):
         os.remove(path)
     with tf.python_io.TFRecordWriter(path) as writer:
         writer.write(log.SerializeToString())
     return {filename: path}
Code Example #8
File: yolo.py Project: zeweiru/mobilenetv2-yolov3
def export_serving_model(yolo, path):
    if tf.io.gfile.exists(path):
        overwrite = input("Overwrite existing model (yes/no): ")
        if overwrite == 'yes':
            tf.io.gfile.rmtree(path)
        else:
            raise ValueError(
                "Export directory already exists, and isn't empty. Please choose a different export directory, or delete all the contents of the specified directory: "
                + path)
    tf.saved_model.simple_save(
        yolo.sess,
        path,
        inputs={'predict_image:0': yolo.input},
        outputs={t.name: t
                 for t in yolo.yolo_model.output})

    asset_extra = os.path.join(path, "assets.extra")
    tf.io.gfile.mkdir(asset_extra)
    with tf.io.TFRecordWriter(
            os.path.join(asset_extra, "tf_serving_warmup_requests")) as writer:
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'detection'
        request.model_spec.signature_name = 'serving_default'
        image = Image.open('../download/image3.jpeg')
        scale = yolo.input_shape[0] / max(image.size)
        if scale < 1:
            # PIL's resize expects a 2-tuple, not a generator
            image = image.resize(tuple(int(dim * scale) for dim in image.size),
                                 Image.BILINEAR)
        image_data = np.array(image, dtype='uint8')
        image_data = np.expand_dims(image_data, 0)
        request.inputs['predict_image:0'].CopyFrom(
            tf.make_tensor_proto(image_data))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #9
def main(_):
    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    # Warm up
    request_w = predict_pb2.PredictRequest()
    test_batch, y_true = gen_data(FLAGS.test_file, 1)
    request_w.model_spec.name = FLAGS.model
    request_w.model_spec.signature_name = FLAGS.signature_name
    request_w.inputs['examples'].CopyFrom(
        tf.make_tensor_proto(test_batch, shape=[len(test_batch)]))
    # NOTE: this PredictionLog is constructed but never written to a warmup
    # file or sent to the server, so as written it has no effect.
    prediction_log_pb2.PredictionLog(predict_log=prediction_log_pb2.PredictLog(
        request=request_w))

    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model
    request.model_spec.signature_name = FLAGS.signature_name

    test_batch, y_true = gen_data(FLAGS.test_file, FLAGS.batch_size)
    request.inputs['examples'].CopyFrom(
        tf.make_tensor_proto(test_batch, shape=[len(test_batch)]))

    start = time()
    result_future = stub.Predict.future(request, 10.0)
    # block on the result before stopping the clock; otherwise only the
    # asynchronous dispatch is timed, not the prediction itself
    prediction = result_future.result().outputs['prob']
    elapsed = (time() - start)
    # print(prediction)
    print('Batch size: ', FLAGS.batch_size)
    print('Predict AUC: ', roc_auc_score(y_true, prediction.float_val))
    print("Predict time used: {0}ms".format(round(elapsed * 1000, 2)))
Code Example #10
    def create_warmup_requests_numpy(self, batch_sizes, export_dir):
        """Creates warm-up requests for a given feature specification.

    This writes an output file in
    `export_dir/assets.extra/tf_serving_warmup_requests` for use with Servo.

    Args:
      batch_sizes: Batch sizes of warm-up requests to write.
      export_dir: Base directory for the export.

    Returns:
      The filename written.
    """
        feature_spec = self._get_input_features_for_receiver_fn()

        flat_feature_spec = tensorspec_utils.flatten_spec_structure(
            feature_spec)
        tf.io.gfile.makedirs(export_dir)
        request_filename = os.path.join(export_dir,
                                        'tf_serving_warmup_requests')
        with tf.python_io.TFRecordWriter(request_filename) as writer:
            for batch_size in batch_sizes:
                request = predict_pb2.PredictRequest()
                request.model_spec.name = self._model_name
                numpy_feature_specs = tensorspec_utils.make_constant_numpy(
                    flat_feature_spec, constant_value=0, batch_size=batch_size)

                for key, numpy_spec in numpy_feature_specs.items():
                    request.inputs[key].CopyFrom(
                        contrib_util.make_tensor_proto(numpy_spec))

                log = prediction_log_pb2.PredictionLog(
                    predict_log=prediction_log_pb2.PredictLog(request=request))
                writer.write(log.SerializeToString())
        return request_filename
Code Example #11
def export_serving_model(yolo, path, warmup_path=None, with_tensorrt=False):
    overwrite_path(path)
    tf.saved_model.save(yolo.yolo_model, path)
    if with_tensorrt:
        params = trt.TrtConversionParams(
            rewriter_config_template=None,
            max_workspace_size_bytes=trt.DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
            precision_mode=trt.TrtPrecisionMode.FP16,
            minimum_segment_size=3,
            is_dynamic_op=True,
            maximum_cached_engines=1,
            use_calibration=True,
            max_batch_size=1)
        converter = trt.TrtGraphConverterV2(input_saved_model_dir=path,
                                            conversion_params=params)
        converter.convert()
        tf.io.gfile.rmtree(path)
        converter.save(path)
    asset_extra = os.path.join(path, "assets.extra")
    tf.io.gfile.mkdir(asset_extra)
    with tf.io.TFRecordWriter(
            os.path.join(asset_extra, "tf_serving_warmup_requests")) as writer:
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'detection'
        request.model_spec.signature_name = 'serving_default'
        if warmup_path is None:
            warmup_path = input('Please enter warm up image path:')
        image = open(warmup_path, 'rb').read()
        image_data = np.expand_dims(image, 0)
        request.inputs['predict_image'].CopyFrom(
            tf.compat.v1.make_tensor_proto(image_data))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #12
def main():
    max_seq_len = 40
    input_id = [
        101, 6821, 3221, 671, 702, 3844, 6407, 4638, 1368, 2094, 102, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0
    ]
    input_mask = [
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    segment_id = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    if len(input_id) < max_seq_len:
        input_id.extend([0] * (max_seq_len - len(input_id)))
        input_mask.extend([0] * (max_seq_len - len(input_mask)))
        segment_id.extend([0] * (max_seq_len - len(segment_id)))

    batch_size = 10
    input_ids = []
    input_masks = []
    segment_ids = []
    for i in range(batch_size):
        input_ids.append(input_id)
        input_masks.append(input_mask)
        segment_ids.append(segment_id)
    import numpy as np
    input_ids = np.array(input_ids)
    input_mask = np.array(input_masks)
    segment_ids = np.array(segment_ids)
    with tf.python_io.TFRecordWriter("tf_serving_warmup_requests") as writer:
        request = predict_pb2.PredictRequest(
            model_spec=model_pb2.ModelSpec(name="intent_model",
                                           signature_name='serving_default'),
            inputs={
                "input_ids":
                tf.make_tensor_proto(
                    input_ids,
                    dtype=tf.int32,
                    shape=[input_ids.shape[0], input_ids.shape[1]]),
                "input_mask":
                tf.make_tensor_proto(
                    input_mask,
                    dtype=tf.int32,
                    shape=[input_mask.shape[0], input_mask.shape[1]]),
                "segment_ids":
                tf.make_tensor_proto(
                    segment_ids,
                    dtype=tf.int32,
                    shape=[segment_ids.shape[0], segment_ids.shape[1]]),
                "training":
                tf.make_tensor_proto(False, dtype=tf.bool, shape=[])
            })
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #13
 def process(
     self, element: Tuple[tf.train.Example, regression_pb2.Regression]
 ) -> Iterable[prediction_log_pb2.PredictionLog]:
   (train_example, regression) = element
   result = prediction_log_pb2.PredictionLog()
   result.regress_log.request.input.example_list.examples.add().CopyFrom(
       train_example)
   result.regress_log.response.result.regressions.add().CopyFrom(regression)
   yield result
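A hedged sketch of driving this DoFn-style process method directly, outside a Beam pipeline; `dofn` stands in for a hypothetical instance of the enclosing class, and the regression value is illustrative:

import tensorflow as tf
from tensorflow_serving.apis import regression_pb2

example = tf.train.Example()
regression = regression_pb2.Regression(value=0.5)  # illustrative score
for log in dofn.process((example, regression)):  # dofn: hypothetical instance
    print(log.regress_log.response.result.regressions[0].value)  # 0.5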
Code Example #14
 def _get_results(self, prediction_log_path):
     results = []
     for f in tf.io.gfile.glob(prediction_log_path + '-?????-of-?????'):
         record_iterator = tf.compat.v1.io.tf_record_iterator(path=f)
         for record_string in record_iterator:
             prediction_log = prediction_log_pb2.PredictionLog()
             prediction_log.MergeFromString(record_string)
             results.append(prediction_log)
     return results
Code Example #15
 def process(
     self, element: Tuple[tf.train.Example,
                          inference_pb2.MultiInferenceResponse]
 ) -> Iterable[prediction_log_pb2.PredictionLog]:
     (train_example, multi_inference_response) = element
     result = prediction_log_pb2.PredictionLog()
     (result.multi_inference_log.request.input.example_list.examples.add().
      CopyFrom(train_example))
     result.multi_inference_log.response.CopyFrom(multi_inference_response)
     yield result
Code Example #16
 def process(
     self, element: Tuple[tf.train.Example, classification_pb2.Classifications]
 ) -> Iterable[prediction_log_pb2.PredictionLog]:
   (train_example, classifications) = element
   result = prediction_log_pb2.PredictionLog()
   result.classify_log.request.input.example_list.examples.add().CopyFrom(
       train_example)
   result.classify_log.response.result.classifications.add().CopyFrom(
       classifications)
   yield result
Code Example #17
def write_warmup_requests(savedmodel_dir,
                          model_name,
                          image_size,
                          batch_sizes=None,
                          num_requests=8,
                          image_format='PNG',
                          input_signature='input'):
    """Writes warmup requests for inference into a tfrecord file.

  Args:
    savedmodel_dir: string, the file to the exported model folder.
    model_name: string, a model name used inside the model server.
    image_size: tuple/list or int, size of image. A list/tuple input is
      assumed to contain the image height and width.
    batch_sizes: list, a list of batch sizes to create different input requests.
    num_requests: int, number of requests per batch size.
    image_format: string, the format of the image to write (PNG, JPEG)
    input_signature: string, input signature defined in exported saved model.

  Raises:
    ValueError: if batch_sizes is not a valid integer list.
  """
    from tensorflow_serving.apis import predict_pb2  # pylint: disable=g-import-not-at-top
    from tensorflow_serving.apis import prediction_log_pb2  # pylint: disable=g-import-not-at-top
    if not isinstance(batch_sizes, list) or not batch_sizes:
        raise ValueError('batch sizes should be a valid non-empty list.')
    extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
    tf.gfile.MkDir(extra_assets_dir)
    if isinstance(image_size, int):
        height = image_size
        width = image_size
    elif isinstance(image_size, tuple) or isinstance(image_size, list):
        height = image_size[0]
        width = image_size[1]
    else:
        raise ValueError('image_size is not a supported type: %s' %
                         type(image_size))

    with tf.python_io.TFRecordWriter(
            os.path.join(extra_assets_dir,
                         'tf_serving_warmup_requests')) as writer:
        for batch_size in batch_sizes:
            for _ in range(num_requests):
                request = predict_pb2.PredictRequest()
                image = np.uint8(np.random.rand(height, width, 3) * 255)
                request.inputs[input_signature].CopyFrom(
                    tf.make_tensor_proto([_encode_image(image, image_format)] *
                                         batch_size,
                                         shape=[batch_size]))
                request.model_spec.name = model_name
                request.model_spec.signature_name = 'serving_default'
                log = prediction_log_pb2.PredictionLog(
                    predict_log=prediction_log_pb2.PredictLog(request=request))
                writer.write(log.SerializeToString())
Code Example #18
def make_warmup_record(inputs, model_name, signature_name="serving_default"):
    predict_request = predict_pb2.PredictRequest()
    predict_request.model_spec.name = model_name
    predict_request.model_spec.signature_name = signature_name

    for key, value in inputs.items():
        predict_request.inputs[key].CopyFrom(
            tf.make_tensor_proto(value, get_tf_datatype(value)))

    log = prediction_log_pb2.PredictionLog(
        predict_log=prediction_log_pb2.PredictLog(request=predict_request))
    return log.SerializeToString()
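A hedged usage sketch for make_warmup_record; the input dict, model name, and output path are illustrative assumptions, and get_tf_datatype is the helper the snippet already relies on:

import numpy as np
import tensorflow as tf

inputs = {'input_1': np.zeros((1, 28, 28, 1), dtype=np.float32)}  # hypothetical input
record = make_warmup_record(inputs, model_name='my_model')  # hypothetical model name
with tf.io.TFRecordWriter('tf_serving_warmup_requests') as writer:
    writer.write(record)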
Code Example #19
    def test_convert_for_regress(self):
        prediction_log = text_format.Parse(
            """
      regress_log {
        request {
          input {
            example_list {
              examples {
                features {
                  feature: {
                    key: "regress_input"
                    value: { bytes_list: { value: "feature" } }
                  }
                }
              }
            }
          }
        }
        response {
          result {
            regressions {
              value: 0.7
            }
          }
        }
      }
    """, prediction_log_pb2.PredictionLog())

        output_example_spec = text_format.Parse(
            """
        output_columns_spec {
          regress_output {
            value_column: 'regress_value'
          }
        }
    """, bulk_inferrer_pb2.OutputExampleSpec())
        expected_example = text_format.Parse(
            """
        features {
            feature: {
              key: "regress_input"
              value: { bytes_list: { value: "feature" } }
            }
            feature: {
              key: "regress_value"
              value: { float_list: { value: 0.7 } }
            }
          }
    """, tf.train.Example())
        self.assertProtoEquals(
            expected_example, utils.convert(prediction_log,
                                            output_example_spec))
Code Example #20
    def test_convert_for_predict_invalid_output_example_spec(self):
        example = text_format.Parse(
            """
      features {
        feature { key: "predict_input" value: { bytes_list: { value: "feature" } } }
      }""", tf.train.Example())
        prediction_log = text_format.Parse(
            """
      predict_log {
        request {
          inputs {
            key: "%s"
            value {
              dtype: DT_STRING
              tensor_shape { dim { size: 1 } }
            }
          }
       }
       response {
         outputs {
           key: "output_float"
           value {
             dtype: DT_FLOAT
             tensor_shape { dim { size: 1 } dim { size: 2 }}
             float_val: 0.1
             float_val: 0.2
           }
         }
         outputs {
           key: "output_bytes"
           value {
             dtype: DT_STRING
             tensor_shape { dim { size: 1 }}
             string_val: "prediction"
           }
         }
       }
     }
    """ % (utils.INPUT_KEY), prediction_log_pb2.PredictionLog())

        # The `string_val` field is set programmatically below because a
        # serialized proto embedded directly in the text format cannot be
        # quoted reliably.
        prediction_log.predict_log.request.inputs[
            utils.INPUT_KEY].string_val.append(example.SerializeToString())

        output_example_spec = text_format.Parse(
            """
        output_columns_spec {
        }
    """, bulk_inferrer_pb2.OutputExampleSpec())
        with self.assertRaises(ValueError):
            utils.convert(prediction_log, output_example_spec)
Code Example #21
def main(_):
  assets_dir = make_assets_dir(tf.flags.FLAGS.export_dir)
  with tf.Session() as session:
    random_tensors = load_saved_model(session, tf.flags.FLAGS.export_dir)
    with tf.python_io.TFRecordWriter(os.path.join(assets_dir, 'tf_serving_warmup_requests')) as writer:
      for _ in range(tf.flags.FLAGS.batch_size):
        request = predict_pb2.PredictRequest(
          model_spec=model_pb2.ModelSpec(name=tf.flags.FLAGS.name),
          inputs={k: tf.make_tensor_proto(v) for k, v in session.run(random_tensors).items()}
        )
        log = prediction_log_pb2.PredictionLog(
          predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #22
def main():
    """Generate TFRecords for warming up."""

    with tf.io.TFRecordWriter("tf_serving_warmup_requests") as writer:
        image_bytes = get_image_bytes()
        predict_request = predict_pb2.PredictRequest()
        predict_request.model_spec.name = 'resnet'
        predict_request.model_spec.signature_name = 'serving_default'
        predict_request.inputs['image_bytes'].CopyFrom(
            tensor_util.make_tensor_proto([image_bytes], tf.string))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=predict_request))
        for r in range(NUM_RECORDS):
            writer.write(log.SerializeToString())
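The snippet above references a get_image_bytes helper and a NUM_RECORDS constant that are not shown. A hedged sketch of what they might look like; the path and record count are illustrative assumptions:

NUM_RECORDS = 100  # illustrative; the original value is not shown

def get_image_bytes():
    # Hypothetical helper: read a sample image as raw bytes.
    with open('sample.jpg', 'rb') as f:
        return f.read()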
Code Example #23
File: run_inference.py Project: tensorflow/tfx-bsl
 def _post_process(
     self, examples: List[Union[tf.train.Example, bytes]],
     serialized_examples: List[bytes], outputs: Mapping[Text, np.ndarray]
 ) -> List[prediction_log_pb2.PredictionLog]:
     del serialized_examples
     classifications = None
     regressions = None
     for signature in self._signatures:
         signature_def = signature.signature_def
         if signature_def.method_name == tf.saved_model.CLASSIFY_METHOD_NAME:
             classifications = _post_process_classify(
                 self._io_tensor_spec.output_alias_tensor_names, examples,
                 outputs)
         elif signature_def.method_name == tf.saved_model.REGRESS_METHOD_NAME:
             regressions = _post_process_regress(examples, outputs)
         else:
             raise ValueError('Signature method %s is not supported for '
                              'multi inference' % signature_def.method_name)
     result = []
     for i, example in enumerate(examples):
         prediction_log = prediction_log_pb2.PredictionLog()
         input_example = (prediction_log.multi_inference_log.request.input.
                          example_list.examples.add())
         (input_example.ParseFromString if isinstance(example, bytes) else
          input_example.CopyFrom)(example)
         response = prediction_log.multi_inference_log.response
         for signature in self._signatures:
             signature_def = signature.signature_def
             inference_result = response.results.add()
             if (signature_def.method_name
                     == tf.saved_model.CLASSIFY_METHOD_NAME
                     and classifications):
                 inference_result.classification_result.classifications.add(
                 ).CopyFrom(classifications[i])
             elif (signature_def.method_name
                   == tf.saved_model.REGRESS_METHOD_NAME and regressions):
                 inference_result.regression_result.regressions.add(
                 ).CopyFrom(regressions[i])
             else:
                 raise ValueError(
                     'Signature method %s is not supported for '
                     'multi inference' % signature_def.method_name)
             inference_result.model_spec.signature_name = signature.name
         if len(response.results) != len(self._signatures):
             raise RuntimeError(
                 'Multi inference response result length does not '
                 'match the number of signatures')
         result.append(prediction_log)
     return result
Code Example #24
File: warmup.py Project: taeokimeng/tf-serving
def main():
    """Generate TFRecords for warming up."""

    with tf.io.TFRecordWriter(
            f"models/{MODEL_NAME}/{MODEL_VERSION}/assets.extra/tf_serving_warmup_requests"
    ) as writer:
        predict_request = predict_pb2.PredictRequest()
        predict_request.model_spec.name = f'{MODEL_NAME}'
        predict_request.model_spec.signature_name = 'serving_default'
        predict_request.inputs["input_1"].CopyFrom(
            tf.make_tensor_proto(tf.reshape(X_new, (-1, 224, 224, 3))))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=predict_request))
        for r in range(NUM_RECORDS):
            writer.write(log.SerializeToString())
Code Example #25
 def _get_results(self, prediction_log_path):
     results = []
     filepattern = os.path.join(
         prediction_log_path,
         executor._PREDICTION_LOGS_DIR_NAME) + '-?????-of-?????.gz'
     for f in tf.io.gfile.glob(filepattern):
         record_iterator = tf.compat.v1.python_io.tf_record_iterator(
             path=f,
             options=tf.compat.v1.python_io.TFRecordOptions(
                 tf.compat.v1.python_io.TFRecordCompressionType.GZIP))
         for record_string in record_iterator:
             prediction_log = prediction_log_pb2.PredictionLog()
             prediction_log.MergeFromString(record_string)
             results.append(prediction_log)
     return results
Code Example #26
def main():
    # Build a batch of images.
    image_data = img_data(image_path)
    image_list = []
    for i in range(len(image_data)):
        image_list.append(image_data[i].numpy().tolist())
    with tf.io.TFRecordWriter("tf_serving_warmup_requests") as writer:
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'model1'
        request.model_spec.signature_name = 'serving_default'
        request.inputs['input_1'].CopyFrom(
            tf.make_tensor_proto(image_list, shape=[len(image_list), 224, 224, 3]))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #27
def main():
    with tf.python_io.TFRecordWriter(
            "/home/tata/Projects/hand_detector/inference_graph/1/assets.extra/tf_serving_warmup_requests"
    ) as writer:
        request = predict_pb2.PredictRequest()
        request.model_spec.name = serving_config.model_name
        image = cv2.imread('/home/tata/hand2.jpg')
        image_batch = np.array([image] *
                               5)  # Use a batch of 5 images for warmup
        request.inputs['inputs'].CopyFrom(
            tf.contrib.util.make_tensor_proto(image_batch,
                                              shape=image_batch.shape))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
Code Example #28
def main():
  if len(sys.argv) != 2 or sys.argv[-1].startswith('-'):
    print('Usage: resnet_warmup.py saved_model_dir')
    sys.exit(-1)

  model_dir = sys.argv[-1]
  if not os.path.isdir(model_dir):
    print('The saved model directory: %s does not exist. '
          'Specify the path of an existing model.' % model_dir)
    sys.exit(-1)

  # Create the assets.extra directory, assuming model_dir is the versioned
  # directory containing the SavedModel
  assets_dir = os.path.join(model_dir, 'assets.extra')
  if not os.path.exists(assets_dir):
    os.mkdir(assets_dir)

  warmup_file = os.path.join(assets_dir, 'tf_serving_warmup_requests')
  with tf.io.TFRecordWriter(warmup_file) as writer:
    for image in IMAGE_URLS:
      # Download the image
      dl_request = requests.get(image, stream=True)
      dl_request.raise_for_status()
      data = dl_request.content

      if not MODEL_ACCEPT_JPG:
        data = Image.open(io.BytesIO(dl_request.content))
        # Normalize and batchify the image
        data = np.array(data) / 255.0
        data = np.expand_dims(data, 0)
        data = data.astype(np.float32)

      # Create the inference request
      request = predict_pb2.PredictRequest()
      request.model_spec.name = 'resnet'
      request.model_spec.signature_name = 'serving_default'
      request.inputs['input_1'].CopyFrom(
          tf.make_tensor_proto(data))

      log = prediction_log_pb2.PredictionLog(
          predict_log=prediction_log_pb2.PredictLog(request=request))
      writer.write(log.SerializeToString())

  print('Created the file \'%s\'; restart tensorflow_model_server to warm up '
        'the ResNet SavedModel.' % warmup_file)
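This example likewise assumes IMAGE_URLS and MODEL_ACCEPT_JPG constants defined elsewhere. A hedged sketch with illustrative values:

MODEL_ACCEPT_JPG = False  # assumption: the model takes decoded float tensors, not JPEG bytes
IMAGE_URLS = [
    # hypothetical warmup image URL; substitute real ones
    'https://example.com/images/cat.jpg',
]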
Code Example #29
def main(argv):

    count = 0
    images = []
    files = [
        path.join(FLAGS.dataset, f)
        for f in os.listdir(FLAGS.dataset)
        if path.isfile(path.join(FLAGS.dataset, f))
    ]

    files = [f for f in files if f.endswith(('.png', '.jpg', '.jpeg'))]

    for file in files:
        img_raw = tf.image.decode_image(open(file, 'rb').read(), channels=3)
        image = preprocess_image(img_raw, FLAGS.input_size)
        image = tf.expand_dims(image, 0)
        images.append(image)

        count += 1

        if count == FLAGS.size:
            break

    input_tensor = tf.concat(images, 0)

    with tf.io.TFRecordWriter('tf_serving_warmup_requests') as writer:
        request = predict_pb2.PredictRequest(
            model_spec=model_pb2.ModelSpec(
                name=FLAGS.model_name
            ),
            inputs={
                FLAGS.input_tensor: tf.make_tensor_proto(
                    input_tensor,
                    shape=input_tensor.shape,
                    dtype=input_tensor.dtype
                )
            }
        )

        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request)
        )

        writer.write(log.SerializeToString())
        logging.info('"tf_serving_warmup_requests" created successfully!')
        logging.info('To use it, copy it into the "<model>/<version>/assets.extra" '
                     'folder of your serving model directory.')
Code Example #30
def main():
    """Generate TFRecords for warming up."""

    with tf.io.TFRecordWriter("tf_serving_warmup_requests") as writer:
        with open(IMAGE_PATH, 'rb') as f:
            #            image_data = base64.b64encode(f.read()).decode('utf-8')
            image_data = f.read()
        predict_request = predict_pb2.PredictRequest()
        predict_request.model_spec.name = 'resnet'
        predict_request.model_spec.signature_name = 'serving_default'
        predict_request.inputs['string_inp'].CopyFrom(
            tensor_util.make_tensor_proto(image_data, shape=[
                1,
            ]))
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=predict_request))
        for r in range(NUM_RECORDS):
            writer.write(log.SerializeToString())
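Several of the examples above write tf_serving_warmup_requests into the current working directory, while TF Serving only reads warmup records from a model version's assets.extra directory (as Examples #2, #24, and #27 do). A hedged sketch of moving a generated file into place; the paths are illustrative:

import os
import shutil

model_version_dir = 'models/resnet/1'  # hypothetical versioned SavedModel directory
extra_dir = os.path.join(model_version_dir, 'assets.extra')
os.makedirs(extra_dir, exist_ok=True)
# TF Serving looks for <version_dir>/assets.extra/tf_serving_warmup_requests at load time.
shutil.move('tf_serving_warmup_requests',
            os.path.join(extra_dir, 'tf_serving_warmup_requests'))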