Example 1
def run_inference(examples, serving_bundle):
    """Run inference on examples given model information

  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the inference request.

  Returns:
    A tuple with the first entry being the ClassificationResponse or
    RegressionResponse proto and the second entry being a dictionary of extra
    data for each example, such as attributions, or None if no data exists.
  """
    batch_size = 64
    if serving_bundle.estimator and serving_bundle.feature_spec:
        # If provided an estimator and feature spec then run inference locally.
        preds = serving_bundle.estimator.predict(
            lambda: tf.data.Dataset.from_tensor_slices(
                tf.io.parse_example([
                    ex.SerializeToString() for ex in examples
                ], serving_bundle.feature_spec)).batch(batch_size))

        if serving_bundle.use_predict:
            preds_key = serving_bundle.predict_output_tensor
        elif serving_bundle.model_type == 'regression':
            preds_key = 'predictions'
        else:
            preds_key = 'probabilities'

        values = []
        for pred in preds:
            values.append(pred[preds_key])
        return (common_utils.convert_prediction_values(values,
                                                       serving_bundle), None)
    elif serving_bundle.custom_predict_fn:
        # If custom_predict_fn is provided, pass examples directly for local
        # inference.
        values = serving_bundle.custom_predict_fn(examples)
        extra_results = None
        # If the custom prediction function returned a dict, then parse out the
        # prediction scores. If it is just a list, then the results are the
        # prediction results without attributions or other data.
        if isinstance(values, dict):
            preds = values.pop('predictions')
            extra_results = values
        else:
            preds = values
        return (common_utils.convert_prediction_values(preds, serving_bundle),
                extra_results)
    else:
        return (platform_utils.call_servo(examples, serving_bundle), None)
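
The custom_predict_fn branch above accepts either a plain list of prediction scores or a dict that also carries extra per-example data. A minimal sketch of a dict-returning function compatible with that contract; the scores, attribution values, and feature names below are illustrative stand-ins, not part of the original code:

def my_custom_predict_fn(examples):
    # Score each example; a real function would call into an actual model.
    preds = [0.5 for _ in examples]
    # Optional per-example extra data, surfaced as the second tuple entry.
    attributions = [{'feature_a': 0.1, 'feature_b': -0.2} for _ in examples]
    return {'predictions': preds, 'attributions': attributions}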
Example 2
def run_inference(examples, serving_bundle):
    """Run inference on examples given model information

  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the inference request.

  Returns:
    A ClassificationResponse or RegressionResponse proto.
  """
    batch_size = 64
    if serving_bundle.estimator and serving_bundle.feature_spec:
        # If provided an estimator and feature spec then run inference locally.
        preds = serving_bundle.estimator.predict(
            lambda: tf.data.Dataset.from_tensor_slices(
                tf.parse_example(
                    [ex.SerializeToString() for ex in examples],
                    serving_bundle.feature_spec)).batch(batch_size))

        if serving_bundle.use_predict:
            preds_key = serving_bundle.predict_output_tensor
        elif serving_bundle.model_type == 'regression':
            preds_key = 'predictions'
        else:
            preds_key = 'probabilities'

        values = []
        for pred in preds:
            values.append(pred[preds_key])
        return common_utils.convert_prediction_values(values, serving_bundle)
    elif serving_bundle.custom_predict_fn:
        # If custom_predict_fn is provided, pass examples directly for local
        # inference.
        values = serving_bundle.custom_predict_fn(examples)
        return common_utils.convert_prediction_values(values, serving_bundle)
    else:
        return platform_utils.call_servo(examples, serving_bundle)
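
This version builds the Estimator input function inline and uses tf.parse_example, the TF 1.x alias of tf.io.parse_example. A standalone sketch of the same input pipeline, assuming examples is a list of tf.train.Example protos and feature_spec is a parsing-spec dict (make_input_fn is a hypothetical helper name):

import tensorflow as tf

def make_input_fn(examples, feature_spec, batch_size=64):
    # Serialize once, outside the closure the Estimator will call.
    serialized = [ex.SerializeToString() for ex in examples]
    def input_fn():
        # Parse into a dict of tensors, then slice per example and re-batch.
        features = tf.io.parse_example(serialized, feature_spec)
        return tf.data.Dataset.from_tensor_slices(features).batch(batch_size)
    return input_fn

The predict call above would then read preds = serving_bundle.estimator.predict(make_input_fn(examples, serving_bundle.feature_spec)).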
Example 3
def run_inference(examples, serving_bundle):
    """Run inference on examples given model information.

    Args:
      examples: A list of examples that matches the model spec.
      serving_bundle: A `ServingBundle` object that contains the information to
        make the inference request.

    Returns:
      A tuple with the first entry being the ClassificationResponse or
      RegressionResponse proto and the second entry being a dictionary of extra
      data for each example, such as attributions, or None if no data exists.
    """
    batch_size = 64
    if serving_bundle.estimator and serving_bundle.feature_spec:
        # If provided an estimator and feature spec then run inference locally.
        preds = serving_bundle.estimator.predict(
            lambda: tf.data.Dataset.from_tensor_slices(
                tf.io.parse_example(
                    [ex.SerializeToString() for ex in examples],
                    serving_bundle.feature_spec,
                )
            ).batch(batch_size)
        )

        # Use the specified key if one is provided.
        key_to_use = (
            serving_bundle.predict_output_tensor
            if serving_bundle.use_predict
            else None
        )

        values = []
        for pred in preds:
            if key_to_use is None:
                # If the prediction dictionary only contains one key, use it.
                returned_keys = list(pred.keys())
                if len(returned_keys) == 1:
                    key_to_use = returned_keys[0]
                # Use default keys if necessary.
                elif serving_bundle.model_type == "classification":
                    key_to_use = "probabilities"
                else:
                    key_to_use = "predictions"
            if key_to_use not in pred:
                raise KeyError(
                    '"%s" not found in model predictions dictionary'
                    % key_to_use
                )

            values.append(pred[key_to_use])
        return (
            common_utils.convert_prediction_values(values, serving_bundle),
            None,
        )
    elif serving_bundle.custom_predict_fn:
        # If custom_predict_fn is provided, pass examples directly for local
        # inference.
        values = serving_bundle.custom_predict_fn(examples)
        extra_results = None
        # If the custom prediction function returned a dict, then parse out the
        # prediction scores. If it is just a list, then the results are the
        # prediction results without attributions or other data.
        if isinstance(values, dict):
            preds = values.pop("predictions")
            extra_results = values
        else:
            preds = values
        return (
            common_utils.convert_prediction_values(preds, serving_bundle),
            extra_results,
        )
    else:
        return (platform_utils.call_servo(examples, serving_bundle), None)
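
For reference, the examples argument in all three versions is a list of tf.train.Example protos whose features match feature_spec. A hedged sketch of constructing a matching pair; the feature name and value are invented for illustration:

import tensorflow as tf

# Hypothetical single-feature parsing spec and a matching example proto.
feature_spec = {'age': tf.io.FixedLenFeature([], tf.float32)}
example = tf.train.Example(features=tf.train.Features(feature={
    'age': tf.train.Feature(float_list=tf.train.FloatList(value=[42.0])),
}))
examples = [example]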