def _infer(self, request):
    """Returns JSON for the `vz-line-chart`s for a feature.

    Args:
      request: A request that should contain 'inference_address', 'model_name',
        'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.

    Returns:
      A list of JSON objects, one for each chart.
    """
    vocab_path = request.args.get('label_vocab_path')
    if vocab_path:
      try:
        with tf.gfile.GFile(vocab_path, 'r') as f:
          label_vocab = [line.rstrip('\n') for line in f]
      except tf.errors.NotFoundError as err:
        tf.logging.error('error reading vocab file: %s', err)
        label_vocab = []
    else:
      label_vocab = []

    try:
      if request.method != 'GET':
        tf.logging.error('%s requests are forbidden.', request.method)
        return http_util.Respond(request, {'error': 'invalid non-GET request'},
                                 'application/json', code=405)

      serving_bundle = inference_utils.ServingBundle(
          request.args.get('inference_address'),
          request.args.get('model_name'), request.args.get('model_type'),
          request.args.get('model_version'),
          request.args.get('model_signature'),
          request.args.get('use_predict') == 'true',
          request.args.get('predict_input_tensor'),
          request.args.get('predict_output_tensor'))
      indices_to_infer = sorted(self.updated_example_indices)
      examples_to_infer = [self.examples[index] for index in indices_to_infer]

      # Get the inference results proto, combine it with the indices of the
      # inferred examples, and respond with this data as JSON.
      inference_result_proto = platform_utils.call_servo(
          examples_to_infer, serving_bundle)
      new_inferences = inference_utils.wrap_inference_results(
          inference_result_proto)
      infer_json = json_format.MessageToJson(
          new_inferences, including_default_value_fields=True)
      infer_obj = json.loads(infer_json)
      resp = {'indices': indices_to_infer, 'results': infer_obj}
      self.updated_example_indices = set()
      return http_util.Respond(request, {'inferences': json.dumps(resp),
                                         'vocab': json.dumps(label_vocab)},
                               'application/json')
    except common_utils.InvalidUserInputError as e:
      return http_util.Respond(request, {'error': e.message},
                               'application/json', code=400)
    except AbortionError as e:
      return http_util.Respond(request, {'error': e.details},
                               'application/json', code=400)
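
For reference, a request reaching this handler carries the model information as URL query parameters. The snippet below is a minimal sketch of assembling such a query string with the standard library; the endpoint path and every parameter value here are illustrative assumptions rather than values taken from the handler above.

from urllib.parse import urlencode

# Hypothetical parameter values and endpoint path, for illustration only.
params = {
    'inference_address': 'localhost:8500',
    'model_name': 'my_model',
    'model_type': 'classification',
    'model_version': '',
    'model_signature': '',
    'use_predict': 'false',
    'label_vocab_path': '/tmp/label_vocab.txt',
}
url = '/data/plugin/whatif/infer?' + urlencode(params)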
Example 2
    def chart_for_index(index_to_mutate):
        mutant_features, mutant_examples = make_mutant_tuples(
            example_proto, original_feature, index_to_mutate, viz_params)

        inference_result_proto = platform_utils.call_servo(
            mutant_examples, serving_bundle)
        return make_json_formatted_for_single_chart(mutant_features,
                                                    inference_result_proto,
                                                    index_to_mutate)
Example 3
  def chart_for_index(index_to_mutate):
    mutant_features, mutant_examples = make_mutant_tuples(
        example_proto, original_feature, index_to_mutate, viz_params)

    inference_result_proto = platform_utils.call_servo(
        mutant_examples, serving_bundle)
    return make_json_formatted_for_single_chart(mutant_features,
                                                inference_result_proto,
                                                index_to_mutate)
  def _infer(self, request):
    """Returns JSON for the `vz-line-chart`s for a feature.

    Args:
      request: A request that should contain 'inference_address', 'model_name',
        'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.

    Returns:
      A list of JSON objects, one for each chart.
    """
    vocab_path = request.args.get('label_vocab_path')
    if vocab_path:
      try:
        with tf.gfile.GFile(vocab_path, 'r') as f:
          label_vocab = [line.rstrip('\n') for line in f]
      except tf.errors.NotFoundError as err:
        tf.logging.error('error reading vocab file: %s', err)
        label_vocab = []
    else:
      label_vocab = []

    try:
      if request.method != 'GET':
        tf.logging.error('%s requests are forbidden.', request.method)
        return http_util.Respond(request, {'error': 'invalid non-GET request'},
                                 'application/json', code=405)

      serving_bundle = inference_utils.ServingBundle(
          request.args.get('inference_address'),
          request.args.get('model_name'), request.args.get('model_type'),
          request.args.get('model_version'),
          request.args.get('model_signature'))
      indices_to_infer = sorted(self.updated_example_indices)
      examples_to_infer = [self.examples[index] for index in indices_to_infer]

      # Get the inference results proto, combine it with the indices of the
      # inferred examples, and respond with this data as JSON.
      inference_result_proto = platform_utils.call_servo(
          examples_to_infer, serving_bundle)
      new_inferences = inference_utils.wrap_inference_results(
          inference_result_proto)
      infer_json = json_format.MessageToJson(
          new_inferences, including_default_value_fields=True)
      infer_obj = json.loads(infer_json)
      resp = {'indices': indices_to_infer, 'results': infer_obj}
      self.updated_example_indices = set()
      return http_util.Respond(request, {'inferences': json.dumps(resp),
                                         'vocab': json.dumps(label_vocab)},
                               'application/json')
    except common_utils.InvalidUserInputError as e:
      return http_util.Respond(request, {'error': e.message},
                               'application/json', code=400)
    except AbortionError as e:
      return http_util.Respond(request, {'error': e.details},
                               'application/json', code=400)
Example 5
def run_inference(examples, serving_bundle):
    """Run inference on examples given model information

  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the inference request.

  Returns:
    A tuple with the first entry being the ClassificationResponse or
    RegressionResponse proto and the second entry being a dictionary of extra
    data for each example, such as attributions, or None if no data exists.
  """
    batch_size = 64
    if serving_bundle.estimator and serving_bundle.feature_spec:
        # If provided an estimator and feature spec then run inference locally.
        preds = serving_bundle.estimator.predict(
            lambda: tf.data.Dataset.from_tensor_slices(
                tf.io.parse_example([
                    ex.SerializeToString() for ex in examples
                ], serving_bundle.feature_spec)).batch(batch_size))

        if serving_bundle.use_predict:
            preds_key = serving_bundle.predict_output_tensor
        elif serving_bundle.model_type == 'regression':
            preds_key = 'predictions'
        else:
            preds_key = 'probabilities'

        values = []
        for pred in preds:
            values.append(pred[preds_key])
        return (common_utils.convert_prediction_values(values,
                                                       serving_bundle), None)
    elif serving_bundle.custom_predict_fn:
        # If custom_predict_fn is provided, pass examples directly for local
        # inference.
        values = serving_bundle.custom_predict_fn(examples)
        extra_results = None
        # If the custom prediction function returned a dict, then parse out the
        # prediction scores. If it is just a list, then the results are the
        # prediction results without attributions or other data.
        if isinstance(values, dict):
            preds = values.pop('predictions')
            extra_results = values
        else:
            preds = values
        return (common_utils.convert_prediction_values(preds, serving_bundle),
                extra_results)
    else:
        return (platform_utils.call_servo(examples, serving_bundle), None)
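
As a usage sketch for the custom_predict_fn branch above: the bundle below is a plain types.SimpleNamespace standing in for the real `ServingBundle` (which is defined elsewhere and may take different constructor arguments), and the prediction function simply returns one probability vector per example.

import types

import tensorflow as tf

def toy_predict_fn(examples):
    # Assumed contract from the branch above: return one list of scores per
    # example (a dict with a 'predictions' key would also be accepted).
    return [[0.3, 0.7] for _ in examples]

# Stand-in for a ServingBundle; only the attributes read by run_inference
# above are provided.
bundle = types.SimpleNamespace(
    estimator=None,
    feature_spec=None,
    custom_predict_fn=toy_predict_fn,
    model_type='classification',
    use_predict=False,
    predict_output_tensor=None)

example = tf.train.Example()
example.features.feature['age'].float_list.value.append(25.0)

response_proto, extra_data = run_inference([example], bundle)
# extra_data is None here because the prediction function returned a list.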
Example 6
def run_inference(examples, serving_bundle):
    """Run inference on examples given model information

  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the inference request.

  Returns:
    A ClassificationResponse or RegressionResponse proto.
  """
    batch_size = 64
    if serving_bundle.estimator and serving_bundle.feature_spec:
        # If provided an estimator and feature spec then run inference locally.
        preds = serving_bundle.estimator.predict(
            lambda: tf.data.Dataset.from_tensor_slices(
                tf.parse_example(
                    [ex.SerializeToString() for ex in examples],
                    serving_bundle.feature_spec)).batch(batch_size))

        if serving_bundle.use_predict:
            preds_key = serving_bundle.predict_output_tensor
        elif serving_bundle.model_type == 'regression':
            preds_key = 'predictions'
        else:
            preds_key = 'probabilities'

        values = []
        for pred in preds:
            values.append(pred[preds_key])
        return common_utils.convert_prediction_values(values, serving_bundle)
    elif serving_bundle.custom_predict_fn:
        # If custom_predict_fn is provided, pass examples directly for local
        # inference.
        values = serving_bundle.custom_predict_fn(examples)
        return common_utils.convert_prediction_values(values, serving_bundle)
    else:
        return platform_utils.call_servo(examples, serving_bundle)
Example 7
def run_inference(examples, serving_bundle):
    """Run inference on examples given model information.

    Args:
      examples: A list of examples that matches the model spec.
      serving_bundle: A `ServingBundle` object that contains the information to
        make the inference request.

    Returns:
      A tuple with the first entry being the ClassificationResponse or
      RegressionResponse proto and the second entry being a dictionary of extra
      data for each example, such as attributions, or None if no data exists.
    """
    batch_size = 64
    if serving_bundle.estimator and serving_bundle.feature_spec:
        # If provided an estimator and feature spec then run inference locally.
        preds = serving_bundle.estimator.predict(
            lambda: tf.data.Dataset.from_tensor_slices(
                tf.io.parse_example(
                    [ex.SerializeToString() for ex in examples],
                    serving_bundle.feature_spec,
                )
            ).batch(batch_size)
        )

        # Use the specified key if one is provided.
        key_to_use = (
            serving_bundle.predict_output_tensor
            if serving_bundle.use_predict
            else None
        )

        values = []
        for pred in preds:
            if key_to_use is None:
                # If the prediction dictionary only contains one key, use it.
                returned_keys = list(pred.keys())
                if len(returned_keys) == 1:
                    key_to_use = returned_keys[0]
                # Use default keys if necessary.
                elif serving_bundle.model_type == "classification":
                    key_to_use = "probabilities"
                else:
                    key_to_use = "predictions"
            if key_to_use not in pred:
                raise KeyError(
                    '"%s" not found in model predictions dictionary'
                    % key_to_use
                )

            values.append(pred[key_to_use])
        return (
            common_utils.convert_prediction_values(values, serving_bundle),
            None,
        )
    elif serving_bundle.custom_predict_fn:
        # If custom_predict_fn is provided, pass examples directly for local
        # inference.
        values = serving_bundle.custom_predict_fn(examples)
        extra_results = None
        # If the custom prediction function returned a dict, then parse out the
        # prediction scores. If it is just a list, then the results are the
        # prediction results without attributions or other data.
        if isinstance(values, dict):
            preds = values.pop("predictions")
            extra_results = values
        else:
            preds = values
        return (
            common_utils.convert_prediction_values(preds, serving_bundle),
            extra_results,
        )
    else:
        return (platform_utils.call_servo(examples, serving_bundle), None)
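
For completeness, here is a sketch of the dict-returning custom_predict_fn case that this version handles explicitly. As before, the types.SimpleNamespace bundle is a stand-in for the real `ServingBundle`, and the "attributions" key is an arbitrary example of extra per-example data.

import types

import tensorflow as tf

def predict_with_attributions(examples):
    # Assumed contract from the branch above: the 'predictions' key holds the
    # scores; every other key is passed through as extra data.
    return {
        "predictions": [[0.1, 0.9] for _ in examples],
        "attributions": [{"age": 0.5} for _ in examples],
    }

bundle = types.SimpleNamespace(
    estimator=None,
    feature_spec=None,
    custom_predict_fn=predict_with_attributions,
    model_type="classification",
    use_predict=False,
    predict_output_tensor=None,
)

example = tf.train.Example()
example.features.feature["age"].float_list.value.append(25.0)

response_proto, extra_data = run_inference([example], bundle)
# extra_data is {"attributions": [...]} here; it would be None for a plain
# list return value.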