Example #1
def export_outputs_for_mode(mode,
                            serving_export_outputs=None,
                            predictions=None,
                            loss=None,
                            metrics=None):
    """Util function for constructing a `ExportOutput` dict given a mode.

  The returned dict can be directly passed to `build_all_signature_defs` helper
  function as the `export_outputs` argument, used for generating a SignatureDef
  map.

  Args:
    mode: A `ModeKeys` specifying the mode.
    serving_export_outputs: Describes the output signatures to be exported to
      `SavedModel` and used during serving. Should be a dict or None.
    predictions: A dict of Tensors or single Tensor representing model
        predictions. This argument is only used if serving_export_outputs is not
        set.
    loss: A dict of Tensors or single Tensor representing calculated loss.
    metrics: A dict of (metric_value, update_op) tuples, or a single tuple.
      metric_value must be a Tensor, and update_op must be a Tensor or Op

  Returns:
    Dictionary mapping the a key to an `tf.estimator.export.ExportOutput` object
    The key is the expected SignatureDef key for the mode.

  Raises:
    ValueError: if an appropriate ExportOutput cannot be found for the mode.
  """
    if mode not in SIGNATURE_KEY_MAP:
        raise ValueError(
            f'Export output type not found for `mode`: {mode}. Expected one of: '
            f'{list(SIGNATURE_KEY_MAP.keys())}.\n'
            'One likely error is that V1 Estimator ModeKeys were somehow passed to '
            'this function. Please ensure that you are using the new ModeKeys.'
        )
    signature_key = SIGNATURE_KEY_MAP[mode]
    if mode_keys.is_predict(mode):
        return get_export_outputs(serving_export_outputs, predictions)
    elif mode_keys.is_train(mode):
        return {
            signature_key:
            export_output_lib.TrainOutput(loss=loss,
                                          predictions=predictions,
                                          metrics=metrics)
        }
    else:
        return {
            signature_key:
            export_output_lib.EvalOutput(loss=loss,
                                         predictions=predictions,
                                         metrics=metrics)
        }
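
A minimal usage sketch (not part of the original source) for the helper above. It assumes the names used elsewhere on this page (`mode_keys.ModeKeys`, `constant_op`, `export_output_lib`, `build_all_signature_defs`) are importable as in the file itself, and the tensor values are purely illustrative.

# Hypothetical loss/prediction tensors, for illustration only.
train_loss = {'total_loss': constant_op.constant([0.5])}
train_predictions = {'scores': constant_op.constant([[0.1, 0.9]])}

# For TRAIN mode the helper wraps everything in a TrainOutput keyed by the
# mode's SignatureDef key, so the result can be handed to
# `build_all_signature_defs` as its `export_outputs` argument.
train_export_outputs = export_outputs_for_mode(
    mode=mode_keys.ModeKeys.TRAIN,
    predictions=train_predictions,
    loss=train_loss)

for signature_key, export_output in train_export_outputs.items():
    print(signature_key, type(export_output).__name__)  # -> ... TrainOutput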
  def test_eval_signature_def(self):
    with context.graph_mode():
      loss = {'my_loss': constant_op.constant([0])}
      predictions = {u'output1': constant_op.constant(['foo'])}

      outputter = export_output_lib.EvalOutput(loss, predictions, None)

      receiver = {u'features': constant_op.constant(100, shape=(100, 2)),
                  'labels': constant_op.constant(100, shape=(100, 1))}
      sig_def = outputter.as_signature_def(receiver)

      self.assertIn('loss/my_loss', sig_def.outputs)
      self.assertNotIn('metrics/value', sig_def.outputs)
      self.assertIn('predictions/output1', sig_def.outputs)
      self.assertIn('features', sig_def.inputs)
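
The test above exercises `EvalOutput`; the following sketch (not one of the original tests) shows the analogous `TrainOutput` case with a single (metric_value, update_op) tuple. The 'metrics/value' output key is an assumption, mirroring the 'loss/...' and 'predictions/...' prefixing checked above.

  def test_train_signature_def_sketch(self):
    # Sketch only: the 'metrics/value' key below is assumed, based on the
    # prefixing seen for the loss and prediction keys in the test above.
    with context.graph_mode():
      loss = {'my_loss': constant_op.constant([0])}
      predictions = {u'output1': constant_op.constant(['foo'])}
      metrics = (constant_op.constant([0.5]), constant_op.constant([1]))

      outputter = export_output_lib.TrainOutput(loss, predictions, metrics)

      receiver = {u'features': constant_op.constant(100, shape=(100, 2))}
      sig_def = outputter.as_signature_def(receiver)

      self.assertIn('loss/my_loss', sig_def.outputs)
      self.assertIn('metrics/value', sig_def.outputs)
      self.assertIn('predictions/output1', sig_def.outputs)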
def export_outputs_for_mode(
    mode, serving_export_outputs=None, predictions=None, loss=None,
    metrics=None):
  """Util function for constructing a `ExportOutput` dict given a mode.

  The returned dict can be directly passed to `build_all_signature_defs` helper
  function as the `export_outputs` argument, used for generating a SignatureDef
  map.

  Args:
    mode: A `ModeKeys` specifying the mode.
    serving_export_outputs: Describes the output signatures to be exported to
      `SavedModel` and used during serving. Should be a dict or None.
    predictions: A dict of Tensors or single Tensor representing model
        predictions. This argument is only used if serving_export_outputs is not
        set.
    loss: A dict of Tensors or single Tensor representing calculated loss.
    metrics: A dict of (metric_value, update_op) tuples, or a single tuple.
      metric_value must be a Tensor, and update_op must be a Tensor or Op

  Returns:
    Dictionary mapping the a key to an `tf.estimator.export.ExportOutput` object
    The key is the expected SignatureDef key for the mode.

  Raises:
    ValueError: if an appropriate ExportOutput cannot be found for the mode.
  """
  # TODO(b/113185250): move all model export helper functions into a util file.
  if mode == mode_keys.ModeKeys.PREDICT:
    return get_export_outputs(serving_export_outputs, predictions)
  elif mode == mode_keys.ModeKeys.TRAIN:
    return {mode: export_output_lib.TrainOutput(
        loss=loss, predictions=predictions, metrics=metrics)}
  elif mode == mode_keys.ModeKeys.TEST:
    return {mode: export_output_lib.EvalOutput(
        loss=loss, predictions=predictions, metrics=metrics)}
  else:
    raise ValueError(
        'Export output type not found for mode: {}'.format(mode))
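
As the docstring notes, the returned dict is meant to be fed to `build_all_signature_defs` as its `export_outputs` argument. The sketch below (not part of the original file) shows that serving path; it assumes a helper of the form `build_all_signature_defs(receiver_tensors, export_outputs, ...)` from the same export utilities, and the tensors are placeholders for illustration.

# Hypothetical serving tensors; in a real export these come from the model
# and from the serving input receiver.
serving_predictions = {'scores': constant_op.constant([[0.1, 0.9]])}
receiver_tensors = {'features': constant_op.constant(100, shape=(100, 2))}

# PREDICT mode defers to `get_export_outputs`, which per the docstring uses
# `predictions` only when `serving_export_outputs` is not set.
serving_export_outputs = export_outputs_for_mode(
    mode=mode_keys.ModeKeys.PREDICT, predictions=serving_predictions)

# Assumed helper signature: build_all_signature_defs(receiver_tensors,
# export_outputs), as in the SavedModel/estimator export utilities.
signature_def_map = build_all_signature_defs(
    receiver_tensors, serving_export_outputs)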