Example no. 1
0
  def test_build_all_signature_defs_serving_only(self):
    """Verifies non-serving signatures are omitted unless serving_only=False."""
    # One string placeholder as the receiver and a single constant output.
    receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
    result = constant_op.constant([1.])
    export_outputs = {
        "train": export_output.TrainOutput(loss=result),
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_output.PredictOutput(outputs=result),
    }

    # Default behavior (serving_only=True): only the serving signature.
    expected = {
        "serving_default": signature_def_utils.predict_signature_def(
            receiver_tensor, {"output": result}),
    }
    self.assertDictEqual(
        expected,
        export_utils.build_all_signature_defs(receiver_tensor, export_outputs))

    # serving_only=False additionally exposes the train signature.
    expected["train"] = signature_def_utils.supervised_train_signature_def(
        receiver_tensor, loss={"loss": result})
    self.assertDictEqual(
        expected,
        export_utils.build_all_signature_defs(
            receiver_tensor, export_outputs, serving_only=False))
Example no. 2
0
    def test_build_all_signature_defs_serving_only(self):
        """Verifies non-serving signatures are omitted unless serving_only=False."""
        # Run in graph mode: this exercises a deprecated v1 API that relies on
        # graph-only functions such as build_tensor_info.
        with ops.Graph().as_default():
            receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
            result = constant_op.constant([1.])
            export_outputs = {
                "train":
                export_output.TrainOutput(loss=result),
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                export_output.PredictOutput(outputs=result),
            }

            # Default behavior (serving_only=True): only the serving signature.
            expected = {
                "serving_default":
                signature_def_utils.predict_signature_def(
                    receiver_tensor, {"output": result})
            }
            self.assertDictEqual(
                expected,
                export_utils.build_all_signature_defs(receiver_tensor,
                                                      export_outputs))

            # serving_only=False additionally exposes the train signature.
            expected["train"] = (
                signature_def_utils.supervised_train_signature_def(
                    receiver_tensor, loss={"loss": result}))
            self.assertDictEqual(
                expected,
                export_utils.build_all_signature_defs(
                    receiver_tensor, export_outputs, serving_only=False))
Example no. 3
0
    def test_train_signature_def(self):
        """Checks that TrainOutput.as_signature_def maps every component.

        Builds a TrainOutput from a loss, predictions, and two metric styles
        (a Metric object and a (value, update_op) tuple), then verifies the
        resulting SignatureDef exposes each under its namespaced key.
        """
        with context.graph_mode():
            loss = {'my_loss': constant_op.constant([0])}
            predictions = {u'output1': constant_op.constant(['foo'])}
            metric_obj = metrics_module.Mean()
            metric_obj.update_state(constant_op.constant([0]))
            metrics = {
                'metrics_1':
                metric_obj,
                'metrics_2':
                (constant_op.constant([0]), constant_op.constant([10]))
            }

            outputter = export_output_lib.TrainOutput(loss, predictions,
                                                      metrics)

            receiver = {
                u'features': constant_op.constant(100, shape=(100, 2)),
                'labels': constant_op.constant(100, shape=(100, 1))
            }
            sig_def = outputter.as_signature_def(receiver)

            # assertIn (rather than assertTrue('x' in y)) reports the actual
            # container contents on failure instead of "False is not true".
            self.assertIn('loss/my_loss', sig_def.outputs)
            self.assertIn('metrics_1/value', sig_def.outputs)
            self.assertIn('metrics_2/value', sig_def.outputs)
            self.assertIn('predictions/output1', sig_def.outputs)
            self.assertIn('features', sig_def.inputs)
Example no. 4
0
def export_outputs_for_mode(mode,
                            serving_export_outputs=None,
                            predictions=None,
                            loss=None,
                            metrics=None):
    """Util function for constructing a `ExportOutput` dict given a mode.

  The returned dict can be directly passed to `build_all_signature_defs` helper
  function as the `export_outputs` argument, used for generating a SignatureDef
  map.

  Args:
    mode: A `ModeKeys` specifying the mode.
    serving_export_outputs: Describes the output signatures to be exported to
      `SavedModel` and used during serving. Should be a dict or None.
    predictions: A dict of Tensors or single Tensor representing model
        predictions. This argument is only used if serving_export_outputs is not
        set.
    loss: A dict of Tensors or single Tensor representing calculated loss.
    metrics: A dict of (metric_value, update_op) tuples, or a single tuple.
      metric_value must be a Tensor, and update_op must be a Tensor or Op

  Returns:
    Dictionary mapping the a key to an `tf.estimator.export.ExportOutput` object
    The key is the expected SignatureDef key for the mode.

  Raises:
    ValueError: if an appropriate ExportOutput cannot be found for the mode.
  """
    # Reject unknown modes up front (e.g. V1 Estimator ModeKeys).
    if mode not in SIGNATURE_KEY_MAP:
        raise ValueError(
            f'Export output type not found for `mode`: {mode}. Expected one of: '
            f'{list(SIGNATURE_KEY_MAP.keys())}.\n'
            'One likely error is that V1 Estimator Modekeys were somehow passed to '
            'this function. Please ensure that you are using the new ModeKeys.'
        )
    # Predict mode delegates to the serving-output helper; train and eval
    # differ only in which ExportOutput class wraps the same three arguments.
    if mode_keys.is_predict(mode):
        return get_export_outputs(serving_export_outputs, predictions)
    output_cls = (export_output_lib.TrainOutput
                  if mode_keys.is_train(mode) else export_output_lib.EvalOutput)
    return {
        SIGNATURE_KEY_MAP[mode]:
        output_cls(loss=loss, predictions=predictions, metrics=metrics)
    }
Example no. 5
0
def export_outputs_for_mode(
    mode, serving_export_outputs=None, predictions=None, loss=None,
    metrics=None):
  """Util function for constructing a `ExportOutput` dict given a mode.

  The returned dict can be directly passed to `build_all_signature_defs` helper
  function as the `export_outputs` argument, used for generating a SignatureDef
  map.

  Args:
    mode: A `ModeKeys` specifying the mode.
    serving_export_outputs: Describes the output signatures to be exported to
      `SavedModel` and used during serving. Should be a dict or None.
    predictions: A dict of Tensors or single Tensor representing model
        predictions. This argument is only used if serving_export_outputs is not
        set.
    loss: A dict of Tensors or single Tensor representing calculated loss.
    metrics: A dict of (metric_value, update_op) tuples, or a single tuple.
      metric_value must be a Tensor, and update_op must be a Tensor or Op

  Returns:
    Dictionary mapping the a key to an `tf.estimator.export.ExportOutput` object
    The key is the expected SignatureDef key for the mode.

  Raises:
    ValueError: if an appropriate ExportOutput cannot be found for the mode.
  """
  # TODO(b/113185250): move all model export helper functions into an util file.
  if mode == mode_keys.ModeKeys.PREDICT:
    # Predict mode delegates to the serving-output helper.
    return get_export_outputs(serving_export_outputs, predictions)

  # Train and eval differ only in the wrapping ExportOutput class; both are
  # keyed by the mode itself.
  if mode == mode_keys.ModeKeys.TRAIN:
    wrapped = export_output_lib.TrainOutput(
        loss=loss, predictions=predictions, metrics=metrics)
  elif mode == mode_keys.ModeKeys.TEST:
    wrapped = export_output_lib.EvalOutput(
        loss=loss, predictions=predictions, metrics=metrics)
  else:
    raise ValueError(
        'Export output type not found for mode: {}'.format(mode))
  return {mode: wrapped}