def test_input_receiver_raw_values(self):
        """Tests that no errors are raised when input is expected."""
        features = {
            "feature0":
            constant_op.constant([0]),
            u"feature1":
            constant_op.constant([1]),
            "feature2":
            sparse_tensor.SparseTensor(indices=[[0, 0]],
                                       values=[1],
                                       dense_shape=[1, 1]),
        }

        labels = {
            "classes": constant_op.constant([0] * 100),
        }

        receiver_tensors = {
            "example0": array_ops.placeholder(dtypes.string, name="example0"),
            u"example1": array_ops.placeholder(dtypes.string, name="example1"),
        }
        rec = export.SupervisedInputReceiver(features["feature2"], labels,
                                             receiver_tensors)
        self.assertIsInstance(rec.features, sparse_tensor.SparseTensor)

        rec = export.SupervisedInputReceiver(features, labels["classes"],
                                             receiver_tensors)
        self.assertIsInstance(rec.labels, ops.Tensor)

def test_input_receiver_receiver_tensors_invalid(self):
        """Tests that invalid receiver_tensors raise ValueError."""
        features = {
            "feature0":
            constant_op.constant([0]),
            u"feature1":
            constant_op.constant([1]),
            "feature2":
            sparse_tensor.SparseTensor(indices=[[0, 0]],
                                       values=[1],
                                       dense_shape=[1, 1]),
        }
        labels = constant_op.constant([0])

        with self.assertRaisesRegexp(ValueError,
                                     "receiver_tensors must be defined"):
            export.SupervisedInputReceiver(features=features,
                                           labels=labels,
                                           receiver_tensors=None)

        with self.assertRaisesRegexp(ValueError,
                                     "receiver_tensors keys must be strings"):
            export.SupervisedInputReceiver(features=features,
                                           labels=labels,
                                           receiver_tensors={
                                               1:
                                               array_ops.placeholder(
                                                   dtypes.string,
                                                   name="example0")
                                           })

        with self.assertRaisesRegexp(
                ValueError, "receiver_tensor example1 must be a Tensor"):
            export.SupervisedInputReceiver(features=features,
                                           labels=labels,
                                           receiver_tensors={"example1": [1]})
def _LegacyEvalInputReceiver(  # pylint: disable=invalid-name
    features: types.TensorTypeMaybeDict,
    labels: Optional[types.TensorTypeMaybeDict],
    receiver_tensors: Dict[Text, types.TensorType],
    input_refs: Optional[types.TensorType] = None) -> EvalInputReceiverType:
  """Returns a legacy eval_input_receiver_fn.

  This is for testing purposes only.

  Args:
    features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the features to be passed to the model.
    labels: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the labels to be passed to the model. If your
      model is an unsupervised model whose `model_fn` does not accept a `labels`
      argument, you may pass None instead.
    receiver_tensors: A dict of string to `Tensor` containing exactly one key,
      named 'examples', which maps to the single input node that the receiver
      expects to be fed by default. Typically this is a placeholder expecting
      serialized `tf.Example` protos.
    input_refs: Optional. A 1-D integer `Tensor` that is batch-aligned with
      `features` and `labels` and serves as an index into
      receiver_tensors['examples'], indicating where this slice of features /
      labels came from. If not provided, defaults to
      range(0, len(receiver_tensors['examples'])).

  Raises:
    ValueError: receiver_tensors did not contain exactly one key named
      "examples".
  """
  # Force list representation for Python 3 compatibility.
  if list(receiver_tensors.keys()) != ['examples']:
    raise ValueError('receiver_tensors must contain exactly one key named '
                     'examples.')

  # Workaround for TensorFlow issue #17568. Note that we pass the
  # identity-wrapped features and labels to model_fn, but we have to feed
  # the non-identity wrapped Tensors during evaluation.
  #
  # Also note that we can't wrap predictions, so metrics that have control
  # dependencies on predictions will cause the predictions to be recomputed
  # during their evaluation.
  wrapped_features = util.wrap_tensor_or_dict_of_tensors_in_identity(features)
  if labels is not None:
    wrapped_labels = util.wrap_tensor_or_dict_of_tensors_in_identity(labels)
    receiver = export_lib.SupervisedInputReceiver(
        features=wrapped_features,
        labels=wrapped_labels,
        receiver_tensors=receiver_tensors)
  else:
    receiver = export_lib.UnsupervisedInputReceiver(
        features=wrapped_features, receiver_tensors=receiver_tensors)

  if input_refs is None:
    input_refs = tf.range(tf.size(input=receiver_tensors['examples']))
  # Note that in the collection we store the unwrapped versions, because
  # we want to feed the unwrapped versions.
  _add_tfma_collections(features, labels, input_refs)
  util.add_build_data_collection()
  return receiver
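
As a hedged illustration (not part of the source), _LegacyEvalInputReceiver might be wired into a test-only eval_input_receiver_fn roughly as follows; the placeholder name, feature spec, and label key are assumptions made for this sketch.

import tensorflow as tf

def _legacy_eval_input_receiver_fn():
  # Hypothetical feature spec and key names, for illustration only.
  serialized = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')
  feature_spec = {
      'age': tf.io.FixedLenFeature([], tf.float32),
      'label': tf.io.FixedLenFeature([], tf.int64),
  }
  parsed = tf.io.parse_example(serialized, feature_spec)
  labels = parsed.pop('label')
  # input_refs is omitted, so it defaults to a range over the batch of examples.
  return _LegacyEvalInputReceiver(
      features=parsed,
      labels=labels,
      receiver_tensors={'examples': serialized})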
Example #4
def test_multi_feature_multi_receiver(self):
  features = {"foo": constant_op.constant(5),
              "bar": constant_op.constant(6)}
  labels = {"value": constant_op.constant(5)}
  receiver_tensors = {"baz": array_ops.placeholder(dtypes.int64),
                      "qux": array_ops.placeholder(dtypes.float32)}
  _ = export.SupervisedInputReceiver(features, labels, receiver_tensors)

def test_multi_feature_single_receiver(self):
  features = {
      "foo": constant_op.constant(5),
      "bar": constant_op.constant(6)
  }
  labels = {"value": constant_op.constant(5)}
  receiver_tensor = array_ops.placeholder(dtypes.string)
  _ = export.SupervisedInputReceiver(features, labels, receiver_tensor)
Example #6
def serving_input_fn():
  """Input fn for serving export, starting from serialized example."""
  serialized_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=(None,), name="serialized_example")
  # `features` and `labels` are raw Python values captured from the enclosing
  # scope; wrap each feature value in a constant tensor before building the
  # receiver.
  for key, value in features.items():
    features[key] = tf.constant(value)
  return export.SupervisedInputReceiver(
      features=features,
      labels=tf.constant(labels),
      receiver_tensors=serialized_example)

def test_single_feature_single_receiver(self):
  feature = constant_op.constant(5)
  label = constant_op.constant(5)
  receiver_tensor = array_ops.placeholder(dtypes.string)
  input_receiver = export.SupervisedInputReceiver(
      feature, label, receiver_tensor)

  # A single receiver tensor is automatically named "input".
  receiver_key, = input_receiver.receiver_tensors.keys()
  self.assertEqual("input", receiver_key)
Example #8
  def test_input_receiver_features_invalid(self):
    features = constant_op.constant([0] * 100)
    labels = constant_op.constant([0])
    receiver_tensors = {
        "example0": array_ops.placeholder(dtypes.string, name="example0"),
        u"example1": array_ops.placeholder(dtypes.string, name="example1"),
    }

    with self.assertRaisesRegexp(ValueError, "features must be defined"):
      export.SupervisedInputReceiver(
          features=None,
          labels=labels,
          receiver_tensors=receiver_tensors)

    with self.assertRaisesRegexp(ValueError, "feature keys must be strings"):
      export.SupervisedInputReceiver(
          features={1: constant_op.constant([1])},
          labels=labels,
          receiver_tensors=receiver_tensors)

    with self.assertRaisesRegexp(ValueError, "label keys must be strings"):
      export.SupervisedInputReceiver(
          features=features,
          labels={1: constant_op.constant([1])},
          receiver_tensors=receiver_tensors)

    with self.assertRaisesRegexp(
        ValueError, "feature feature1 must be a Tensor or SparseTensor"):
      export.SupervisedInputReceiver(
          features={"feature1": [1]},
          labels=labels,
          receiver_tensors=receiver_tensors)

    with self.assertRaisesRegexp(
        ValueError, "feature must be a Tensor or SparseTensor"):
      export.SupervisedInputReceiver(
          features=[1],
          labels=labels,
          receiver_tensors=receiver_tensors)

    with self.assertRaisesRegexp(
        ValueError, "label must be a Tensor or SparseTensor"):
      export.SupervisedInputReceiver(
          features=features,
          labels=100,
          receiver_tensors=receiver_tensors)

def test_feature_labeled_tensor(self):
  feature = LabeledTensorMock()
  label = constant_op.constant(5)
  receiver_tensor = array_ops.placeholder(dtypes.string)
  _ = export.SupervisedInputReceiver(feature, label, receiver_tensor)
Example #10
def EvalInputReceiver(  # pylint: disable=invalid-name
    features: types.TensorTypeMaybeDict,
    labels: Optional[types.TensorTypeMaybeDict],
    receiver_tensors: types.TensorTypeMaybeDict,
    input_refs: Optional[types.TensorType] = None,
    iterator_initializer: Optional[Text] = None) -> EvalInputReceiverType:
  """Returns an appropriate receiver for eval_input_receiver_fn.

  This is a wrapper around TensorFlow's InputReceiver that adds additional
  entries and prefixes to the input tensors so that features and labels can be
  discovered at evaluation time. It also wraps the features and labels tensors
  in identity to work around TensorFlow issue #17568.

  The resulting signature_def.inputs will have the following form:
    inputs/<input>     - placeholders that are used for input processing (i.e.
                         receiver_tensors). If receiver_tensors is a tensor and
                         not a dict, then this will just be named 'inputs'.
    input_refs         - reference to input_refs tensor (see below).
    features/<feature> - references to tensors passed in features. If features
                         is a tensor and not a dict, then this will just be
                         named 'features'.
    labels/<label>     - references to tensors passed in labels. If labels is
                         a tensor and not a dict, then this will just be named
                         'labels'.

  Args:
    features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the features to be passed to the model.
    labels: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the labels to be passed to the model. If your
      model is an unsupervised model whose `model_fn` does not accept a `labels`
      argument, you may pass None instead.
    receiver_tensors: A dict of string to `Tensor` containing exactly one key,
      named 'examples', which maps to the single input node that the receiver
      expects to be fed by default. Typically this is a placeholder expecting
      serialized `tf.Example` protos.
    input_refs: Optional (unless iterator_initializer is used). A 1-D integer
      `Tensor` that is batch-aligned with `features` and `labels` and serves
      as an index into receiver_tensors['examples'], indicating where this
      slice of features / labels came from. If not provided, defaults to
      range(0, len(receiver_tensors['examples'])).
    iterator_initializer: Optional name of tf.compat.v1.data.Iterator
      initializer used when the inputs are fed using an iterator. This is
      intended to be used by models that cannot handle a single large input due
      to memory resource constraints. For example, a model that takes a
      tf.train.SequenceExample record as input but only processes smaller
      batches of examples within the overall sequence at a time. The caller is
      responsible for setting the input_refs appropriately (i.e. all examples
      belonging to the same tf.train.SequenceExample should have the same
      input_ref).

  Raises:
    ValueError: receiver_tensors did not contain exactly one key named
      "examples" or iterator_initializer used without input_refs.
  """
  if list(receiver_tensors.keys()) != ['examples']:
    raise ValueError('receiver_tensors must contain exactly one key named '
                     'examples.')

  if input_refs is None:
    if iterator_initializer is not None:
      raise ValueError('input_refs is required if iterator_initializer is used')
    input_refs = tf.range(tf.size(input=list(receiver_tensors.values())[0]))

  updated_receiver_tensors = {}

  def add_tensors(prefix, tensor_or_dict):
    if isinstance(tensor_or_dict, dict):
      for key in tensor_or_dict:
        updated_receiver_tensors[prefix + '/' + key] = tensor_or_dict[key]
    else:
      updated_receiver_tensors[prefix] = tensor_or_dict

  add_tensors(constants.SIGNATURE_DEF_INPUTS_PREFIX, receiver_tensors)
  add_tensors(constants.FEATURES_NAME, features)
  if labels is not None:
    add_tensors(constants.LABELS_NAME, labels)
  updated_receiver_tensors[constants.SIGNATURE_DEF_INPUT_REFS_KEY] = (
      input_refs)
  if iterator_initializer:
    updated_receiver_tensors[
        constants.SIGNATURE_DEF_ITERATOR_INITIALIZER_KEY] = (
            tf.constant(iterator_initializer))
  updated_receiver_tensors[constants.SIGNATURE_DEF_TFMA_VERSION_KEY] = (
      tf.constant(version.VERSION_STRING))

  # TODO(b/119308261): Remove once all evaluator binaries have been updated.
  _add_tfma_collections(features, labels, input_refs)
  util.add_build_data_collection()

  # Workaround for TensorFlow issue #17568. Note that we pass the
  # identity-wrapped features and labels to model_fn, but we have to feed
  # the non-identity wrapped Tensors during evaluation.
  #
  # Also note that we can't wrap predictions, so metrics that have control
  # dependencies on predictions will cause the predictions to be recomputed
  # during their evaluation.
  wrapped_features = util.wrap_tensor_or_dict_of_tensors_in_identity(features)
  if labels is not None:
    wrapped_labels = util.wrap_tensor_or_dict_of_tensors_in_identity(labels)
    return export_lib.SupervisedInputReceiver(
        features=wrapped_features,
        labels=wrapped_labels,
        receiver_tensors=updated_receiver_tensors)
  else:
    return export_lib.UnsupervisedInputReceiver(
        features=wrapped_features, receiver_tensors=updated_receiver_tensors)
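
A minimal usage sketch (an assumption, not taken from the source): an eval_input_receiver_fn that parses serialized tf.Example protos and hands them to the EvalInputReceiver above. The feature spec and label key are hypothetical.

import tensorflow as tf

def eval_input_receiver_fn():
  serialized = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_placeholder')
  # Hypothetical feature spec; a real model supplies its own.
  feature_spec = {
      'x': tf.io.FixedLenFeature([1], tf.float32),
      'label': tf.io.FixedLenFeature([1], tf.int64),
  }
  features = tf.io.parse_example(serialized, feature_spec)
  return EvalInputReceiver(
      features=features,
      labels=features['label'],
      receiver_tensors={'examples': serialized})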
Example #11
def EvalInputReceiver(  # pylint: disable=invalid-name
        features,
        labels,
        receiver_tensors,
        input_refs=None):
    """Returns an appropriate receiver for eval_input_receiver_fn.

  This is a wrapper around TensorFlow's InputReceiver that adds additional
  entries and prefixes to the input tensors so that features and labels can be
  discovered at evaluation time. It also wraps the features and labels tensors
  in identity to work around TensorFlow issue #17568.

  The resulting signature_def.inputs will have the following form:
    inputs/<input>     - placeholders that are used for input processing (i.e.
                         receiver_tensors). If receiver_tensors is a tensor and
                         not a dict, then this will just be named 'inputs'.
    input_refs         - reference to input_refs tensor (see below).
    features/<feature> - references to tensors passed in features. If features
                         is a tensor and not a dict, then this will just be
                         named 'features'.
    labels/<label>     - references to tensors passed in labels. If labels is
                         a tensor and not a dict, then this will just be named
                         'labels'.

  Args:
    features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the features to be passed to the model.
    labels: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the labels to be passed to the model. If your
      model is an unsupervised model whose `model_fn` does not accept a `labels`
      argument, you may pass None instead.
    receiver_tensors: A dict of string to `Tensor` containing exactly one key,
      named 'examples', which maps to the single input node that the receiver
      expects to be fed by default. Typically this is a placeholder expecting
      serialized `tf.Example` protos.
    input_refs: Optional. A 1-D integer `Tensor` that is batch-aligned with
      `features` and `labels` and serves as an index into
      receiver_tensors['examples'], indicating where this slice of features /
      labels came from. If not provided, defaults to
      range(0, len(receiver_tensors['examples'])).

  Raises:
    ValueError: receiver_tensors did not contain exactly one key named
      "examples".
  """
    if list(receiver_tensors.keys()) != ['examples']:
        raise ValueError('receiver_tensors must contain exactly one key named '
                         'examples.')

    if input_refs is None:
        input_refs = tf.range(tf.size(list(receiver_tensors.values())[0]))

    updated_receiver_tensors = {}

    def add_tensors(prefix, tensor_or_dict):
        if isinstance(tensor_or_dict, dict):
            for key in tensor_or_dict:
                updated_receiver_tensors[prefix + '/' +
                                         key] = tensor_or_dict[key]
        else:
            updated_receiver_tensors[prefix] = tensor_or_dict

    add_tensors(constants.SIGNATURE_DEF_INPUTS_PREFIX, receiver_tensors)
    add_tensors(constants.SIGNATURE_DEF_FEATURES_PREFIX, features)
    if labels is not None:
        add_tensors(constants.SIGNATURE_DEF_LABELS_PREFIX, labels)
    updated_receiver_tensors[constants.SIGNATURE_DEF_INPUT_REFS_KEY] = (
        input_refs)
    updated_receiver_tensors[constants.SIGNATURE_DEF_TFMA_VERSION_KEY] = (
        tf.constant(version.VERSION_STRING))

    _add_tfma_collections(features, labels, input_refs)

    # Workaround for TensorFlow issue #17568. Note that we pass the
    # identity-wrapped features and labels to model_fn, but we have to feed
    # the non-identity wrapped Tensors during evaluation.
    #
    # Also note that we can't wrap predictions, so metrics that have control
    # dependencies on predictions will cause the predictions to be recomputed
    # during their evaluation.
    wrapped_features = util.wrap_tensor_or_dict_of_tensors_in_identity(
        features)
    if labels is not None:
        wrapped_labels = util.wrap_tensor_or_dict_of_tensors_in_identity(
            labels)
        return export_lib.SupervisedInputReceiver(
            features=wrapped_features,
            labels=wrapped_labels,
            receiver_tensors=updated_receiver_tensors)
    else:
        return export_lib.UnsupervisedInputReceiver(
            features=wrapped_features,
            receiver_tensors=updated_receiver_tensors)
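
To make the naming scheme in the docstring above concrete, here is a small plain-Python sketch (an illustration, not from the source) of how add_tensors builds the prefixed signature_def input keys; strings stand in for the actual tensors.

updated_receiver_tensors = {}

def add_tensors(prefix, tensor_or_dict):
  # Dict entries become '<prefix>/<key>'; a bare tensor is stored under the
  # prefix itself, yielding the 'inputs/...', 'features/...', 'labels' names.
  if isinstance(tensor_or_dict, dict):
    for key, value in tensor_or_dict.items():
      updated_receiver_tensors[prefix + '/' + key] = value
  else:
    updated_receiver_tensors[prefix] = tensor_or_dict

add_tensors('inputs', {'examples': 'serialized_examples_placeholder'})
add_tensors('features', {'age': 'age_tensor', 'language': 'language_tensor'})
add_tensors('labels', 'label_tensor')
# updated_receiver_tensors now has the keys:
#   'inputs/examples', 'features/age', 'features/language', 'labels'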