Example #1
def _convert_to_tensors_or_sparse_tensors(a, b):
    """Convert to tensor types, and flip order if necessary.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`.
    b: `Tensor` or `SparseTensor` of the same type as `a`.

  Returns:
    Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to
    `Tensor` or `SparseTensor`, and `flipped` indicates whether the order has
    been flipped to make it dense,sparse instead of sparse,dense (since the set
    ops do not support the latter).
  """
    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
    if a.dtype.base_dtype not in _VALID_DTYPES:
        raise TypeError(
            f"'a' has invalid dtype `{a.dtype}` not in supported dtypes: "
            f"`{_VALID_DTYPES}`.")
    b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
    if b.dtype.base_dtype != a.dtype.base_dtype:
        raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
    if (isinstance(a, sparse_tensor.SparseTensor)
            and not isinstance(b, sparse_tensor.SparseTensor)):
        return b, a, True
    return a, b, False
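
A minimal standalone sketch of the same convert-and-flip pattern, using only public TF 2.x APIs (the helper below is illustrative, not the private TensorFlow function above):

import tensorflow as tf

def convert_and_maybe_flip(a, b):
    # Convert plain Python/numpy inputs to Tensors; keep SparseTensors as-is.
    a = a if isinstance(a, tf.SparseTensor) else tf.convert_to_tensor(a)
    b = b if isinstance(b, tf.SparseTensor) else tf.convert_to_tensor(b)
    # Flip to dense,sparse order, since sparse,dense is the unsupported case.
    if isinstance(a, tf.SparseTensor) and not isinstance(b, tf.SparseTensor):
        return b, a, True
    return a, b, False

sp = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[1, 2])
dense = tf.constant([[1, 2]])
_, _, flipped = convert_and_maybe_flip(sp, dense)
print(flipped)  # True: the order was flipped to dense,sparse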
Example #2
def _set_operation(a, b, set_operation, validate_indices=True):
    """Compute set operation of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
        `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
        sorted in row-major order.
    set_operation: String indicating set operation. See
        SetOperationOp::SetOperationFromContext for valid values.
    validate_indices: Whether to validate the order and range of sparse indices
       in `a` and `b`.

  Returns:
    A `SparseTensor` with the same rank as `a` and `b`, and all but the last
    dimension the same. Elements along the last dimension contain the results
    of the set operation.

  Raises:
    TypeError: If inputs are invalid types.
    ValueError: If `a` is sparse and `b` is dense.
  """
    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
    if a.dtype.base_dtype not in _VALID_DTYPES:
        raise TypeError("'a' invalid dtype %s." % a.dtype)
    b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
    if b.dtype.base_dtype != a.dtype.base_dtype:
        raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
    # pylint: disable=protected-access
    if isinstance(a, sparse_tensor.SparseTensor):
        if isinstance(b, sparse_tensor.SparseTensor):
            indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(
                a.indices, a.values, a.shape, b.indices, b.values,
                b.dense_shape, set_operation, validate_indices)
        else:
            raise ValueError(
                "Sparse,Dense is not supported, but Dense,Sparse is. "
                "Please flip the order of your inputs.")
    elif isinstance(b, sparse_tensor.SparseTensor):
        indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(
            a, b.indices, b.values, b.dense_shape, set_operation,
            validate_indices)
    else:
        indices, values, shape = gen_set_ops.dense_to_dense_set_operation(
            a, b, set_operation, validate_indices)
    # pylint: enable=protected-access
    return sparse_tensor.SparseTensor(indices, values, shape)
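
For context, a short usage sketch of the public set ops that wrap this helper (TensorFlow 2.x assumed): the result is a `SparseTensor` whose last dimension holds the per-row results.

import tensorflow as tf

a = tf.constant([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
b = tf.constant([[1, 2, 9, 10],
                 [5, 5, 11, 12]])

# Set operations apply along the last dimension and return a SparseTensor.
result = tf.sets.intersection(a, b)
print(tf.sparse.to_dense(result).numpy())  # [[1 2]
                                           #  [5 0]]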
Example #3
 def test_convert_sparse(self):
   with self.test_session():
     indices = [[0, 1], [1, 0]]
     values = [42, 43]
     shape = [2, 2]
     sparse_tensor_value = sparse_tensor.SparseTensorValue(
         indices, values, shape)
     st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
     from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
         sparse_tensor_value).eval()
     from_tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(st).eval()
     for convertee in [from_value, from_tensor]:
       self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
       self.assertAllEqual(sparse_tensor_value.values, convertee.values)
       self.assertAllEqual(sparse_tensor_value.dense_shape, convertee.shape)
Example #4
def _set_operation(a, b, set_operation, validate_indices=True):
  """Compute set operation of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
        `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
        sorted in row-major order.
    set_operation: String indicating set operation. See
        SetOperationOp::SetOperationFromContext for valid values.
    validate_indices: Whether to validate the order and range of sparse indices
       in `a` and `b`.

  Returns:
    A `SparseTensor` with the same rank as `a` and `b`, and all but the last
    dimension the same. Elements along the last dimension contain the results
    of the set operation.

  Raises:
    TypeError: If inputs are invalid types.
    ValueError: If `a` is sparse and `b` is dense.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if a.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("'a' invalid dtype %s." % a.dtype)
  b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
  if b.dtype.base_dtype != a.dtype.base_dtype:
    raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
  # pylint: disable=protected-access
  if isinstance(a, sparse_tensor.SparseTensor):
    if isinstance(b, sparse_tensor.SparseTensor):
      indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(
          a.indices, a.values, a.shape, b.indices, b.values, b.dense_shape,
          set_operation, validate_indices)
    else:
      raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. "
                       "Please flip the order of your inputs.")
  elif isinstance(b, sparse_tensor.SparseTensor):
    indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(
        a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)
  else:
    indices, values, shape = gen_set_ops.dense_to_dense_set_operation(
        a, b, set_operation, validate_indices)
  # pylint: enable=protected-access
  return sparse_tensor.SparseTensor(indices, values, shape)
Example #5
 def new_model_fn(features, labels, mode, config):  # pylint: disable=missing-docstring
     spec = estimator.model_fn(features, labels, mode, config)
     predictions = spec.predictions
     if predictions is None:
         return spec
     verify_keys_and_predictions(features, predictions)
     for key in get_keys(features):
         feature = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
             features[key])
         if sparse_default_values and (key in sparse_default_values):
             if not isinstance(feature, sparse_tensor_lib.SparseTensor):
                 raise ValueError(
                     'Feature ({}) is expected to be a `SparseTensor`.'.
                     format(key))
             feature = sparse_ops.sparse_tensor_to_dense(
                 feature, default_value=sparse_default_values[key])
         if not isinstance(feature, ops.Tensor):
             raise ValueError(
                 'Feature ({}) should be a Tensor. Please use `keys` '
                  'argument of forward_features to filter unwanted features, or '
                  'add key to argument `sparse_default_values`. '
                 'Type of features[{}] is {}.'.format(
                     key, key, type(feature)))
         predictions[key] = feature
     spec = spec._replace(predictions=predictions)
     if spec.export_outputs:  # CHANGES HERE
         outputs = spec.export_outputs['predict'].outputs
         outputs[key] = spec.predictions[key]
         spec.export_outputs['predict'] = tf.estimator.export.PredictOutput(
             outputs)
         spec.export_outputs[
             'serving_default'] = tf.estimator.export.PredictOutput(outputs)
     return spec
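
A small sketch of the feature-forwarding idea above (the `tf.estimator.export` API is assumed to be available; keys and tensors are made up for illustration):

import tensorflow as tf

predictions = {'probabilities': tf.constant([[0.9], [0.1]])}
features = {'id': tf.constant([101, 102])}

# Forward the raw feature into predictions so it appears in serving responses.
predictions['id'] = features['id']
export_output = tf.estimator.export.PredictOutput(predictions)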
Example #6
def _check_and_reshape_dense_labels(labels, expected_labels_dimension):
    """Checks dense labels type and shape and reshapes to 2D Tensor."""
    with ops.name_scope(None, 'labels', (labels, )) as scope:
        labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
        if isinstance(labels, sparse_tensor.SparseTensor):
            raise ValueError(
                'SparseTensor labels are not supported. '
                'labels must be a Tensor of shape [batch_size, %s]. '
                'Suggested Fix (1): Check the label feature in your data. '
                'Each example must contain %s value(s). If not, your choice of label '
                'was probably incorrect. '
                'Suggested Fix (2): In your input_fn, use '
                'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'
                '' % (expected_labels_dimension, expected_labels_dimension))
        labels = _maybe_expand_dim(labels)
        labels_shape = array_ops.shape(labels)
        err_msg = 'labels shape must be [batch_size, {}]'.format(
            expected_labels_dimension)
        assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
        with ops.control_dependencies([assert_rank]):
            static_shape = labels.shape
            if static_shape is not None:
                dim1 = static_shape[1]
                if (dim1 is not None) and (dim1 != expected_labels_dimension):
                    raise ValueError(
                        'Mismatched label shape. '
                        'Classifier configured with n_classes=%s.  Received %s. '
                        'Suggested Fix: check your n_classes argument to the estimator '
                        'and/or the shape of your label.' %
                        (expected_labels_dimension, dim1))
            assert_dimension = check_ops.assert_equal(
                expected_labels_dimension, labels_shape[1], message=err_msg)
            with ops.control_dependencies([assert_dimension]):
                return array_ops.identity(labels, name=scope)
Example #7
def set_size(a, validate_indices=True):
    """Compute number of unique elements along last dimension of `a`.

  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
      in `a`.

  Returns:
    `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
    rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
    number of unique elements in the corresponding `[0...n-1]` dimension of `a`.

  Raises:
    TypeError: If `a` is an invalid type.
  """
    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
    if not isinstance(a, sparse_tensor.SparseTensor):
        raise TypeError("Expected `SparseTensor`, got %s." % a)
    if a.values.dtype.base_dtype not in _VALID_DTYPES:
        raise TypeError(
            f"Invalid dtype `{a.values.dtype}` not in supported dtypes: "
            f"`{_VALID_DTYPES}`.")
    # pylint: disable=protected-access
    return gen_set_ops.set_size(a.indices, a.values, a.dense_shape,
                                validate_indices)
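
Usage sketch for the public wrapper (TensorFlow 2.x assumed, where `tf.sparse.from_dense` drops zeros as missing entries):

import tensorflow as tf

dense = tf.constant([[1, 2, 2, 0],
                     [3, 0, 0, 0]])
st = tf.sparse.from_dense(dense)    # zeros are dropped
print(tf.sets.size(st).numpy())     # [2 1]: number of unique elements per row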
Example #8
def _clone_and_build_model(mode,
                           keras_model,
                           custom_objects,
                           features=None,
                           labels=None):
  """Clone and build the given keras_model.

  Args:
    mode: training mode.
    keras_model: an instance of compiled keras model.
    custom_objects: Dictionary for custom objects.
    features:
    labels:

  Returns:
    The newly built model.
  """
  # Set to True during training, False for inference.
  K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)

  # Clone keras model.
  input_tensors = None if features is None else _create_ordered_io(
      keras_model, features)
  if custom_objects:
    with CustomObjectScope(custom_objects):
      model = models.clone_model(keras_model, input_tensors=input_tensors)
  else:
    model = models.clone_model(keras_model, input_tensors=input_tensors)

  # Compile/Build model
  if mode is model_fn_lib.ModeKeys.PREDICT and not model.built:
    model.build()
  else:
    optimizer_config = keras_model.optimizer.get_config()
    optimizer = keras_model.optimizer.__class__.from_config(optimizer_config)
    optimizer.iterations = training_util.get_or_create_global_step()

    # Get list of outputs.
    if labels is None:
      target_tensors = None
    elif isinstance(labels, dict):
      target_tensors = _create_ordered_io(keras_model, labels, is_input=False)
    else:
      target_tensors = [
          _cast_tensor_to_floatx(
              sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(labels))
      ]

    model.compile(
        optimizer,
        keras_model.loss,
        metrics=keras_model.metrics,
        loss_weights=keras_model.loss_weights,
        sample_weight_mode=keras_model.sample_weight_mode,
        weighted_metrics=keras_model.weighted_metrics,
        target_tensors=target_tensors)

  if isinstance(model, models.Sequential):
    model = model.model
  return model
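
A condensed sketch of the clone-and-recompile pattern with the public Keras API (TensorFlow 2.x assumed; the architecture below is arbitrary):

import tensorflow as tf

keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1),
])
keras_model.compile(optimizer=tf.keras.optimizers.Adam(0.01), loss='mse')

# Clone the architecture (fresh weights), then rebuild the optimizer from config.
clone = tf.keras.models.clone_model(keras_model)
optimizer = keras_model.optimizer.__class__.from_config(
    keras_model.optimizer.get_config())
clone.compile(optimizer=optimizer, loss=keras_model.loss)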
Example #9
def _check_labels(labels, expected_labels_dimension):
  """Check labels type and shape."""
  with ops.name_scope(None, 'labels', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    labels_shape = array_ops.shape(labels)
    err_msg = 'labels shape must be [batch_size, {}]'.format(
        expected_labels_dimension)
    assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s.  Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      assert_dimension = check_ops.assert_equal(
          expected_labels_dimension, labels_shape[1], message=err_msg)
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Example #10
  def new_model_fn(features, labels, mode, config):  # pylint: disable=missing-docstring
    spec = estimator.model_fn(features, labels, mode, config)
    predictions = spec.predictions
    if predictions is None:
      return spec
    verify_keys_and_predictions(features, predictions)
    for key in get_keys(features):
      feature = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
          features[key])
      if not isinstance(feature, ops.Tensor):
        raise ValueError(
            'Forwarded feature ({}) should be a Tensor. Please use keys '
            'argument of forward_features to filter unwanted features. Type of '
            'features[{}] is {}.'.format(key, key, type(feature)))
      predictions[key] = feature
    spec = spec._replace(predictions=predictions)
    if spec.export_outputs:
      for ekey in ['predict', 'serving_default']:
        if (ekey in spec.export_outputs and
            isinstance(spec.export_outputs[ekey],
                       PredictOutput)):
          export_outputs = spec.export_outputs[ekey].outputs
          for key in get_keys(features):
            export_outputs[key] = predictions[key]

    return spec
Example #11
def _check_labels(labels, expected_labels_dimension):
    """Check labels type and shape."""
    with ops.name_scope(None, 'labels', (labels, )) as scope:
        labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
        if isinstance(labels, sparse_tensor.SparseTensor):
            raise ValueError('SparseTensor labels are not supported.')
        labels_shape = array_ops.shape(labels)
        err_msg = 'labels shape must be [batch_size, {}]'.format(
            expected_labels_dimension)
        assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
        with ops.control_dependencies([assert_rank]):
            static_shape = labels.shape
            if static_shape is not None:
                dim1 = static_shape[1]
                if (dim1 is not None) and (dim1 != expected_labels_dimension):
                    raise ValueError(
                        'Mismatched label shape. '
                        'Classifier configured with n_classes=%s.  Received %s. '
                        'Suggested Fix: check your n_classes argument to the estimator '
                        'and/or the shape of your label.' %
                        (expected_labels_dimension, dim1))
            assert_dimension = check_ops.assert_equal(
                expected_labels_dimension, labels_shape[1], message=err_msg)
            with ops.control_dependencies([assert_dimension]):
                return array_ops.identity(labels, name=scope)
Example #12
 def test_convert_sparse(self):
   with self.cached_session():
     indices = [[0, 1], [1, 0]]
     values = [42, 43]
     shape = [2, 2]
     sparse_tensor_value = sparse_tensor.SparseTensorValue(
         indices, values, shape)
     st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
     from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
         sparse_tensor_value).eval()
     from_tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(st).eval()
     for convertee in [from_value, from_tensor]:
       self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
       self.assertAllEqual(sparse_tensor_value.values, convertee.values)
       self.assertAllEqual(
           sparse_tensor_value.dense_shape, convertee.dense_shape)
Example #13
def _convert_tensor(x):
    """Create or cast tensor if needed."""
    if not tensor_util.is_tensor(x):
        # x is a numpy array
        x = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(x)
    if check_ops.is_numeric_tensor(x):
        # is_numeric_tensor returns False if provided with a numpy array
        x = _cast_tensor_to_floatx(x)
    return x
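
A runnable sketch of the same convert-then-cast idea with public TF 2.x ops (the helper name and the exact numeric check are assumptions, not the library code):

import numpy as np
import tensorflow as tf

def convert_tensor(x):
    if not tf.is_tensor(x):
        x = tf.convert_to_tensor(x)                # e.g. a numpy array
    if x.dtype.is_floating or x.dtype.is_integer:
        x = tf.cast(x, tf.keras.backend.floatx())  # usually float32
    return x

print(convert_tensor(np.array([1, 2, 3])).dtype)   # <dtype: 'float32'>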
Example #14
def from_sparse(st_input, name=None):
    """Converts a 2D `SparseTensor` to a `RaggedTensor`.

  Each row of the `output` `RaggedTensor` will contain the explicit values from
  the same row in `st_input`.  `st_input` must be ragged-right.  If it is not
  ragged-right, then an error will be generated.

  Example:

  ```python
  >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],
  ...                   values=[1, 2, 3, 4, 5],
  ...                   dense_shape=[4, 3])
  >>> ragged.from_sparse(st).eval().tolist()
  [[1, 2, 3], [4], [], [5]]
  ```

  Currently, only two-dimensional `SparseTensors` are supported.

  Args:
    st_input: The sparse tensor to convert.  Must have rank 2.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `RaggedTensor` with the same values as `st_input`.
    `output.ragged_rank = rank(st_input) - 1`.
    `output.shape = [st_input.dense_shape[0], None]`.
  Raises:
    ValueError: If the number of dimensions in `st_input` is not known
      statically, or is not two.
  """
    if not sparse_tensor.is_sparse(st_input):
        raise TypeError('Expected SparseTensor, got %s' %
                        type(st_input).__name__)
    with ops.name_scope(name, 'RaggedFromSparse', [st_input]):
        st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
            st_input, name='rt_input')

        static_rank_from_dense_shape = (
            None if st_input.dense_shape.shape.ndims is None else
            st_input.dense_shape.shape.dims[0].value)
        static_rank_from_indices = (None
                                    if st_input.indices.shape.ndims is None
                                    else st_input.indices.shape.dims[1].value)

        if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
            raise ValueError('rank(st_input) must be 2')

        with ops.control_dependencies(
                _assert_sparse_indices_are_ragged_right(st_input.indices)):
            # Treat sparse row indices as segment ids to generate a splits tensor that
            # we can pair with the sparse tensor values.  (Ignore sparse column
            # indices.)
            segment_ids = st_input.indices[:, 0]
            num_segments = st_input.dense_shape[0]
            return ragged_factory_ops.from_value_rowids(
                st_input.values, segment_ids, num_segments)
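
Usage sketch with the public API (TF 2.x exposes the same conversion as `tf.RaggedTensor.from_sparse`; the indices below are ragged-right):

import tensorflow as tf

st = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
    values=[1, 2, 3, 4, 5],
    dense_shape=[4, 3])
print(tf.RaggedTensor.from_sparse(st))  # <tf.RaggedTensor [[1, 2, 3], [4], [], [5]]>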
Example #15
def _convert_tensor(x):
  """Create or cast tensor if needed."""
  if not tensor_util.is_tensor(x):
    # x is a numpy array
    x = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(x)
  if check_ops.is_numeric_tensor(x):
    # is_numeric_tensor returns False if provided with a numpy array
    x = _cast_tensor_to_floatx(x)
  return x
Example #16
def from_sparse(st_input, name=None):
  """Converts a 2D `SparseTensor` to a `RaggedTensor`.

  Each row of the `output` `RaggedTensor` will contain the explicit values from
  the same row in `st_input`.  `st_input` must be ragged-right.  If it is not
  ragged-right, then an error will be generated.

  Example:

  ```python
  >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],
  ...                   values=[1, 2, 3, 4, 5],
  ...                   dense_shape=[4, 3])
  >>> ragged.from_sparse(st).eval().tolist()
  [[1, 2, 3], [4], [], [5]]
  ```

  Currently, only two-dimensional `SparseTensors` are supported.

  Args:
    st_input: The sparse tensor to convert.  Must have rank 2.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `RaggedTensor` with the same values as `st_input`.
    `output.ragged_rank = rank(st_input) - 1`.
    `output.shape = [st_input.dense_shape[0], None]`.
  Raises:
    ValueError: If the number of dimensions in `st_input` is not known
      statically, or is not two.
  """
  if not sparse_tensor.is_sparse(st_input):
    raise TypeError('Expected SparseTensor, got %s' % type(st_input).__name__)
  with ops.name_scope(name, 'RaggedFromSparse', [st_input]):
    st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
        st_input, name='rt_input')

    static_rank_from_dense_shape = (
        None if st_input.dense_shape.shape.ndims is None
        else st_input.dense_shape.shape.dims[0].value)
    static_rank_from_indices = (
        None if st_input.indices.shape.ndims is None
        else st_input.indices.shape.dims[1].value)

    if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
      raise ValueError('rank(st_input) must be 2')

    with ops.control_dependencies(
        _assert_sparse_indices_are_ragged_right(st_input.indices)):
      # Treat sparse row indices as segment ids to generate a splits tensor that
      # we can pair with the sparse tensor values.  (Ignore sparse column
      # indices.)
      segment_ids = st_input.indices[:, 0]
      num_segments = st_input.dense_shape[0]
      return ragged_factory_ops.from_value_rowids(st_input.values, segment_ids,
                                                  num_segments)
Example #17
def _maybe_expand_dim(tensor):
  """Expand the dim of `tensor` with static rank 1."""
  with ops.name_scope(None, 'maybe_expand_dim', (tensor,)):
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    static_shape = tensor.shape
    if static_shape is None:
      return tensor

    return (array_ops.expand_dims(tensor, -1) if static_shape.ndims == 1
            else tensor)
Example #18
def _maybe_expand_dim(tensor):
  """Expand the dim of `tensor` with static rank 1."""
  with ops.name_scope(None, 'maybe_expand_dim', (tensor,)):
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    static_shape = tensor.shape
    if static_shape is None:
      return tensor

    return (array_ops.expand_dims(tensor, -1) if static_shape.ndims == 1
            else tensor)
Example #19
 def _convert_feature_to_tensor(self, name, value):
     """Casts features to the correct dtype based on their name."""
     if name in [
             feature_keys.TrainEvalFeatures.TIMES,
             feature_keys.PredictionFeatures.TIMES
     ]:
         return math_ops.cast(value, dtypes.int64)
     if name == feature_keys.TrainEvalFeatures.VALUES:
         return math_ops.cast(value, self.model.dtype)
     if name == feature_keys.PredictionFeatures.STATE_TUPLE:
         return value  # Correct dtypes are model-dependent
     return sparse_tensor.convert_to_tensor_or_sparse_tensor(value)
Example #20
 def _convert_feature_to_tensor(self, name, value):
   """Casts features to the correct dtype based on their name."""
   if name in [
       feature_keys.TrainEvalFeatures.TIMES,
       feature_keys.PredictionFeatures.TIMES
   ]:
     return math_ops.cast(value, dtypes.int64)
   if name == feature_keys.TrainEvalFeatures.VALUES:
     return math_ops.cast(value, self.model.dtype)
   if name == feature_keys.PredictionFeatures.STATE_TUPLE:
     return value  # Correct dtypes are model-dependent
   return sparse_tensor.convert_to_tensor_or_sparse_tensor(value)
Example #21
def _convert_to_tensors_or_sparse_tensors(a, b):
  """Convert to tensor types, and flip order if necessary.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`.
    b: `Tensor` or `SparseTensor` of the same type as `a`.

  Returns:
    Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to
    `Tensor` or `SparseTensor`, and `flipped` indicates whether the order has
    been flipped to make it dense,sparse instead of sparse,dense (since the set
    ops do not support the latter).
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if a.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("'a' invalid dtype %s." % a.dtype)
  b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
  if b.dtype.base_dtype != a.dtype.base_dtype:
    raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
  if (isinstance(a, sparse_tensor.SparseTensor) and
      not isinstance(b, sparse_tensor.SparseTensor)):
    return b, a, True
  return a, b, False
Example #22
def to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
    """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.

    If `input_tensor` is already a `SparseTensor`, just return it.

    Args:
      input_tensor: A string or integer `Tensor`.
      ignore_value: Entries in `dense_tensor` equal to this value will be
        absent from the resulting `SparseTensor`. If `None`, default value of
        `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).

    Returns:
      A `SparseTensor` with the same shape as `input_tensor`.

    Raises:
      ValueError: when `input_tensor`'s rank is `None`.
    """
    input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        input_tensor)
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
        return input_tensor
    with ops.name_scope(None, 'to_sparse_input', (
            input_tensor,
            ignore_value,
    )):
        if ignore_value is None:
            if input_tensor.dtype == dtypes.string:
                # Exception because TF strings are converted to numpy objects by default.
                ignore_value = ''
            elif input_tensor.dtype.is_integer:
                ignore_value = -1  # -1 has a special meaning of missing feature
            else:
                # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
                # constructing a new numpy object of the given type, which yields the
                # default value for that type.
                ignore_value = input_tensor.dtype.as_numpy_dtype()
        ignore_value = math_ops.cast(ignore_value,
                                     input_tensor.dtype,
                                     name='ignore_value')
        indices = array_ops.where(math_ops.not_equal(input_tensor,
                                                     ignore_value),
                                  name='indices')
        return sparse_tensor_lib.SparseTensor(
            indices=indices,
            values=array_ops.gather_nd(input_tensor, indices, name='values'),
            dense_shape=array_ops.shape(input_tensor,
                                        out_type=dtypes.int64,
                                        name='dense_shape'))
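
A minimal sketch of the same dense-to-sparse, drop-ignore-value idea with public TF 2.x ops (illustrative only, not the feature-column helper itself):

import tensorflow as tf

dense = tf.constant([['a', '', 'b'],
                     ['', '', 'c']])
indices = tf.where(tf.not_equal(dense, ''))   # coordinates of the kept cells
sparse = tf.sparse.SparseTensor(
    indices=indices,
    values=tf.gather_nd(dense, indices),
    dense_shape=tf.shape(dense, out_type=tf.int64))
print(tf.sparse.to_dense(sparse, default_value='').numpy())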
Example #23
    def get(self, key):
        """Returns a `Tensor` for the given key.

    A `str` key is used to access a base feature (not-transformed). When a
    `_FeatureColumn` is passed, the transformed feature is returned if it
    already exists, otherwise the given `_FeatureColumn` is asked to provide its
    transformed output, which is then cached.

    Args:
      key: a `str` or a `_FeatureColumn`.

    Returns:
      The transformed `Tensor` corresponding to the `key`.

    Raises:
      ValueError: if key is not found or a transformed `Tensor` cannot be
        computed.
    """
        if key in self._feature_tensors:
            # FeatureColumn is already transformed or converted.
            return self._feature_tensors[key]

        if key in self._features:
            # FeatureColumn is a raw feature.
            feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
                self._features[key])
            self._feature_tensors[key] = feature_tensor
            return feature_tensor

        if not isinstance(key, (str, _FeatureColumn)):
            raise TypeError(
                '"key" must be either a "str" or "_FeatureColumn". '
                'Provided: {}'.format(key))

        if not isinstance(key, _FeatureColumn):
            raise ValueError(
                'Feature {} is not in features dictionary.'.format(key))

        column = key
        logging.debug('Transforming feature_column %s.', column)
        # pylint: disable=protected-access
        transformed = column._transform_feature(self)
        # pylint: enable=protected-access
        if transformed is None:
            raise ValueError('Column {} is not supported.'.format(column.name))
        self._feature_tensors[column] = transformed
        return transformed
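
The lookup order above (cache, then raw features, then column transform) boils down to a pattern like this plain-Python sketch (names are illustrative, not the FeatureColumn internals):

class TransformCache(object):
    def __init__(self, features):
        self._features = features       # raw feature dict, keyed by str
        self._feature_tensors = {}      # cache of converted/transformed values

    def get(self, key, transform_fn=None):
        if key in self._feature_tensors:
            return self._feature_tensors[key]    # already transformed or converted
        if key in self._features:
            value = self._features[key]          # raw feature: convert and cache
            self._feature_tensors[key] = value
            return value
        if transform_fn is None:
            raise ValueError('Feature {} is not in features dictionary.'.format(key))
        transformed = transform_fn(self)         # ask the column to transform itself
        self._feature_tensors[key] = transformed
        return transformed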
Example #24
def down_sample(source, freq_vocab, replacement='', threshold=1e-3, min_freq=0, seed=None, name=None):
    """Randomly down-sample high frequency tokens in `source` with `replacement` value.

    Args:
        source: string `Tensor` or `RaggedTensor` or `SparseTensor` of any shape, items to be sampled.
        freq_vocab: `Counter` with frequencies vocabulary.
        replacement: `string`, value to set instead of downsampled ones
        threshold: `float`, items occurrence threshold.
        min_freq: `int`, items below that frequency will be treated as unique.
        seed: `int`, used to create a random seed (optional).
            See @{tf.random.set_seed} for behavior.
        name: `string`, a name for the operation (optional).

    Returns:
      A boolean `Tensor` of same shape as source: "keep" flags.
    """
    with tf.name_scope(name or 'down_sample'):
        if isinstance(source, sparse_tensor.SparseTensorValue) or isinstance(source, sparse_tensor.SparseTensor):
            source = sparse_tensor.convert_to_tensor_or_sparse_tensor(source, dtype=tf.string, name=name)
        else:
            source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, dtype=tf.string, name=name)

        if not tf.string.is_compatible_with(source.dtype):
            raise RuntimeError('"Source" must have dtype compatible with "string". '
                               'Actual: {}'.format(source.dtype))

        if isinstance(source, tf.SparseTensor):
            return tf.SparseTensor(
                values=down_sample(source.values, freq_vocab, replacement, threshold, min_freq, seed),
                indices=source.indices,
                dense_shape=source.dense_shape
            )
        elif isinstance(source, tf.RaggedTensor):
            return source.with_flat_values(
                down_sample(source.flat_values, freq_vocab, replacement, threshold, min_freq, seed)
            )

        keep = sample_mask(
            source=source,
            freq_vocab=freq_vocab,
            threshold=threshold,
            min_freq=min_freq,
            seed=seed,
        )

        return tf.where(keep, source, replacement)
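
A standalone sketch of the frequency-based keep decision (a word2vec-style `sqrt(threshold / freq)` rule is assumed here; the library's own `sample_mask` may differ), with the frequency lookup already done:

import tensorflow as tf

tokens = tf.constant(['the', 'model', 'sparse'])
freqs = tf.constant([0.10, 0.001, 0.0005])   # relative frequency of each token
threshold = 1e-3

keep_prob = tf.minimum(1.0, tf.sqrt(threshold / freqs))
keep = tf.random.uniform(tf.shape(freqs)) < keep_prob
print(tf.where(keep, tokens, '').numpy())    # frequent tokens are mostly replaced by ''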
Example #25
def _check_and_reshape_dense_labels(labels, expected_labels_dimension):
  """Checks dense labels type and shape and reshapes to 2D Tensor."""
  if labels is None:
    raise ValueError(
        'You must provide a labels Tensor. Given: None. '
        'Suggested troubleshooting steps: Check that your data contain '
        'your label feature. Check that your input_fn properly parses and '
        'returns labels.')
  with ops.name_scope(None, 'labels', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError(
          'SparseTensor labels are not supported. '
          'labels must be a Tensor of shape [batch_size, %s]. '
          'Suggested Fix (1): Check the label feature in your data. '
          'Each example must contain %s value(s). If not, your choice of label '
          'was probably incorrect. '
          'Suggested Fix (2): In your input_fn, use '
          'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'
          '' % (expected_labels_dimension, expected_labels_dimension))
    labels = _maybe_expand_dim(labels)
    labels_shape = array_ops.shape(labels)
    err_msg = 'labels shape must be [batch_size, {}]'.format(
        expected_labels_dimension)
    assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s.  Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      assert_dimension = check_ops.assert_equal(
          expected_labels_dimension, labels_shape[1], message=err_msg)
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Example #26
def _check_labels(labels, expected_labels_dimension):
  """Check labels type and shape."""
  with ops.name_scope(None, 'labels', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    labels_shape = array_ops.shape(labels)
    err_msg = 'labels shape must be [batch_size, {}]'.format(
        expected_labels_dimension)
    assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'labels shape must be [batch_size, labels_dimension], got %s.' %
              (static_shape,))
      assert_dimension = check_ops.assert_equal(
          expected_labels_dimension, labels_shape[1], message=err_msg)
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Example #27
def _check_labels(labels, expected_labels_dimension):
  """Check labels type and shape."""
  with ops.name_scope(None, 'labels', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    labels_shape = array_ops.shape(labels)
    err_msg = 'labels shape must be [batch_size, {}]'.format(
        expected_labels_dimension)
    assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'labels shape must be [batch_size, labels_dimension], got %s.' %
              (static_shape,))
      assert_dimension = check_ops.assert_equal(
          expected_labels_dimension, labels_shape[1], message=err_msg)
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Example #28
def set_size(a, validate_indices=True):
  """Compute number of unique elements along last dimension of `a`.

  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
       in `a`.

  Returns:
    `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
    rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
    number of unique elements in the corresponding `[0...n-1]` dimension of `a`.

  Raises:
    TypeError: If `a` is an invalid type.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if not isinstance(a, sparse_tensor.SparseTensor):
    raise TypeError("Expected `SparseTensor`, got %s." % a)
  if a.values.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("Invalid dtype %s." % a.values.dtype)
  # pylint: disable=protected-access
  return gen_set_ops.set_size(a.indices, a.values, a.shape, validate_indices)
Example #29
def _check_dense_labels_match_logits_and_reshape(
    labels, logits, expected_labels_dimension):
  """Checks that labels shape matches logits and reshapes if needed.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Then labels
  shape must be [D0, D1, ... DN, expected_labels_dimension].
  If expected_labels_dimension=1, labels could be [D0, D1, ... DN] and this
  method reshapes them to [D0, D1, ... DN, 1].

  Args:
    labels: labels Tensor.
    logits: logits Tensor.
    expected_labels_dimension: Integer.
  Returns:
    Validated and reshaped labels Tensor.
  Raises:
    ValueError: If labels is a SparseTensor.
    ValueError: If labels shape is statically defined and fails validation.
    OpError: If labels shape is not statically defined and fails validation.
  """
  if labels is None:
    raise ValueError(
        'You must provide a labels Tensor. Given: None. '
        'Suggested troubleshooting steps: Check that your data contain '
        'your label feature. Check that your input_fn properly parses and '
        'returns labels.')
  with ops.name_scope(None, 'labels', (labels, logits)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError(
          'SparseTensor labels are not supported. '
          'labels must be a Tensor of shape [D0, D1, ..., DN, %s], '
          'e.g. [batch_size, %s]. '
          'Suggested Fix (1): Check the label feature in your data. '
          'Each example must contain %s value(s). If not, your choice of label '
          'was probably incorrect. '
          'Suggested Fix (2): In your input_fn, use '
          'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'
          '' % (expected_labels_dimension, expected_labels_dimension,
                expected_labels_dimension))
    if (labels.shape.ndims is not None and logits.shape.ndims is not None and
        labels.shape.ndims == logits.shape.ndims - 1):
      labels = array_ops.expand_dims(labels, -1)
    labels_shape = array_ops.shape(labels)
    logits_shape = array_ops.shape(logits)
    err_msg = (
        'labels shape must be [D0, D1, ... DN, {}]. '
        'Suggested Fix: check your n_classes argument to the estimator '
        'and/or the shape of your label.'.format(expected_labels_dimension))
    assert_rank = check_ops.assert_rank_at_least(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape.ndims is not None:
        dim1 = static_shape[-1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s.  Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      expected_labels_shape = array_ops.concat(
          [logits_shape[:-1], [expected_labels_dimension]], axis=0)
      assert_dimension = check_ops.assert_equal(
          expected_labels_shape, labels_shape, message=err_msg,
          data=['expected_labels_shape: ', expected_labels_shape,
                'labels_shape: ', labels_shape])
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Example #30
def check_dense_labels_match_logits_and_reshape(labels, logits,
                                                expected_labels_dimension):
    """Checks labels shape matches logits, and reshapes if needed.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Then labels
  shape must be [D0, D1, ... DN, expected_labels_dimension].
  If expected_labels_dimension=1, labels could be [D0, D1, ... DN] and this
  method reshapes them to [D0, D1, ... DN, 1].

  Args:
    labels: labels Tensor.
    logits: logits Tensor.
    expected_labels_dimension: Integer.

  Returns:
    Validated and reshaped labels Tensor.

  Raises:
    ValueError: If labels is a SparseTensor.
    ValueError: If labels shape is statically defined and fails validation.
    OpError: If labels shape is not statically defined and fails validation.
  """
    if labels is None:
        raise ValueError(_LABEL_NONE_ERR_MSG)
    with ops.name_scope('labels', values=(labels, logits)) as scope:
        labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
        if isinstance(labels, sparse_tensor.SparseTensor):
            raise ValueError(
                _SPARSE_LABEL_ERR_MSG.format(expected_labels_dimension,
                                             expected_labels_dimension,
                                             expected_labels_dimension))
        # Eager mode.
        if context.executing_eagerly():
            labels_rank = labels._rank()  # pylint: disable=protected-access
            logits_rank = logits._rank()  # pylint: disable=protected-access
            if (labels_rank is not None and logits_rank is not None
                    and labels_rank == logits_rank - 1):
                labels = array_ops.expand_dims(labels, -1)
                labels_rank += 1
            labels_shape = labels._shape_tuple()  # pylint: disable=protected-access
            if labels_rank < 2:
                raise ValueError(
                    'labels must have rank at least 2.  Received rank {}, '
                    'shape {}'.format(labels_rank, labels_shape))
            if labels_shape[-1] != expected_labels_dimension:
                raise ValueError(
                    _MISMATCHED_LABEL_DIM_ERR_MSG.format(
                        expected_labels_dimension, labels_shape[-1]))
            logits_shape = logits._shape_tuple()  # pylint: disable=protected-access
            expected_labels_shape = logits_shape[:-1] + (
                expected_labels_dimension, )
            if expected_labels_shape != labels_shape:
                raise ValueError(
                    '{}, expected_labels_shape: {}. labels_shape: {}.'.format(
                        _LABEL_SHAPE_ERR_MSG.format(expected_labels_dimension),
                        expected_labels_shape, labels_shape))
            return labels

        # Graph mode.
        if (labels.shape.ndims is not None and logits.shape.ndims is not None
                and labels.shape.ndims == logits.shape.ndims - 1):
            labels = array_ops.expand_dims(labels, -1)
        assert_rank = check_ops.assert_rank_at_least(
            labels,
            2,
            message=_LABEL_SHAPE_ERR_MSG.format(expected_labels_dimension))
        with ops.control_dependencies([assert_rank]):
            static_shape = labels.shape
            if static_shape.ndims is not None:
                final_dim = static_shape[-1]
                if (final_dim is not None) and (final_dim !=
                                                expected_labels_dimension):
                    raise ValueError(
                        _MISMATCHED_LABEL_DIM_ERR_MSG.format(
                            expected_labels_dimension, final_dim))
            logits_shape = array_ops.shape(logits)
            expected_labels_shape = array_ops.concat(
                [logits_shape[:-1], [expected_labels_dimension]], axis=0)
            labels_shape = array_ops.shape(labels)
            assert_dimension = check_ops.assert_equal(
                expected_labels_shape,
                labels_shape,
                message=_LABEL_SHAPE_ERR_MSG.format(expected_labels_dimension),
                data=[
                    'expected_labels_shape: ', expected_labels_shape,
                    'labels_shape: ', labels_shape
                ])
            with ops.control_dependencies([assert_dimension]):
                return array_ops.identity(labels, name=scope)
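
A tiny eager-mode sketch of the reshape step described above (shapes are made up):

import tensorflow as tf

logits = tf.zeros([4, 1])               # [batch_size, logits_dimension]
labels = tf.constant([0., 1., 0., 1.])  # [batch_size]

# Rank-(N-1) labels are expanded so they line up with the logits dimensions.
if labels.shape.ndims == logits.shape.ndims - 1:
    labels = tf.expand_dims(labels, -1)
print(labels.shape)                     # (4, 1)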
Example #31
def _check_dense_labels_match_logits_and_reshape(
    labels, logits, expected_labels_dimension):
  """Checks that labels shape matches logits and reshapes if needed.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Then labels
  shape must be [D0, D1, ... DN, expected_labels_dimension].
  If expected_labels_dimension=1, labels could be [D0, D1, ... DN] and this
  method reshapes them to [D0, D1, ... DN, 1].

  Args:
    labels: labels Tensor.
    logits: logits Tensor.
    expected_labels_dimension: Integer.
  Returns:
    Validated and reshaped labels Tensor.
  Raises:
    ValueError: If labels is a SparseTensor.
    ValueError: If labels shape is statically defined and fails validation.
    OpError: If labels shape is not statically defined and fails validation.
  """
  if labels is None:
    raise ValueError(
        'You must provide a labels Tensor. Given: None. '
        'Suggested troubleshooting steps: Check that your data contain '
        'your label feature. Check that your input_fn properly parses and '
        'returns labels.')
  with ops.name_scope(None, 'labels', (labels, logits)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError(
          'SparseTensor labels are not supported. '
          'labels must be a Tensor of shape [D0, D1, ..., DN, %s], '
          'e.g. [batch_size, %s]. '
          'Suggested Fix (1): Check the label feature in your data. '
          'Each example must contain %s value(s). If not, your choice of label '
          'was probably incorrect. '
          'Suggested Fix (2): In your input_fn, use '
          'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'
          '' % (expected_labels_dimension, expected_labels_dimension,
                expected_labels_dimension))
    if (labels.shape.ndims is not None and logits.shape.ndims is not None and
        labels.shape.ndims == logits.shape.ndims - 1):
      labels = array_ops.expand_dims(labels, -1)
    labels_shape = array_ops.shape(labels)
    logits_shape = array_ops.shape(logits)
    err_msg = (
        'labels shape must be [D0, D1, ... DN, {}]. '
        'Suggested Fix: check your n_classes argument to the estimator '
        'and/or the shape of your label.'.format(expected_labels_dimension))
    assert_rank = check_ops.assert_rank_at_least(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape.ndims is not None:
        dim1 = static_shape[-1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s.  Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      expected_labels_shape = array_ops.concat(
          [logits_shape[:-1], [expected_labels_dimension]], axis=0)
      assert_dimension = check_ops.assert_equal(
          expected_labels_shape, labels_shape, message=err_msg,
          data=['expected_labels_shape: ', expected_labels_shape,
                'labels_shape: ', labels_shape])
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
Example #32
 def test_convert_dense(self):
   with self.test_session():
     value = [42, 43]
     from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
         value)
     self.assertAllEqual(value, from_value.eval())
Example #33
def _convert_tensor(x):
    """Create or cast tensor if needed."""
    if not tensor_util.is_tensor(x):
        # x is a numpy array
        x = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(x)
    return x
Example #34
 def test_convert_dense(self):
     value = [42, 43]
     from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(value)
     self.assertAllEqual(value, self.evaluate(from_value))
Example #35
    def __new__(cls,
                mode,
                predictions=None,
                loss=None,
                train_op=None,
                eval_metric_ops=None,
                output_alternatives=None,
                training_chief_hooks=None,
                training_hooks=None,
                scaffold=None):
        """Creates a validated `ModelFnOps` instance.

    For a multi-headed model, the predictions dict here will contain the outputs
    of all of the heads.  However: at serving time, requests will be made
    specifically for one or more heads, and the RPCs used for these requests may
    differ by problem type (i.e., regression, classification, other).  The
    purpose of the output_alternatives dict is to aid in exporting a SavedModel
    from which such head-specific queries can be served.  These
    output_alternatives will be combined with input_alternatives (see
    `saved_model_export_utils`) to produce a set of `SignatureDef`s specifying
    the valid requests that can be served from this model.

    For a single-headed model, it is still advisable to provide
    output_alternatives with a single entry, because this is how the problem
    type is communicated for export and serving.  If output_alternatives is not
    given, the resulting SavedModel will support only one head of unspecified
    type.

    Args:
      mode: One of `ModeKeys`. Specifies if this is training, evaluation or
        prediction.
      predictions: Predictions `Tensor` or dict of `Tensor`.
      loss: Training loss `Tensor`.
      train_op: Op for the training step.
      eval_metric_ops: Dict of metric results keyed by name. The values of the
        dict are the results of calling a metric function, such as `Tensor`.
      output_alternatives: a dict of
        `{submodel_name: (problem_type, {tensor_name: Tensor})}`, where
        `submodel_name` is a submodel identifier that should be consistent
        across the pipeline (here likely taken from the name of each `Head`,
        for models that use them), `problem_type` is a `ProblemType`,
        `tensor_name` is a symbolic name for an output Tensor possibly but not
        necessarily taken from `PredictionKey`, and `Tensor` is the
        corresponding output Tensor itself.
      training_chief_hooks: A list of `SessionRunHook` objects that will be
        run on the chief worker during training.
      training_hooks: A list of `SessionRunHook` objects that will be run on
        all workers during training.
      scaffold: A `tf.train.Scaffold` object that can be used to set
        initialization, saver, and more to be used in training.

    Returns:
      A validated `ModelFnOps` object.

    Raises:
      ValueError: If validation fails.
    """
        ModeKeys.validate(mode)

        # Assert all ops are from the same graph.
        get_graph_from_inputs((predictions, loss, train_op))

        # Validate train_op.
        if train_op is None:
            if mode == ModeKeys.TRAIN:
                raise ValueError('Missing train_op.')
        elif not isinstance(train_op, ops.Operation):
            # TODO(ptucker): Should this be allowed? Consider raising error.
            train_op = ops.convert_to_tensor(train_op).op

        # Validate loss.
        if loss is None:
            if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
                raise ValueError('Missing loss.')
        else:
            loss = ops.convert_to_tensor(loss)
            loss_shape = loss.get_shape()
            if loss_shape.num_elements() not in (None, 1):
                raise ValueError('Loss must be scalar: %s.' % loss)
            if not loss_shape.is_compatible_with(tensor_shape.scalar()):
                loss = array_ops.reshape(loss, [])

        # Validate predictions.
        if predictions is None:
            if mode == ModeKeys.INFER or mode == ModeKeys.EVAL:
                raise ValueError('Missing predictions.')
        else:
            if isinstance(predictions, dict):
                predictions = {
                    k: sparse_tensor.convert_to_tensor_or_sparse_tensor(v)
                    for k, v in six.iteritems(predictions)
                }
            else:
                predictions = sparse_tensor.convert_to_tensor_or_sparse_tensor(
                    predictions)

        # Validate eval_metric_ops
        if eval_metric_ops is None:
            eval_metric_ops = {}
        else:
            if not isinstance(eval_metric_ops, dict):
                raise ValueError('eval_metric_ops must be a dict.')

        # Validate hooks
        if training_chief_hooks is None:
            training_chief_hooks = []
        if training_hooks is None:
            training_hooks = []
        for hook in training_hooks + training_chief_hooks:
            if not isinstance(hook, session_run_hook.SessionRunHook):
                raise TypeError(
                    'All hooks returned from model_fn must be '
                    'SessionRunHook instances, got instance of %s: %s' %
                    (type(hook), hook))

        return super(ModelFnOps,
                     cls).__new__(cls,
                                  predictions=predictions,
                                  loss=loss,
                                  train_op=train_op,
                                  eval_metric_ops=eval_metric_ops,
                                  output_alternatives=output_alternatives,
                                  training_chief_hooks=training_chief_hooks,
                                  training_hooks=training_hooks,
                                  scaffold=scaffold,
                                  mode=mode)
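
A standalone sketch of the loss-validation step above, using TF 2.x eager ops (illustrative, not the ModelFnOps code itself):

import tensorflow as tf

loss = tf.constant([[0.25]])                       # any 1-element tensor is accepted
if loss.shape.num_elements() not in (None, 1):
    raise ValueError('Loss must be scalar: %s.' % loss)
if not loss.shape.is_compatible_with(tf.TensorShape([])):
    loss = tf.reshape(loss, [])                    # normalize to a true scalar
print(loss.shape)                                  # ()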
Example #36
 def test_convert_dense(self):
     with self.test_session():
         value = [42, 43]
         from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
             value)
         self.assertAllEqual(value, from_value.eval())
Example #37
  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metric_ops=None,
              output_alternatives=None,
              training_chief_hooks=None,
              training_hooks=None,
              scaffold=None):
    """Creates a validated `ModelFnOps` instance.

    For a multi-headed model, the predictions dict here will contain the outputs
    of all of the heads.  However: at serving time, requests will be made
    specifically for one or more heads, and the RPCs used for these requests may
    differ by problem type (i.e., regression, classification, other).  The
    purpose of the output_alternatives dict is to aid in exporting a SavedModel
    from which such head-specific queries can be served.  These
    output_alternatives will be combined with input_alternatives (see
    `saved_model_export_utils`) to produce a set of `SignatureDef`s specifying
    the valid requests that can be served from this model.

    For a single-headed model, it is still advisable to provide
    output_alternatives with a single entry, because this is how the problem
    type is communicated for export and serving.  If output_alternatives is not
    given, the resulting SavedModel will support only one head of unspecified
    type.

    Args:
      mode: One of `ModeKeys`. Specifies if this is training, evaluation or
        prediction.
      predictions: Predictions `Tensor` or dict of `Tensor`.
      loss: Training loss `Tensor`.
      train_op: Op for the training step.
      eval_metric_ops: Dict of metric results keyed by name. The values of the
        dict are the results of calling a metric function, such as `Tensor`.
      output_alternatives: a dict of
        `{submodel_name: (problem_type, {tensor_name: Tensor})}`, where
        `submodel_name` is a submodel identifier that should be consistent
        across the pipeline (here likely taken from the name of each `Head`,
        for models that use them), `problem_type` is a `ProblemType`,
        `tensor_name` is a symbolic name for an output Tensor possibly but not
        necessarily taken from `PredictionKey`, and `Tensor` is the
        corresponding output Tensor itself.
      training_chief_hooks: A list of `SessionRunHook` objects that will be
        run on the chief worker during training.
      training_hooks: A list of `SessionRunHook` objects that will be run on
        all workers during training.
      scaffold: A `tf.train.Scaffold` object that can be used to set
        initialization, saver, and more to be used in training.

    Returns:
      A validated `ModelFnOps` object.

    Raises:
      ValueError: If validation fails.
    """
    ModeKeys.validate(mode)

    # Assert all ops are from the same graph.
    get_graph_from_inputs((predictions, loss, train_op))

    # Validate train_op.
    if train_op is None:
      if mode == ModeKeys.TRAIN:
        raise ValueError('Missing train_op.')
    elif not isinstance(train_op, ops.Operation):
      # TODO(ptucker): Should this be allowed? Consider raising error.
      train_op = ops.convert_to_tensor(train_op).op

    # Validate loss.
    if loss is None:
      if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
        raise ValueError('Missing loss.')
    else:
      loss = ops.convert_to_tensor(loss)
      loss_shape = loss.get_shape()
      if loss_shape.num_elements() not in (None, 1):
        raise ValueError('Loss must be scalar: %s.' % loss)
      if not loss_shape.is_compatible_with(tensor_shape.scalar()):
        loss = array_ops.reshape(loss, [])

    # Validate predictions.
    if predictions is None:
      if mode == ModeKeys.INFER or mode == ModeKeys.EVAL:
        raise ValueError('Missing predictions.')
    else:
      if isinstance(predictions, dict):
        predictions = {
            k: sparse_tensor.convert_to_tensor_or_sparse_tensor(v)
            for k, v in six.iteritems(predictions)
        }
      else:
        predictions = sparse_tensor.convert_to_tensor_or_sparse_tensor(
            predictions)

    # Validate eval_metric_ops
    if eval_metric_ops is None:
      eval_metric_ops = {}
    else:
      if not isinstance(eval_metric_ops, dict):
        raise ValueError('eval_metric_ops must be a dict.')

    # Validate hooks
    if training_chief_hooks is None:
      training_chief_hooks = []
    if training_hooks is None:
      training_hooks = []
    for hook in training_hooks + training_chief_hooks:
      if not isinstance(hook, session_run_hook.SessionRunHook):
        raise TypeError('All hooks returned from model_fn must be '
                        'SessionRunHook instances, got instance of %s: %s' %
                        (type(hook), hook))

    return super(ModelFnOps, cls).__new__(
        cls,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        output_alternatives=output_alternatives,
        training_chief_hooks=training_chief_hooks,
        training_hooks=training_hooks,
        scaffold=scaffold,
        mode=mode)