  def test_raises_when_zero(self):
    with self.test_session():
      claire = constant_op.constant([0], name="claire")
      with ops.control_dependencies([check_ops.assert_negative(claire)]):
        out = array_ops.identity(claire)
      with self.assertRaisesOpError("claire"):
        out.eval()
Example #2
  def test_raises_when_positive(self):
    with self.test_session():
      doug = constant_op.constant([1, 2], name="doug")
      with ops.control_dependencies(
          [check_ops.assert_negative(doug, message="fail")]):
        out = array_ops.identity(doug)
      with self.assertRaisesOpError("fail.*doug"):
        out.eval()
  def test_raises_when_positive(self):
    doug = constant_op.constant([1, 2], name="doug")
    with self.assertRaisesOpError("fail"):
      with ops.control_dependencies(
          [check_ops.assert_negative(
              doug, message="fail")]):
        out = array_ops.identity(doug)
      self.evaluate(out)
  def test_empty_tensor_doesnt_raise(self):
    # A tensor is negative when it satisfies:
    #   For every element x_i in x, x_i < 0
    # and an empty tensor has no elements, so this is trivially satisfied.
    # This is standard set theory.
    empty = constant_op.constant([], name="empty")
    with ops.control_dependencies([check_ops.assert_negative(empty)]):
      out = array_ops.identity(empty)
    self.evaluate(out)
  def test_empty_tensor_doesnt_raise(self):
    # A tensor is negative when it satisfies:
    #   For every element x_i in x, x_i < 0
    # and an empty tensor has no elements, so this is trivially satisfied.
    # This is standard set theory.
    with self.test_session():
      empty = constant_op.constant([], name="empty")
      with ops.control_dependencies([check_ops.assert_negative(empty)]):
        out = array_ops.identity(empty)
      out.eval()
  def test_doesnt_raise_when_negative(self):
    with self.test_session():
      frank = constant_op.constant([-1, -2], name="frank")
      with ops.control_dependencies([check_ops.assert_negative(frank)]):
        out = array_ops.identity(frank)
      out.eval()
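Together these tests pin down the contract of assert_negative: it passes when every element is strictly negative (or when the tensor is empty), and it raises, with any supplied message, when the input contains zero or positive entries. As a minimal eager-mode sketch of the same behaviour (assuming TensorFlow 2.x, where the check is exposed publicly as tf.debugging.assert_negative):

import tensorflow as tf

# Passes: every element is strictly negative.
tf.debugging.assert_negative(tf.constant([-1, -2]))

# Passes vacuously: an empty tensor has no element that could violate x < 0.
tf.debugging.assert_negative(tf.constant([]))

# Raises in eager mode: the input contains positive elements.
try:
  tf.debugging.assert_negative(tf.constant([1, 2]), message="fail")
except tf.errors.InvalidArgumentError as e:
  print(e.message)  # the error text includes "fail" and the offending values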
Example #7
def stack_dynamic_partitions(data, partitions, num_partitions, name=None):
    """Stacks dynamic partitions of a Tensor or RaggedTensor.

  Returns a RaggedTensor `output` with `num_partitions` rows, where the row
  `output[i]` is formed by stacking all slices `data[j1...jN]` such that
  `partitions[j1...jN] = i`.  Slices of `data` are stacked in row-major
  order.

  If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to
  `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`.

  #### Example:

  >>> data           = ['a', 'b', 'c', 'd', 'e']
  >>> partitions     = [  3,   0,   2,   2,   3]
  >>> num_partitions = 5
  >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions)
  <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>

  Args:
    data: A `Tensor` or `RaggedTensor` containing the values to stack.
    partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the
      partition that each slice of `data` should be added to.
      `partitions.shape` must be a prefix of `data.shape`.  Values must be
      greater than or equal to zero, and less than `num_partitions`.
      `partitions` is not required to be sorted.
    num_partitions: An `int32` or `int64` scalar specifying the number of
      partitions to output.  This determines the number of rows in `output`.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the stacked partitions.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a
    ragged dimension whose length is the number of data slices stacked for
    each `partition`.
  """
    with ops.name_scope(name, 'SegmentStack',
                        [data, partitions, num_partitions]):
        # Convert inputs to tensors.
        data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data,
                                                                name='data')
        row_splits_dtype = (data.row_splits.dtype if isinstance(
            data, ragged_tensor.RaggedTensor) else None)
        partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor(
            partitions, name='partitions', preferred_dtype=row_splits_dtype)
        num_partitions = ops.convert_to_tensor(
            num_partitions,
            name='num_partitions',
            preferred_dtype=partitions.dtype)
        if row_splits_dtype is not None:
            partitions = math_ops.cast(partitions, row_splits_dtype)
        num_partitions = math_ops.cast(num_partitions, partitions.dtype)

        # Sanity-checks for shapes.
        partitions_rank = partitions.shape.ndims
        if partitions_rank is None:
            raise ValueError('partitions must have known rank.')
        num_partitions.shape.assert_has_rank(0)
        partitions.shape.assert_is_compatible_with(
            data.shape[:partitions_rank])

        if partitions_rank == 0:
            # If partitions is a scalar, then just create a RaggedTensor
            # containing the complete `data` value in the specified row.
            return ragged_tensor.RaggedTensor.from_value_rowids(
                values=array_ops.stack([data]),
                value_rowids=array_ops.stack([partitions]),
                nrows=num_partitions,
                validate=False)

        elif partitions_rank == 1:
            # If partitions is a vector (the typical case): we can just use data and
            # partitions as the `values` and `value_rowids` for `from_value_rowids`,
            # as long as we sort them first.
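            # For example, with data=['a', 'b', 'c', 'd', 'e'] and
            # partitions=[3, 0, 2, 2, 3] (the docstring example), the stable
            # argsort yields permutation=[1, 2, 3, 0, 4], so
            # value_rowids=[0, 2, 2, 3, 3] and values=['b', 'c', 'd', 'a', 'e'],
            # which from_value_rowids assembles into
            # [['b'], [], ['c', 'd'], ['a', 'e'], []].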
            permutation = sort_ops.argsort(partitions, stable=True)
            value_rowids = array_ops.gather(partitions, permutation)
            values = array_ops.gather(data, permutation)
            check = check_ops.assert_negative(
                value_rowids[-1:] - num_partitions,
                message='partitions must be less than num_partitions')
            with ops.control_dependencies([check]):
                return ragged_tensor.RaggedTensor.from_value_rowids(
                    values, value_rowids, nrows=num_partitions, validate=False)

        else:
            # Handle higher-dimensional partitions via recursion.
            if not isinstance(data, ragged_tensor.RaggedTensor):
                data = ragged_tensor.RaggedTensor.from_tensor(
                    data, row_splits_dtype=partitions.dtype, ragged_rank=1)
            if not isinstance(partitions, ragged_tensor.RaggedTensor):
                partitions = ragged_tensor.RaggedTensor.from_tensor(
                    partitions,
                    row_splits_dtype=partitions.dtype,
                    ragged_rank=max(data.ragged_rank, partitions_rank - 1))
            check = check_ops.assert_equal(
                data.row_splits,
                partitions.row_splits,
                message='data and partitions have incompatible ragged shapes')
            with ops.control_dependencies([check]):
                return stack_dynamic_partitions(data.values, partitions.values,
                                                num_partitions)
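For reference, a small usage sketch of this function and of the docstring's equivalence claim; it assumes TensorFlow 2.x, where the function is exposed as tf.ragged.stack_dynamic_partitions:

import tensorflow as tf

data = tf.constant(['a', 'b', 'c', 'd', 'e'])
partitions = tf.constant([3, 0, 2, 2, 3])

# Rows are indexed by partition id; partitions 1 and 4 receive no slices.
stacked = tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions=5)
# -> <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>

# Because num_partitions is a Python int here, the same result can be built with
# tf.dynamic_partition followed by tf.ragged.stack:
same = tf.ragged.stack(tf.dynamic_partition(data, partitions, 5))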
  def test_raises_when_zero(self):
    claire = constant_op.constant([0], name="claire")
    with self.assertRaisesOpError("x < 0 did not hold"):
      with ops.control_dependencies([check_ops.assert_negative(claire)]):
        out = array_ops.identity(claire)
      self.evaluate(out)