Example #1
def segment_mean(data, segment_ids, num_segments, name=None):
    # For docs, see: _RAGGED_SEGMENT_DOCSTRING
    with ops.name_scope(name, 'RaggedSegmentMean',
                        [data, segment_ids, num_segments]):
        total = segment_sum(data, segment_ids, num_segments)
        ones = ragged_factory_ops.from_nested_row_splits(
            array_ops.ones_like(data.inner_values), data.nested_row_splits)
        count = segment_sum(ones, segment_ids, num_segments)
        return ragged_factory_ops.from_nested_row_splits(
            total.inner_values / count.inner_values, total.nested_row_splits)
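
segment_mean above is just segment_sum(data) divided by segment_sum(ones_like(data)). The following is a minimal pure-Python sketch (not the TensorFlow API; all names below are made up for illustration) of why dividing by that per-position count, rather than by the number of rows in a segment, gives a well-defined mean when ragged rows have different lengths. It assumes, as the sum/count construction implies, that the segment combiner aligns rows position-wise and skips values a shorter row does not have.

data = [[1.0, 2.0, 3.0], [5.0], [4.0, 4.0]]  # ragged rows
segment_ids = [0, 0, 1]                      # rows 0, 1 -> segment 0; row 2 -> segment 1
num_segments = 2

# segment_sum(data) and segment_sum(ones), accumulated per (segment, position).
sums = [{} for _ in range(num_segments)]
counts = [{} for _ in range(num_segments)]
for row, seg in zip(data, segment_ids):
    for j, value in enumerate(row):
        sums[seg][j] = sums[seg].get(j, 0.0) + value
        counts[seg][j] = counts[seg].get(j, 0) + 1

# total.inner_values / count.inner_values: each position is averaged only over
# the rows that actually have a value there.
means = [[sums[s][j] / counts[s][j] for j in sorted(sums[s])]
         for s in range(num_segments)]
print(means)  # [[3.0, 2.0, 3.0], [4.0, 4.0]]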
def reduce_mean(rt_input, axis=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceMean', [rt_input, axis]):
    total = reduce_sum(rt_input, axis)
    if ragged_tensor.is_ragged(rt_input):
      ones = ragged_factory_ops.from_nested_row_splits(
          array_ops.ones_like(rt_input.inner_values),
          rt_input.nested_row_splits)
    else:
      ones = array_ops.ones_like(rt_input)
    count = reduce_sum(ones, axis)
    if ragged_tensor.is_ragged(total):
      return ragged_factory_ops.from_nested_row_splits(
          total.inner_values / count.inner_values, total.nested_row_splits)
    else:
      return total / count
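
The same ones/count trick drives reduce_mean: total and count are reduced identically, so each output element is a sum divided by the number of input values that contributed to it. Below is a rough pure-Python model (not the TensorFlow API) of a reduction over the ragged axis (axis=0), under the assumption that ragged reductions combine values position-wise over the rows that are long enough to contain them.

rt = [[1.0, 2.0, 3.0], [], [4.0, 5.0]]

ncols = max(len(row) for row in rt)
col_means = []
for j in range(ncols):
    present = [row[j] for row in rt if j < len(row)]  # values present in column j
    col_means.append(sum(present) / len(present))     # total / count
print(col_means)  # [2.5, 3.5, 3.0]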
  def ragged_op(*args, **kwargs):
    """Ragged version of `op`."""
    args = list(args)

    # Collect all of the elementwise arguments, and put them in a single
    # dict whose values are the (potentially ragged) tensors that need to
    # be broadcast to a common shape.  The keys of this dict are tuples
    # (argkey, index), where argkey is an int for positional args or a string
    # for keyword args; and index is None for non-list args and the index of
    # the tensor for list args.
    elementwise_args = {}
    for (name, position, is_list) in elementwise_arg_infos.values():
      if position < len(args):
        if is_list:
          args[position] = list(args[position])
          for (index, arg) in enumerate(args[position]):
            elementwise_args[position, index] = arg
        else:
          elementwise_args[position, None] = args[position]
      elif name in kwargs:
        if is_list:
          kwargs[name] = list(kwargs[name])
          for (i, arg) in enumerate(kwargs[name]):
            elementwise_args[name, i] = arg
        else:
          elementwise_args[name, None] = kwargs[name]

    with ops.name_scope(None, op.__name__, elementwise_args.values()):
      # Convert all inputs to tensors or ragged tensors.
      for ((key, index), tensor) in elementwise_args.items():
        argname = elementwise_arg_infos[key].name
        converted = ragged_factory_ops.convert_to_tensor_or_ragged_tensor(
            tensor, name=argname)
        elementwise_args[key, index] = converted

      # Broadcast tensors to have compatible shapes.
      broadcast_args, result_splits, broadcast_check_ops = \
          _broadcast_elementwise_args(elementwise_args)

      # Replace tensor arguments with their dense values.
      for ((key, index), tensor) in broadcast_args.items():
        if ragged_tensor.is_ragged(tensor):
          if isinstance(key, int) and index is None:
            args[key] = tensor.inner_values
          elif isinstance(key, int) and index is not None:
            args[key][index] = tensor.inner_values
          elif isinstance(key, str) and index is None:
            kwargs[key] = tensor.inner_values
          else:
            assert isinstance(key, str) and index is not None
            kwargs[key][index] = tensor.inner_values

      # Call the elementwise op on the broadcasted dense values.
      with ops.control_dependencies(broadcast_check_ops):
        result_values = op(*args, **kwargs)

      # Restore any ragged dimensions that we stripped off, and return the
      # result.
      return ragged_factory_ops.from_nested_row_splits(result_values,
                                                       result_splits)
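
The following is a hypothetical, stripped-down illustration (plain Python, not part of the module) of the (argkey, index) keying scheme used above: positional arguments are keyed by their integer position, keyword arguments by name, and list-valued arguments get an additional element index, so every elementwise tensor lands in one flat dict that can be broadcast together.

def collect_elementwise_args(arg_infos, args, kwargs):
    """arg_infos is a list of (name, position, is_list) tuples, as above."""
    elementwise = {}
    for name, position, is_list in arg_infos:
        if position < len(args):
            source, key = args, position      # positional arg -> int key
        elif name in kwargs:
            source, key = kwargs, name        # keyword arg -> str key
        else:
            continue
        if is_list:
            for i, elt in enumerate(source[key]):
                elementwise[key, i] = elt     # list arg -> (key, element index)
        else:
            elementwise[key, None] = source[key]
    return elementwise

print(collect_elementwise_args(
    [('x', 0, False), ('ys', 1, True)], ['a'], {'ys': ['b', 'c']}))
# {(0, None): 'a', ('ys', 0): 'b', ('ys', 1): 'c'}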
Example #7
 def testRaggedMatrixWithMultiDimensionInnerValues(self, encoding):
     test_inner_values = constant_op.constant([[[72, 101, 108, 108, 111],
                                                [87, 111, 114, 108, 100]],
                                               [[102, 105, 120, 101, 100],
                                                [119, 111, 114, 100, 115]],
                                               [[72, 121, 112, 101, 114],
                                                [99, 117, 98, 101, 46]]])
     test_row_splits = [
         constant_op.constant([0, 2, 3], dtype=np.int64),
         constant_op.constant([0, 1, 1, 3], dtype=np.int64)
     ]
     test_value = ragged_factory_ops.from_nested_row_splits(
         test_inner_values, test_row_splits)
     expected_value = [
         [[[u"Hello".encode(encoding), u"World".encode(encoding)]], []],
         [[[u"fixed".encode(encoding), u"words".encode(encoding)],
           [u"Hyper".encode(encoding), u"cube.".encode(encoding)]]]
     ]
     unicode_encode_op = ragged_string_ops.unicode_encode(
         test_value, encoding)
     with self.cached_session():
         result = unicode_encode_op.eval()
         self.assertEqual(unicode_encode_op.ragged_rank, 2)
         self.assertAllEqual(result.tolist(), expected_value)
          # These next two assertions aren't strictly necessary, since they
          # test internal representations and we have already verified that
          # the value is correct.
         self.assertAllEqual(len(result.nested_row_splits),
                             len(test_row_splits))
         self.assertEqual(unicode_encode_op.inner_values.shape.ndims,
                          test_inner_values.shape.ndims - 1)
Example #8
def reduce_mean(input_tensor, axis=None, keepdims=None, name=None):
    """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
    with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]):
        total = reduce_sum(input_tensor, axis, keepdims)
        if ragged_tensor.is_ragged(input_tensor):
            ones = ragged_factory_ops.from_nested_row_splits(
                array_ops.ones_like(input_tensor.inner_values),
                input_tensor.nested_row_splits)
        else:
            ones = array_ops.ones_like(input_tensor)
        count = reduce_sum(ones, axis, keepdims)
        if ragged_tensor.is_ragged(total):
            return ragged_factory_ops.from_nested_row_splits(
                total.inner_values / count.inner_values,
                total.nested_row_splits)
        else:
            return total / count
Example #9
def map_inner_values(op, *args, **kwargs):
    """Applies `op` to the inner values of one or more RaggedTensors.

  Replaces any `RaggedTensor` in `args` or `kwargs` with its `inner_values`
  tensor, and then calls `op`.  Returns a `RaggedTensor` that is constructed
  from the input `RaggedTensor`s' `splits` and the value returned by
  the `op`.

  If the input arguments contain multiple `RaggedTensor`s, then they must have
  identical `splits`.

  Examples:

  ```python
  >>> rt = ragged.constant([[1, 2, 3], [], [4, 5], [6]])
  >>> ragged.map_inner_values(tf.ones_like, rt).eval().tolist()
  [[1, 1, 1], [], [1, 1], [1]]
  >>> ragged.map_inner_values(tf.multiply, rt, rt).eval().tolist()
  [[1, 4, 9], [], [16, 25], [36]]
  >>> ragged.map_inner_values(tf.add, rt, 5).eval().tolist()
  [[6, 7, 8], [], [9, 10], [11]]
  ```

  Args:
    op: The operation that should be applied to the RaggedTensor `inner_values`.
      `op` is typically an element-wise operation (such as math_ops.add), but
      any operation that preserves the size of the outermost dimension can be
      used.  I.e., `shape[0]` of the value returned by `op` must match
      `shape[0]` of the `RaggedTensor`s' `inner_values` tensors.
    *args: Arguments for `op`.
    **kwargs: Keyword arguments for `op`.

  Returns:
    A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all
    input `RaggedTensor`s.
  Raises:
    ValueError: If args contains no `RaggedTensors`, or if the `nested_splits`
      of the input `RaggedTensor`s are not identical.
  """
    # Replace RaggedTensors with their values; and collect the splits tensors
    # from each RaggedTensor.
    nested_splits_lists = []
    inner_args = _replace_ragged_with_inner_values(args, nested_splits_lists)
    inner_kwargs = _replace_ragged_with_inner_values(kwargs,
                                                     nested_splits_lists)
    if not nested_splits_lists:
        return op(*args, **kwargs)

    with ops.control_dependencies(
            ragged_util.assert_splits_match(nested_splits_lists)):
        # Delegate to op, and then compose the result from the transformed values
        # and the splits.
        return ragged_factory_ops.from_nested_row_splits(
            op(*inner_args, **inner_kwargs), nested_splits_lists[0])
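
A rough pure-Python analogue of the pattern map_inner_values implements (the helper names below are made up for illustration): flatten the ragged values, apply the op to the flat values, then reattach the original row splits to the result.

def flat_values(rows):
    return [v for row in rows for v in row]

def row_splits(rows):
    splits, total = [0], 0
    for row in rows:
        total += len(row)
        splits.append(total)
    return splits

def from_splits(values, splits):
    return [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]

rt = [[1, 2, 3], [], [4, 5], [6]]
doubled = from_splits([v * 2 for v in flat_values(rt)], row_splits(rt))
print(doubled)  # [[2, 4, 6], [], [8, 10], [12]]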
 def handle(self, args, kwargs):
     if args:
         x, args = args[0], args[1:]
     else:
         kwargs = kwargs.copy()
         x = kwargs.pop(self._x, None)
     if x is None:
         return self.NOT_SUPPORTED
     if self._arg_is_list:
         found_ragged = False
         for elt in x:
             if ragged_tensor.is_ragged(elt):
                 found_ragged = True
             elif not _is_convertible_to_tensor(elt):
                 return self.NOT_SUPPORTED
         if found_ragged:
             nested_splits_lists = [
                 elt.nested_row_splits for elt in x
                 if ragged_tensor.is_ragged(elt)
             ]
             inner_values = [
                 elt.inner_values if ragged_tensor.is_ragged(elt) else elt
                 for elt in x
             ]
             with ops.control_dependencies(
                     ragged_util.assert_splits_match(nested_splits_lists)):
                 return ragged_factory_ops.from_nested_row_splits(
                     self._original_op(inner_values, *args, **kwargs),
                     nested_splits_lists[0])
         else:
             return self.NOT_SUPPORTED
     else:
         found_ragged = ragged_tensor.is_ragged(x)
         if found_ragged:
             mapped_values = self._original_op(x.inner_values, *args,
                                               **kwargs)
             return x.with_inner_values(mapped_values)
         else:
             return self.NOT_SUPPORTED
def from_tensor(tensor, lengths=None, padding=None, ragged_rank=1, name=None):
    """Converts a `Tensor` into a `RaggedTensor`.

  The set of absent/default values may be specified using a vector of lengths
  or a padding value (but not both).  If `lengths` is specified, then the
  output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`.
  If `padding` is specified, then any row *suffix* consisting entirely of
  `padding` will be excluded from the returned `RaggedTensor`.  If neither
  `lengths` nor `padding` is specified, then the returned `RaggedTensor` will
  have no absent/default values.

  Examples:

  ```python
  >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
  >>> ragged.from_tensor(dt).eval().tolist()
  [[5, 7, 0], [0, 3, 0], [6, 0, 0]]
  >>> ragged.from_tensor(dt, lengths=[2, 0, 3]).eval().tolist()
  [[5, 7], [], [6, 0, 0]]
  >>> ragged.from_tensor(dt, padding=0).eval().tolist()
  [[5, 7], [0, 3], [6]]
  ```

  Args:
    tensor: The `Tensor` to convert.  Must have rank `ragged_rank + 1` or
      higher.
    lengths: An optional set of row lengths, specified using a 1-D integer
      `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows in
      `tensor`).  If specified, then `output[row]` will contain
      `tensor[row][:lengths[row]]`.  Negative lengths are treated as zero.
    padding: An optional padding value.  If specified, then any row suffix
      consisting entirely of `padding` will be excluded from the returned
      RaggedTensor.  `padding` is a `Tensor` with the same dtype as `tensor`
      and with `shape=tensor.shape[ragged_rank + 1:]`.
    ragged_rank: Integer specifying the ragged rank for the returned
      `RaggedTensor`.  Must be greater than zero.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `RaggedTensor` with the specified `ragged_rank`.  The shape of the
    returned ragged tensor is compatible with the shape of `tensor`.
  Raises:
    ValueError: If both `lengths` and `padding` are specified.
  """
    if lengths is not None and padding is not None:
        raise ValueError('Specify lengths or padding, but not both')
    if not isinstance(ragged_rank, int):
        raise TypeError('ragged_rank expected int, got %r' % ragged_rank)
    if ragged_rank <= 0:
        raise ValueError('ragged_rank must be greater than 0; got %s' %
                         ragged_rank)

    with ops.name_scope(name, 'RaggedFromTensor', [tensor, lengths, padding]):
        tensor = ops.convert_to_tensor(tensor, name='tensor')
        tensor.shape.with_rank_at_least(ragged_rank + 1)
        input_shape = array_ops.shape(tensor, out_type=dtypes.int64)
        ncols = input_shape[1]

        # Handle ragged_rank>1 via recursion:
        # If the output should have multiple ragged dimensions, then first
        # flatten the tensor to eliminate all but the last ragged dimension,
        # and recursively convert that flattened tensor.  Then add on the splits
        # for the dimensions that we flattened out.
        if ragged_rank > 1:
            # Flatten `tensor` to eliminate all but the last ragged dimension.
            new_shape = array_ops.concat(
                [constant_op.constant([-1], dtypes.int64),
                 input_shape[ragged_rank:]],
                axis=0)
            flattened = array_ops.reshape(tensor, new_shape)
            # Recursively convert the flattened tensor.
            values = from_tensor(flattened, lengths, padding)
            # The total number of elements in each dimension.  E.g., if
            # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
            dim_size = math_ops.cumprod(input_shape)
            # Construct splits tensors for the dimensions that were flattened.
            new_splits = [
                math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim]
                for dim in range(1, ragged_rank)
            ]
            return ragged_factory_ops.from_nested_row_splits(
                values, new_splits)

        # If padding was specified, then use it to find row lengths.
        if padding is not None:
            padding = ops.convert_to_tensor(padding,
                                            name='padding',
                                            dtype=tensor.dtype)
            padding.shape.assert_is_compatible_with(tensor.shape[2:])

            # Find places where the padding is equal to the tensor.  (This will
            # broadcast `padding` across the outermost 2 dimensions of `tensor`,
            # so `has_default_value.shape = tensor.shape`.)
            has_default_value = math_ops.equal(padding, tensor)

            # If the padding isn't a scalar, then require that all values in
            # the padding match each item in the tensor.  After this block of
            # code, `has_default.shape = tensor.shape[:2]`.  (Unfortunately, we
            # can't just use reduce_all for both cases, because when you pass
            # an empty `axis` list to reduce_all, it reduces all axes; but we
            # want it to reduce no axes -- i.e., to be a no-op.)
            tensor_rank = array_ops.rank(tensor)
            reduce_axis = math_ops.range(2, tensor_rank)
            has_default = control_flow_ops.cond(
                tensor_rank > 2, lambda: math_ops.reduce_all(has_default_value,
                                                             axis=reduce_axis),
                lambda: has_default_value)
            has_default.set_shape(tensor_shape.TensorShape([None, None]))
            has_default.set_shape(tensor.shape[:2])

            # Use has_default to find the length of each row: for each
            # non-default item in a row, calculate the length that the row
            # needs to have to include that item; and then take the max of
            # those values (across each row).
            has_nondefault = math_ops.logical_not(has_default)
            has_nondefault = math_ops.cast(has_nondefault, dtypes.int64)
            length_for_nondefault_value = (
                has_nondefault *
                array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
            lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)

        # If we have lengths (either directly supplied, or computed from paddings),
        # then use those to construct splits; and then use masking to get the
        # corresponding values.
        if lengths is not None:
            lengths = ragged_util.convert_to_int_tensor(
                lengths, 'lengths', dtypes.int64)
            lengths.shape.assert_has_rank(1)
            lengths = math_ops.minimum(lengths, ncols)
            lengths = math_ops.maximum(lengths, 0)
            limits = math_ops.cumsum(lengths)
            splits = array_ops.concat(
                [array_ops.zeros([1], dtypes.int64), limits], axis=0)
            mask = array_ops.sequence_mask(lengths, maxlen=ncols)
            values = array_ops.boolean_mask(tensor, mask)
            return ragged_factory_ops.from_row_splits(values, splits)

        # If neither padding nor lengths were specified, then create a splits
        # vector that contains no default values, and reshape the input tensor
        # to form the values for the RaggedTensor.
        nrows = input_shape[0]
        nvals = nrows * ncols
        splits = math_ops.range(nrows + 1) * ncols
        values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0)
        values = array_ops.reshape(tensor, values_shape)
        return ragged_factory_ops.from_row_splits(values, splits)
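
The padding branch above boils down to "a row keeps everything up to and including its last non-padding element". The following is a pure-Python sketch of that length computation, mirroring the scalar-padding case from the docstring example (it is not the graph code itself).

dt = [[5, 7, 0], [0, 3, 0], [6, 0, 0]]
padding = 0

# For each non-padding item at position j, the row must be at least j + 1 long;
# the row length is the max of those values (0 if the whole row is padding).
lengths = [max([j + 1 for j, v in enumerate(row) if v != padding], default=0)
           for row in dt]
print(lengths)                                    # [2, 2, 1]
print([row[:n] for row, n in zip(dt, lengths)])   # [[5, 7], [0, 3], [6]]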