import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor

def concat_along_batch_dimension(outputs):
    """Concats prediction outputs along the batch dimension."""
    if isinstance(outputs[0], sparse_tensor.SparseTensor):
        return sparse_ops.sparse_concat_v2(axis=0, sp_inputs=outputs)
    if isinstance(outputs[0], ragged_tensor.RaggedTensor):
        return ragged_concat_ops.concat(outputs, axis=0)
    return np.concatenate(outputs)
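A minimal usage sketch of the helper above, assuming eager TF2 (the inputs are illustrative; tf.ragged.constant builds the same RaggedTensor class the isinstance check tests for):

import tensorflow as tf

outputs = [tf.ragged.constant([[1, 2], [3]]),
           tf.ragged.constant([[4, 5, 6]])]
merged = concat_along_batch_dimension(outputs)
print(merged)  # <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>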
  def testSingleTensorInput(self):
    """Tests ragged_concat with a single tensor input.

    Usually, we pass a list of values in for rt_inputs.  However, you can
    also pass in a single value (as with tf.concat), in which case it simply
    returns that tensor.  This test exercises that path.
    """
    rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]])
    concatenated = ragged_concat_ops.concat(rt_inputs, 0)
    self.assertRaggedEqual(concatenated, [[1, 2], [3, 4]])
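A hedged illustration of the pass-through path described in the docstring, using the same internal modules as the test:

from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2], [3, 4]])
# A single non-list input is returned as-is (mirroring tf.concat's
# handling of a lone tensor); no concatenation actually happens.
out = ragged_concat_ops.concat(rt, axis=0)
print(out)  # <tf.RaggedTensor [[1, 2], [3, 4]]>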
  def testRuntimeError(self, rt_inputs, axis, error, message,
                       ragged_ranks=None):
    if context.executing_eagerly():
      return
    rt_inputs = [
        array_ops.placeholder_with_default(rt, shape=None) for rt in rt_inputs
    ]
    concatenated = ragged_concat_ops.concat(rt_inputs, axis)
    with self.assertRaisesRegex(error, message):
      self.evaluate(concatenated)
  def testRaggedConcat(self,
                       descr,
                       rt_inputs,
                       axis,
                       expected,
                       ragged_ranks=None,
                       expected_ragged_rank=None,
                       expected_shape=None):
    rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)
    concatenated = ragged_concat_ops.concat(rt_inputs, axis)
    if expected_ragged_rank is not None:
      self.assertEqual(concatenated.ragged_rank, expected_ragged_rank)
    if expected_shape is not None:
      self.assertEqual(concatenated.shape.as_list(), expected_shape)
    self.assertRaggedEqual(concatenated, expected)
Example #8
def append_composite_tensor(target, to_append):
  """Helper function to append composite tensors to each other in the 0 axis.

  In order to support batching within a fit/evaluate/predict call, we need
  to be able to aggregate within a CompositeTensor. Unfortunately, the CT
  API currently does not make this easy - especially in V1 mode, where we're
  working with CompositeTensor Value objects that have no connection with the
  CompositeTensors that created them.

  Arguments:
    target: CompositeTensor or CompositeTensor value object that will be
      appended to.
    to_append: CompositeTensor or CompositeTensor value object to append to
      'target'.

  Returns:
    A CompositeTensor or CompositeTensor value object.

  Raises:
    RuntimeError: if concatenation is not possible.
  """
  if type(target) is not type(to_append):
    raise RuntimeError('Unable to concatenate %s and %s' %
                       (type(target), type(to_append)))

  # Perform type-specific concatenation.
  # TODO(b/125094323): This should be replaced by a simple call to
  # target.append() that should work on all of the below classes.

  # If we're seeing a CompositeTensor here, we know it's because we're in
  # Eager mode (or else we'd have evaluated the CT to a CT Value object
  # already). Therefore, it's safe to call concat() on it without evaluating
  # the result any further. If not - that is, if we're seeing a
  # SparseTensorValue or a RaggedTensorValue - we need to hand-update it
  # since we're outside of the graph anyways.
  if isinstance(target, sparse_tensor.SparseTensor):
    # We need to invoke the sparse version of concatenate here - tf.concat
    # won't work.
    return sparse_ops.sparse_concat(sp_inputs=[target, to_append], axis=0)
  elif isinstance(target, ragged_tensor.RaggedTensor):
    return ragged_concat_ops.concat([target, to_append], axis=0)
  elif isinstance(target, sparse_tensor.SparseTensorValue):
    return _append_sparse_tensor_value(target, to_append)
  elif isinstance(target, ragged_tensor_value.RaggedTensorValue):
    return _append_ragged_tensor_value(target, to_append)
  else:
    raise RuntimeError('Attempted to concatenate unsupported object %s.' %
                       type(target))
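A minimal eager-mode sketch of the SparseTensor branch (the indices and values here are illustrative, not from the original code):

import tensorflow as tf

a = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[1, 3])
b = tf.sparse.SparseTensor(indices=[[0, 1]], values=[2], dense_shape=[1, 3])
merged = append_composite_tensor(a, b)
print(tf.sparse.to_dense(merged))
# [[1 0 0]
#  [0 2 0]]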
Example #10
  def test_Bidirectional_ragged_input(self, merge_mode):
    if test.is_built_with_rocm():
      # ragged tensors are not supported in the ROCm RNN implementation
      self.skipTest('Test not supported on the ROCm platform')
    np.random.seed(100)
    rnn = keras.layers.LSTM
    units = 3
    x = ragged_factory_ops.constant(
        [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1]],
         [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
         [[1, 1, 1], [1, 1, 1], [1, 1, 1]]],
        ragged_rank=1)
    x = math_ops.cast(x, 'float32')

    # pylint: disable=g-long-lambda
    with self.cached_session():
      if merge_mode == 'ave':
        merge_func = lambda y, y_rev: (y + y_rev) / 2
      elif merge_mode == 'concat':
        merge_func = lambda y, y_rev: ragged_concat_ops.concat(
            (y, y_rev), axis=-1)
      elif merge_mode == 'mul':
        merge_func = lambda y, y_rev: (y * y_rev)
      # pylint: enable=g-long-lambda

      inputs = keras.Input(
          shape=(None, 3), batch_size=4, dtype='float32', ragged=True)
      layer = keras.layers.Bidirectional(
          rnn(units, return_sequences=True), merge_mode=merge_mode)
      f_merged = keras.backend.function([inputs], layer(inputs))
      f_forward = keras.backend.function([inputs],
                                         layer.forward_layer(inputs))

      # TODO(kaftan): after KerasTensor refactor TF op layers should work
      # with many composite tensors, and this shouldn't need to be a lambda
      # layer.
      reverse_layer = core.Lambda(array_ops.reverse, arguments=dict(axis=[1]))
      f_backward = keras.backend.function(
          [inputs],
          reverse_layer(layer.backward_layer(inputs)))

      y_merged = f_merged(x)
      y_expected = merge_func(
          ragged_tensor.convert_to_tensor_or_ragged_tensor(f_forward(x)),
          ragged_tensor.convert_to_tensor_or_ragged_tensor(f_backward(x)))

      y_merged = ragged_tensor.convert_to_tensor_or_ragged_tensor(y_merged)
      self.assertAllClose(y_merged.flat_values, y_expected.flat_values)
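The merge_func lambdas are where the merge modes differ: 'ave' and 'mul' combine the forward and backward outputs elementwise, while 'concat' joins them along the innermost axis. A toy sketch with illustrative values (public tf.ragged API, not part of the test):

import tensorflow as tf

y = tf.ragged.constant([[[1., 2.]], [[3., 4.], [5., 6.]]], ragged_rank=1)
y_rev = tf.ragged.constant([[[7., 8.]], [[9., 0.], [1., 2.]]], ragged_rank=1)

ave = (y + y_rev) / 2                 # elementwise mean; shape unchanged
mul = y * y_rev                       # elementwise product; shape unchanged
cat = tf.concat([y, y_rev], axis=-1)  # last dim doubles: [..., 2] -> [..., 4]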
Example #11
0
    def test_Bidirectional_ragged_input(self):
        np.random.seed(100)
        rnn = keras.layers.LSTM
        units = 3
        x = ragged_factory_ops.constant(
            [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1]],
             [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
             [[1, 1, 1], [1, 1, 1], [1, 1, 1]]],
            ragged_rank=1)
        x = math_ops.cast(x, 'float32')

        # pylint: disable=g-long-lambda
        with self.cached_session():
            for merge_mode in ['ave', 'concat', 'mul']:
                if merge_mode == 'ave':
                    merge_func = lambda y, y_rev: (y + y_rev) / 2
                elif merge_mode == 'concat':
                    merge_func = lambda y, y_rev: ragged_concat_ops.concat(
                        (y, y_rev), axis=-1)
                elif merge_mode == 'mul':
                    merge_func = lambda y, y_rev: (y * y_rev)

                inputs = keras.Input(shape=(None, 3),
                                     batch_size=4,
                                     dtype='float32',
                                     ragged=True)
                layer = keras.layers.Bidirectional(rnn(units,
                                                       return_sequences=True),
                                                   merge_mode=merge_mode)
                f_merged = keras.backend.function([inputs], layer(inputs))
                f_forward = keras.backend.function([inputs],
                                                   layer.forward_layer(inputs))
                f_backward = keras.backend.function(
                    [inputs],
                    array_ops.reverse(layer.backward_layer(inputs), axis=[1]))

                y_merged = f_merged(x)
                y_expected = merge_func(
                    ragged_tensor.convert_to_tensor_or_ragged_tensor(
                        f_forward(x)),
                    ragged_tensor.convert_to_tensor_or_ragged_tensor(
                        f_backward(x)))

                y_merged = ragged_tensor.convert_to_tensor_or_ragged_tensor(
                    y_merged)
                self.assertAllClose(y_merged.flat_values,
                                    y_expected.flat_values)
Example #13
def _elementwise_where(condition, x, y):
  """Ragged version of tf.where(condition, x, y)."""
  condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)
  x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)
  y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)

  if not (condition_is_ragged or x_is_ragged or y_is_ragged):
    return array_ops.where(condition, x, y)

  elif condition_is_ragged and x_is_ragged and y_is_ragged:
    return ragged_functional_ops.map_flat_values(array_ops.where, condition, x,
                                                 y)
  elif not condition_is_ragged:
    # Concatenate x and y, and then use `gather` to assemble the selected rows.
    condition.shape.assert_has_rank(1)
    x_nrows = _nrows(x)
    x_and_y = ragged_concat_ops.concat([x, y], axis=0)
    indices = array_ops.where(condition, math_ops.range(x_nrows),
                              x_nrows + math_ops.range(_nrows(y)))
    return ragged_gather_ops.gather(x_and_y, indices)

  else:
    raise ValueError('Input shapes do not match.')
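The gather trick in the non-ragged-condition branch deserves a worked example: all rows of x and y are stacked, then each output row is selected by index. A hedged sketch with the public API (values are illustrative):

import tensorflow as tf

condition = tf.constant([True, False])
x = tf.ragged.constant([[1, 2], [3]])
y = tf.ragged.constant([[9], [8, 7]])

# Rows 0..1 of x_and_y come from x, rows 2..3 from y.
x_and_y = tf.concat([x, y], axis=0)
# Where condition holds, pick x's row index; otherwise the matching y row.
indices = tf.where(condition, tf.range(2), 2 + tf.range(2))
result = tf.gather(x_and_y, indices)
print(result)  # <tf.RaggedTensor [[1, 2], [8, 7]]>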
Example #14
  def _concat(i):
    self.assertTrue(ragged_tensor.is_ragged(i))
    return ragged_concat_ops.concat([i, i], 0)