  def testDocStringExamples(self):
    """Test the examples in `map_flat_values.__doc__`."""
    rt = ragged.constant([[1, 2, 3], [], [4, 5], [6]])
    v1 = ragged.map_flat_values(array_ops.ones_like, rt)
    v2 = ragged.map_flat_values(math_ops.multiply, rt, rt)
    v3 = ragged.map_flat_values(math_ops.add, rt, 5)
    self.assertRaggedEqual(v1, [[1, 1, 1], [], [1, 1], [1]])
    self.assertRaggedEqual(v2, [[1, 4, 9], [], [16, 25], [36]])
    self.assertRaggedEqual(v3, [[6, 7, 8], [], [9, 10], [11]])
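  # Note on the checks above: `map_flat_values` applies the op only to `rt`'s
  # flat values and reuses `rt`'s row splits, so v1, v2, and v3 all keep rt's
  # nesting structure; only the flat values change.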
  def assertRaggedMapInnerValuesReturns(self, op, expected, args=(),
                                        kwargs=None):
    kwargs = kwargs or {}
    result = ragged.map_flat_values(op, *args, **kwargs)
    self.assertRaggedEqual(result, expected)
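  # Illustrative sketch (not part of the original suite; the method name and
  # values are assumptions): one possible use of the helper above, checking
  # that an elementwise op applied to two ragged tensors with identical row
  # splits preserves those splits.
  def testMapInnerValuesAddSketch(self):
    x = ragged.constant([[3, 1, 4], [], [1, 5]])
    y = ragged.constant([[1, 2, 3], [], [4, 5]])
    self.assertRaggedMapInnerValuesReturns(
        op=math_ops.add,
        args=(x, y),
        expected=[[4, 3, 7], [], [5, 10]])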
  def testRaggedTensorSplitsMismatchErrorAtRuntime(self):
    splits1 = array_ops.placeholder_with_default(
        constant_op.constant([0, 3, 3, 5], dtypes.int64), None)
    splits2 = array_ops.placeholder_with_default(
        constant_op.constant([0, 1, 3, 5], dtypes.int64), None)
    x = ragged.RaggedTensor.from_row_splits([3, 1, 4, 1, 5], splits1)
    y = ragged.RaggedTensor.from_row_splits([1, 2, 3, 4, 5], splits2)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r'.*Inputs must have identical ragged splits'):
      self.evaluate(ragged.map_flat_values(math_ops.add, x, y))
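  # The placeholders above hide the split values from graph construction, so
  # the splits mismatch cannot be caught statically; it surfaces as an
  # InvalidArgumentError only when the result is evaluated at run time.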
  def testGradient(self):
    if context.executing_eagerly():
      return
    # rt1.shape == rt2.shape == [1, (D2), (D3), 2].
    rt1 = ragged.constant(
        [[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0]]]], ragged_rank=2)
    rt2 = ragged.constant(
        [[[[9.0, 8.0], [7.0, 6.0]], [[5.0, 4.0]]]], ragged_rank=2)
    rt = ragged.map_flat_values(math_ops.add, rt1, rt2 * 2.0)
    st = rt.to_sparse()

    g1, g2 = gradients_impl.gradients(st.values,
                                      [rt1.flat_values, rt2.flat_values])
    self.assertRaggedEqual(g1, [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
    self.assertRaggedEqual(g2, [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]])
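  # Why ones and twos: rt.flat_values == rt1.flat_values + 2 * rt2.flat_values,
  # and to_sparse() simply repackages those values, so d(st.values)/d(rt1) is
  # all ones and d(st.values)/d(rt2) is all twos over the [3, 2] flat values.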
class RaggedMapOpTest(ragged_test_util.RaggedTensorTestCase,
                      parameterized.TestCase):

  @parameterized.parameters([
      # The following tests map over a RaggedTensor and apply a transformation
      # that returns a result with shape:
      # [d1, (d2)] -> [d1]
      dict(
          fn=mo.reduce_mean,
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[2, 4, 6],
      ),
      dict(
          fn=string_ops.reduce_join,
          elems=[['foo', 'bar', 'baz'], ['a'], ['b', 'c']],
          expected_output=[b'foobarbaz', b'a', b'bc'],
          dtype=dtypes.string,
      ),
      # [d1, (d2)] -> [d1, 2]
      dict(
          fn=lambda x: array_ops.stack([mo.reduce_mean(x), mo.reduce_sum(x)]),
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[[2, 6], [4.5, 9], [6.5, 13]],
          dtype=dtypes.float32,
          expected_ragged_rank=0,
      ),
      # [d1, (d2)] -> [d1, (d2)]
      dict(
          fn=lambda x: x + np.int64(1),
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[[2, 3, 4], [5, 6], [7, 8]],
          dtype=dtypes.int64,
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1, (d2), d3] -> [d1, (d2), d3]
      dict(
          fn=lambda x: x + np.int64(1),
          elems=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          elems_ragged_rank=1,
          expected_ragged_rank=1,
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
          expected_output=[[[2, 3], [4, 5]], [], [[6, 7], [8, 9], [10, 1]]],
      ),
      # [d1, (d2)] -> [d1, (d2), (d3)]
      dict(
          fn=lambda x: ragged.RaggedTensor.from_row_starts(x, [0]),
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[[[1, 2, 3]], [[4, 5]], [[6, 7]]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2),
      ),
      # [d1, (d2), (d3)] -> [d1, (d2), (d3)]
      dict(
          fn=lambda x: ragged.map_flat_values(mo.add, x, 1),
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[[[2, 3, 4]], [[5, 6], [7, 8]]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2),
      ),
      # [d1, (d2), (d3)] -> [d1, (d2)]
      dict(
          fn=lambda x: ragged.reduce_sum(x, axis=1),
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[[6], [9, 13]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1, (d2), (d3)] -> [d1, (d3)]
      dict(
          fn=lambda x: ragged.reduce_sum(x, axis=0),
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[[1, 2, 3], [10, 12]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1, (d2), (d3)] -> [d1]
      dict(
          fn=ragged.reduce_sum,
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[6, 22],
          result_dtype=dtypes.int64,
      ),
      # [d1] -> [d1, (d2)]
      dict(
          fn=mo.range,
          elems=[4, 0, 2],
          expected_output=[[0, 1, 2, 3], [], [0, 1]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1] -> [d1, (d2), (d3)]
      dict(
          fn=lambda x: ragged.range(mo.range(x)),
          elems=[5, 0, 3],
          expected_output=[[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]], [],
                           [[], [0], [0, 1]]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2),
      ),
      # [d1, (d2), (d3), (d4a), (d5)] -> [d1, (d2), (d3), (d4b), (d5)]
      dict(
          fn=lambda x: x + np.int64(1),
          elems=[[[[[1, 2, 3]], [[4], [5]]]], [[[[6, 7]]], [[[8], []]]]],
          expected_output=[[[[[2, 3, 4]], [[5], [6]]]],
                           [[[[7, 8]]], [[[9], []]]]],
          result_dtype=ragged.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=4),
      ),
  ])
  def testRaggedMap(
      self,
      fn,
      elems,
      expected_output,
      expected_ragged_rank=None,
      result_ragged_rank=None,
      elems_ragged_rank=None,
      dtype=dtypes.int64,
      result_dtype=None,
      infer_shape=False,
  ):
    elems = ragged.constant(elems, dtype, elems_ragged_rank)
    output = ragged.map_fn(
        fn=fn, elems=elems, dtype=result_dtype, infer_shape=infer_shape)

    expected_rt = ragged.constant(
        expected_output, ragged_rank=expected_ragged_rank)
    self.assertRaggedEqual(expected_rt, output)

  def testRaggedMapOnStructure(self):
    batman = ragged.constant([[1, 2, 3], [4], [5, 6, 7]])
    # [[10, 20, 30], [40], [50, 60, 70]]
    robin = ragged.map_flat_values(mo.multiply, batman, 10)

    features = {'batman': batman, 'robin': robin}

    def _reduce_sum_from_all(f):
      return mo.reduce_sum(f['batman']) + mo.reduce_sum(f['robin'])

    output = ragged.map_fn(
        fn=_reduce_sum_from_all,
        elems=features,
        dtype=dtypes.int32,
    )

    self.assertRaggedEqual(output, [66, 44, 198])

  # Test that mapping over a dict of RTs can produce a dict of RTs.
  def testRaggedMapOnStructure_RaggedOutputs(self):
    batman = ragged.constant([[1, 2, 3], [4], [5, 6, 7]])
    # [[10, 20, 30], [40], [50, 60, 70]]
    robin = ragged.map_flat_values(mo.multiply, batman, 10)

    features = {'batman': batman, 'robin': robin}

    def _increment(f):
      return {
          'batman': f['batman'] + 1,
          'robin': f['robin'] + 1,
      }

    output = ragged.map_fn(
        fn=_increment,
        elems=features,
        infer_shape=False,
        dtype={
            'batman': ragged.RaggedTensorType(
                dtype=dtypes.int32, ragged_rank=1),
            'robin': ragged.RaggedTensorType(
                dtype=dtypes.int32, ragged_rank=1),
        },
    )

    self.assertRaggedEqual(output['batman'], [[2, 3, 4], [5], [6, 7, 8]])
    self.assertRaggedEqual(output['robin'], [[11, 21, 31], [41], [51, 61, 71]])

  def testZip(self):
    x = ragged.constant(
        [[10, 20], [30, 40], [50, 60], [70], [80, 90, 100]], dtypes.int64)
    y = array_ops.expand_dims(mo.range(x.nrows(), dtype=dtypes.int64), axis=1)

    def _zip(foo):
      y_val, x_val = foo
      bar = backend.tile(y_val, array_ops.shape(x_val))
      return array_ops.stack([bar, x_val], axis=1)

    output = ragged.map_fn(
        _zip, (y, x),
        dtype=ragged.RaggedTensorType(dtype=dtypes.int64, ragged_rank=1),
        infer_shape=False)

    self.assertRaggedEqual(
        output,
        [[[0, 10], [0, 20]], [[1, 30], [1, 40]], [[2, 50], [2, 60]],
         [[3, 70]], [[4, 80], [4, 90], [4, 100]]])

  def testBatchGather(self):
    tokens = ragged.constant([['hello', '.', 'there'], ['merhaba'],
                              ['bonjour', '.', 'ca va', '?']])
    indices = ragged.constant([[0, 2], [0], [0, 2]])

    def gather(x):
      tokens_val, indices_val = x
      return array_ops.gather(tokens_val, indices_val)

    data = tokens, indices
    out = ragged.map_fn(
        gather,
        data,
        dtype=ragged.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
        infer_shape=False)

    self.assertRaggedEqual(
        out, [[b'hello', b'there'], [b'merhaba'], [b'bonjour', b'ca va']])

  def testMismatchRaggedRank(self):
    elems = ragged.constant([[[1, 2, 3]], [[4, 5], [6, 7]]])
    fn = lambda x: ragged.reduce_sum(x, axis=0)
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        r'The declared ragged rank (23) mismatches the result (1)'):
      _ = ragged.map_fn(
          fn,
          elems,
          dtype=ragged.RaggedTensorType(dtype=dtypes.int64, ragged_rank=23))

  def testMismatchRaggedRank2(self):
    elems = ragged.constant([[1, 2, 3], [4, 5], [6, 7]])
    fn = lambda x: ragged.RaggedTensor.from_row_starts(x, [0])
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        r'The declared ragged rank (10) mismatches the result (1)'):
      _ = ragged.map_fn(
          fn,
          elems,
          dtype=ragged.RaggedTensorType(dtype=dtypes.int64, ragged_rank=10))
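# A minimal standalone sketch of the same idea against the public TF 2.x API
# (an assumption about the reader's environment; the tests above use internal
# `ragged` test modules instead): `map_flat_values` transforms the flat values
# while keeping the input's row partitioning.
#
#   import tensorflow as tf
#
#   rt = tf.ragged.constant([[1, 2, 3], [], [4, 5]])
#   doubled = tf.ragged.map_flat_values(tf.math.multiply, rt, 2)
#   # doubled.to_list() == [[2, 4, 6], [], [8, 10]]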