def testSquare(self):
  """Test basic testing infrastructure."""
  # NOTE(review): another method in this class is also named `testSquare`;
  # a later definition with the same name shadows this one — confirm
  # whether this copy is still intended to run.

  def square(x):
    return x * x

  examples = [
      Example(arg=(3,), out=9., failure=[], bugs=[]),
      Example(arg=(3.2,), out=10.24, failure=[], bugs=[]),
      Example(
          arg=(tf.constant(3.),), out=tf.constant(9.), failure=[], bugs=[]),
  ]
  self._generic_test(square, examples)
def testSquare(self):
  """Test basic testing infrastructure."""

  def square(x):
    return x * x

  # Tests involving type promotions are added to:
  # //tensorflow/tools/consistency_integration_test/type_promotion_tests.py
  examples = [
      Example(arg=(3,), out=9., failure=[], bugs=[]),
      Example(
          arg=(tf.constant(3.),), out=tf.constant(9.), failure=[], bugs=[]),
  ]
  self._generic_test(square, examples)
def testFloatingPointPrecision(self):
  """Tests inconsistent floating point precision between eager vs. graph.

  Bugs:   b/187097409
  Status: Inconsistent floating point precision
  Issue:  Output returned from a function is different between when the
    function is decorated with tf.function or not. Running the tf.function
    in XLA mode also is inconsistent with running the function in RAW mode
    (i.e. running tf.function eagerly).

  Notes:
  * This behavior is consistent with the tensor wrapping rules (i.e.
    `tf.constant`s are taken as `tf.float32` by default) but requires
    further discussion for achieving better consistency.
  * For getting consistent results back, the suggestion is to explicitly
    construct tensors for inputs. See the test case below that passes
    `tf.constant(3.2)` as `arg`.
  """

  def square(x):
    return x * x

  # The same computation produces a different floating point precision
  # depending on the run mode, so expectations are split per mode.
  # RunMode: RAW
  self._generic_test(
      square,
      [Example(arg=(3.2,), out=10.240000000000002, failure=[], bugs=[])],
      # TODO(b/187250924): `RunMode.SAVED` fails to run.
      skip_modes=[RunMode.FUNCTION, RunMode.XLA, RunMode.SAVED])

  # RunMode: FUNCTION, XLA, SAVED
  self._generic_test(
      square,
      [Example(arg=(3.2,), out=10.239999771118164, failure=[], bugs=[])],
      skip_modes=[RunMode.RAW])

  # Explicitly constructing the input tensor yields one consistent result
  # across every mode.
  # RunMode: RAW, FUNCTION, XLA, SAVED
  self._generic_test(
      square,
      [
          Example(
              arg=(tf.constant(3.2),),
              out=10.24000072479248,
              failure=[],
              bugs=[])
      ],
      skip_modes=[])
def testFailureParamAsDict(self):
  """Tests passing in a `dict` for `failure` param to `_generic_test`."""

  def stack_all(ta):
    return ta.stack()

  ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)
  ta = ta.write(0, tf.constant([1.0, 2.0]))
  ta = ta.write(1, tf.constant([3.0, 4.0]))
  expected = tf.constant([[1.0, 2.0], [3.0, 4.0]])

  # FUNCTION and XLA are expected to fail with the same message.
  shallow_structure_msg = ('If shallow structure is a sequence, input must '
                           'also be a sequence')
  expected_failures = {
      RunMode.FUNCTION: shallow_structure_msg,
      RunMode.XLA: shallow_structure_msg,
      RunMode.SAVED: 'Found zero restored functions for caller function',
  }
  input_signature = [
      tf.TensorArraySpec(
          element_shape=None, dtype=tf.float32, dynamic_size=True)
  ]
  self._generic_test(
      stack_all,
      [
          Example(
              arg=(ta,),
              out=expected,
              failure=expected_failures,
              bugs=['b/162452468'])
      ],
      input_signature=input_signature,
      skip_modes=[])
def testObjectInput(self):
  """Test taking a Python object. Should work in tf.function but not sm."""

  class Holder:

    def __init__(self):
      self.value = 3.0

  def read_value(obj):
    return obj.value

  self._generic_test(
      read_value,
      [Example(arg=(Holder(),), out=3.0, failure=[RunMode.SAVED], bugs=[])])
def testSkipModes(self):
  """Tests `skip_modes` option available with `_generic_test`."""

  class Wrapper:

    def __init__(self, x):
      self.value = x

  def wrap(x):
    return Wrapper(x)

  # Every mode is skipped because the function would fail in all of them.
  all_modes = [RunMode.RAW, RunMode.XLA, RunMode.FUNCTION, RunMode.SAVED]
  self._generic_test(
      wrap,
      [Example(arg=(3.,), out=3.0, failure=[], bugs=[])],
      skip_modes=all_modes)
def testTensorArrayBasic(self):
  """Tests `_generic_test` with a `tf.TensorArray` as input to tf.function."""

  def stack_all(ta):
    return ta.stack()

  ta = tf.TensorArray(dtype=tf.int32, dynamic_size=True, size=0)
  ta = ta.write(0, tf.constant([1, 2, 3]))
  ta = ta.write(1, tf.constant([4, 5, 6]))

  example = Example(
      arg=(ta,),
      out=tf.constant([[1, 2, 3], [4, 5, 6]]),
      failure=[RunMode.SAVED],  # TODO(b/187250924): Investigate.
      bugs=['b/180921284'])
  self._generic_test(stack_all, [example])
def testObjectOutput(self):
  """Test returning a Python object. Doesn't and shouldn't work."""

  class Wrapper:

    def __init__(self, x):
      self.value = x

  def wrap(x):
    return Wrapper(x)

  failing_modes = [RunMode.XLA, RunMode.FUNCTION, RunMode.SAVED]
  self._generic_test(
      wrap,
      [Example(arg=(3.,), out=3.0, failure=failing_modes, bugs=[])])
def testNotEqualOutput(self):
  """Tests that an error is thrown if the outputs are not equal.

  This test case is meant to test the consistency test infrastructure that
  the output of executing `f()` matches the groundtruth we provide as the
  `out` param in `_generic_test()`.
  """
  mock_func = test.mock.MagicMock(name='method')
  mock_func.return_value = 0  # This differs from the `expected` value below.
  mock_func.__doc__ = 'Tested with a mock function.'

  all_modes = [RunMode.RAW, RunMode.FUNCTION, RunMode.XLA, RunMode.SAVED]
  expected = 1  # Randomly picked value just for testing purposes.
  # Every input should produce a mismatch (0 != 1) in every run mode.
  for arg in [3, 3.2, tf.constant(3.)]:
    self._generic_test(
        mock_func,
        [Example(arg=(arg,), out=expected, failure=all_modes, bugs=[])])