def test_hardshrink(self, dtype):
        # With the default band (-0.5, 0.5), entries inside it are zeroed
        # and entries outside pass through unchanged.
        inputs = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0], dtype=dtype)
        want = tf.constant([-2.0, 0.0, 0.0, 0.0, 2.0], dtype=dtype)
        self.assertAllCloseAccordingToType(hardshrink(inputs), want)

        # Widening the band to [-1, 1] zeroes the same entries for this input.
        want_wide = tf.constant([-2.0, 0.0, 0.0, 0.0, 2.0], dtype=dtype)
        self.assertAllCloseAccordingToType(
            hardshrink(inputs, lower=-1.0, upper=1.0), want_wide)
    def test_unknown_shape(self):
        # Trace the op once with a fully-unknown shape, then feed tensors of
        # several ranks through the resulting concrete function.
        concrete = hardshrink.get_concrete_function(
            tf.TensorSpec(shape=None, dtype=tf.float32))

        shapes = [(1,), (1, 2), (1, 2, 3), (1, 2, 3, 4)]
        for shp in shapes:
            ones = tf.ones(shape=shp, dtype=tf.float32)
            self.assertAllClose(concrete(ones), hardshrink(ones))
Example #3
0
    def test_theoretical_gradients(self, dtype):
        # Jacobian computation is numerically unstable in float16, so
        # theoretical gradients are checked for float32/float64 only.
        inputs = tf.constant([-1.5, -0.5, 0.5, 1.5], dtype=dtype)

        # Pass the function directly; a wrapping lambda adds nothing.
        theoretical, numerical = tf.test.compute_gradient(hardshrink, [inputs])
        self.assertAllCloseAccordingToType(theoretical, numerical, atol=1e-4)
Example #4
0
    def test_gradients(self, dtype):
        # Gradients of the custom op must agree with the pure-TF reference.
        inputs = tf.constant([-1.5, -0.5, 0.5, 1.5], dtype=dtype)

        with tf.GradientTape(persistent=True) as tape:
            tape.watch(inputs)
            reference_out = _ref_hardshrink(inputs)
            actual_out = hardshrink(inputs)
        reference_grad = tape.gradient(reference_out, inputs)
        actual_grad = tape.gradient(actual_out, inputs)
        self.assertAllCloseAccordingToType(actual_grad, reference_grad)
Example #5
0
    def verify_funcs_are_equivalent(self, dtype):
        # Random 4x4 input and a random valid (lower, upper) pair with
        # lower <= upper; compare the native op and the Python composition
        # on both forward values and gradients.
        samples = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)
        tensor_in = tf.convert_to_tensor(samples)
        low = np.random.uniform(-10, 10)
        high = low + np.random.uniform(0, 10)

        with tf.GradientTape(persistent=True) as tape:
            tape.watch(tensor_in)
            out_native = hardshrink(tensor_in, low, high)
            out_py = _hardshrink_py(tensor_in, low, high)

        self.assertAllCloseAccordingToType(out_native, out_py, atol=1e-4)

        grad_native = tape.gradient(out_native, tensor_in)
        grad_py = tape.gradient(out_py, tensor_in)

        self.assertAllCloseAccordingToType(grad_native, grad_py, atol=1e-4)
 def test_invalid(self):
     # Swapped bounds (lower > upper) must be rejected by the op kernel.
     with self.assertRaisesOpError(
             "lower must be less than or equal to upper."):  # pylint: disable=bad-continuation
         out = hardshrink(tf.ones(shape=(1, 2, 3)), lower=2.0, upper=-2.0)
         self.evaluate(out)
Example #7
0
 def test_hardshrink(self, dtype):
     # Random inputs in [-1, 1); the op must match the reference both with
     # default thresholds and with an explicit (-2, 2) band.
     samples = (np.random.rand(2, 3, 4) * 2.0 - 1.0).astype(dtype)
     self.assertAllCloseAccordingToType(
         hardshrink(samples), _ref_hardshrink(samples))
     self.assertAllCloseAccordingToType(
         hardshrink(samples, -2.0, 2.0), _ref_hardshrink(samples, -2.0, 2.0))