def _test_fn():
  with backprop.GradientTape() as tape:
    x = array_ops.ones([5, 5])
    tape.watch(x)
    y = math_ops.reduce_euclidean_norm(x, axis=constant_op.constant(1))
  return y, tape.gradient(y, x)
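
# A minimal reference sketch (not part of the original tests; the helper name
# is illustrative): reduce_euclidean_norm(x, axis) should agree with
# sqrt(reduce_sum(square(x), axis)), whose gradient is x / norm broadcast back
# over the reduced axis. For ones([5, 5]) reduced over axis 1, every gradient
# entry is therefore 1 / sqrt(5).
def _reference_norm_and_grad(x, axis):
  norm = math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x), axis=axis))
  grad = x / array_ops.expand_dims(norm, axis)
  return norm, grad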
def test2D_4(self):
  for dtype in [dtypes.float32, dtypes.float64]:
    x = constant_op.constant([[3], [4]], dtype=dtype)
    grads = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
    err = gradient_checker_v2.max_error(*grads)
    self.assertLess(err, 1e-3)
def test3D_4(self):
  for dtype in [dtypes.float32, dtypes.float64]:
    x = constant_op.constant(
        [[[-3, 5], [7, 11]], [[13, 17], [19, 23]]], dtype=dtype)
    grads = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 2), [x])
    err = gradient_checker_v2.max_error(*grads)
    self.assertLess(err, 2e-3)
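
# What the two gradient-checker tests above measure (an illustrative summary;
# the helper name is not in the original): compute_gradient returns the
# theoretical Jacobian (from the registered gradient) and a finite-difference
# estimate, and max_error is the largest absolute discrepancy between them,
# which the asserts then bound.
def _max_gradient_error(f, x):
  jacobians = gradient_checker_v2.compute_gradient(f, [x])
  return gradient_checker_v2.max_error(*jacobians)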
def testComplex128(self):
  for rank in range(1, _MAX_RANK + 1):
    np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
    self._compareAllAxes(np_arr)

def testDegenerate(self):
  with self.session(use_gpu=True):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      # A large number is needed to get Eigen to die.
      x = array_ops.zeros((0, 9938), dtype=dtype)
      y = math_ops.reduce_euclidean_norm(x, [0]).eval()
      self.assertEqual(y.shape, (9938,))
      self.assertAllEqual(y, np.zeros(9938))
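
# Sanity sketch in plain NumPy (illustrative, not part of the original tests):
# reducing over an empty axis sums zero squared terms, and sqrt(0.0) is 0.0,
# so the euclidean norm of a (0, n) tensor along axis 0 is n zeros rather than
# NaN or an error.
def _numpy_degenerate_norm(n):
  return np.sqrt(np.square(np.zeros((0, n))).sum(axis=0))  # == np.zeros(n)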
def testZeros(self):
  for dtype in [dtypes.float32, dtypes.float64]:
    x = constant_op.constant([0.0, -0.0], dtype=dtype)
    with backprop.GradientTape() as tape:
      tape.watch(x)
      y = math_ops.reduce_euclidean_norm(x)
    dx = tape.gradient(y, x)
    dx_answer = constant_op.constant(
        [float("NaN"), float("NaN")], dtype=dtype)
    self.assertAllClose(dx, dx_answer)
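
# Why NaN is the expected gradient at the origin (an explanatory sketch, not
# part of the original tests; the helper name is illustrative):
# d||x|| / dx_i = x_i / ||x||, which is 0 / 0 at x == 0, and 0 / 0 evaluates
# to NaN under IEEE-754 arithmetic.
def _numpy_grad_at_zero(n):
  zero = np.zeros(n)
  with np.errstate(invalid="ignore"):  # silence the 0/0 RuntimeWarning
    return zero / np.linalg.norm(zero)  # array of NaNs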
def _tf_reduce(self, x, reduction_axes, keepdims):
  return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)