def _testSelu(self, np_features, use_gpu=False):
    # Compare the TensorFlow selu op against the NumPy reference values.
    np_selu = self._npSelu(np_features)
    with self.test_session(use_gpu=use_gpu):
        selu = nn_ops.selu(np_features)
        tf_selu = selu.eval()
    self.assertAllClose(np_selu, tf_selu)
    self.assertShapeEqual(np_selu, selu)
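
These tests compare against a _npSelu NumPy reference helper that is not shown in the snippets. A minimal sketch of what such a helper could look like, assuming the standard SELU constants from Klambauer et al. (2017) (the exact helper used by the test class may differ):

import numpy as np

def _npSelu(np_features):
    # SELU: scale * x for x > 0, scale * alpha * (exp(x) - 1) otherwise.
    scale = 1.0507009873554804934193349852946
    alpha = 1.6732632423543772848170429916717
    np_features = np.asarray(np_features)
    return np.where(np_features < 0.0,
                    scale * alpha * (np.exp(np_features) - 1.0),
                    scale * np_features)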
Example #2
def _testSelu(self, np_features, use_gpu=False):
  np_selu = self._npSelu(np_features)
  with self.test_session(use_gpu=use_gpu):
    selu = nn_ops.selu(np_features)
    tf_selu = selu.eval()
  self.assertAllClose(np_selu, tf_selu)
  self.assertShapeEqual(np_selu, selu)
Example #3
def testGradientFloat64(self):
  # Numeric-vs-analytic gradient check for selu in float64 (graph mode).
  with self.cached_session():
    x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
    x = constant_op.constant(x_val, dtype=dtypes.float64, name="x")
    y = nn_ops.selu(x, name="selu")
    x_init = np.asarray(x_val, dtype=np.float64, order="F")
    err = gradient_checker.compute_gradient_error(
        x, [2, 5], y, [2, 5], x_init_value=x_init)
  print("selu (float64) gradient err = ", err)
  self.assertLess(err, 1e-6)
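
For comparison only: the same numeric-versus-analytic gradient check can be written against the TF 2.x eager API. This is not part of the original test, just a rough equivalent sketch using tf.test.compute_gradient (the v2 counterpart of gradient_checker.compute_gradient_error):

import numpy as np
import tensorflow as tf

x = tf.constant([[-0.9, -0.7, -0.5, -0.3, -0.1],
                 [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=tf.float64)
# Returns (theoretical, numerical) Jacobians; their max difference plays
# the same role as the err value asserted above.
theoretical, numerical = tf.test.compute_gradient(tf.nn.selu, [x])
err = np.max(np.abs(theoretical[0] - numerical[0]))
assert err < 1e-6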
Example #4
def testGradientFloat64(self):
  with self.test_session():
    x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
    x = constant_op.constant(x_val, dtype=dtypes.float64, name="x")
    y = nn_ops.selu(x, name="selu")
    x_init = np.asarray(x_val, dtype=np.float64, order="F")
    err = gradient_checker.compute_gradient_error(
        x, [2, 5], y, [2, 5], x_init_value=x_init)
  print("selu (float64) gradient err = ", err)
  self.assertLess(err, 1e-6)
Example #5
def testGradGradFloat32(self):
  # Numerically check the gradient of selu's gradient (second derivative).
  with self.cached_session():
    x = constant_op.constant(
        [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
        shape=[2, 5],
        name="x")
    y = nn_ops.selu(x, name="selu")
    z = gradients_impl.gradients(y, x)
    x_init = np.asarray(
        [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
        dtype=np.float32,
        order="F")
    err = gradient_checker.compute_gradient_error(
        x, [2, 5], z[0], [2, 5], x_init_value=x_init)
  print("selu (float32) gradient of gradient err = ", err)
  self.assertLess(err, 1e-4)
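
The check above builds the second derivative in graph mode via gradients_impl.gradients. As a rough eager-mode equivalent (not taken from the test suite), the same quantity can be obtained by nesting gradient tapes:

import tensorflow as tf

x = tf.constant([[-0.9, -0.7, -0.5, -0.3, -0.1],
                 [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=tf.float32)
with tf.GradientTape() as outer_tape:
    outer_tape.watch(x)
    with tf.GradientTape() as inner_tape:
        inner_tape.watch(x)
        y = tf.nn.selu(x)
    dy_dx = inner_tape.gradient(y, x)    # first derivative of selu
d2y_dx2 = outer_tape.gradient(dy_dx, x)  # second derivative of selu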
Example #6
def testGradGradFloat32(self):
  with self.test_session():
    x = constant_op.constant(
        [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
        shape=[2, 5],
        name="x")
    y = nn_ops.selu(x, name="selu")
    z = gradients_impl.gradients(y, x)
    x_init = np.asarray(
        [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
        dtype=np.float32,
        order="F")
    err = gradient_checker.compute_gradient_error(
        x, [2, 5], z[0], [2, 5], x_init_value=x_init)
  print("selu (float32) gradient of gradient err = ", err)
  self.assertLess(err, 1e-4)
Example #7
def f(x):
    # Returns d(selu)/dx, computed with autodiff on a gradient tape.
    assert x.dtype == dtypes.float64
    with backprop.GradientTape() as tape:
        tape.watch(x)
        y = nn_ops.selu(x)
    return tape.gradient(y, x)
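
For intuition about what f returns: the selu gradient has the closed form d/dx selu(x) = scale for x > 0 and scale * alpha * exp(x) for x <= 0. A small NumPy sketch of that reference gradient (the name np_selu_grad is hypothetical; the constants are assumed to match the TensorFlow kernel):

import numpy as np

def np_selu_grad(x):
    # Closed-form derivative of selu, evaluated elementwise.
    scale = 1.0507009873554804934193349852946
    alpha = 1.6732632423543772848170429916717
    x = np.asarray(x, dtype=np.float64)
    return np.where(x > 0.0, scale, scale * alpha * np.exp(x))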
Example #8
def _testSelu(self, np_features):
    # Eager-mode check: compare nn_ops.selu output directly against NumPy.
    np_selu = self._npSelu(np_features)
    tf_selu = nn_ops.selu(np_features)
    self.assertAllClose(np_selu, tf_selu)
    self.assertShapeEqual(np_selu, tf_selu)
Example #9
def f(x):
  assert x.dtype == dtypes.float64
  with backprop.GradientTape() as tape:
    tape.watch(x)
    y = nn_ops.selu(x)
  return tape.gradient(y, x)
Example #10
def _testSelu(self, np_features):
  np_selu = self._npSelu(np_features)
  tf_selu = nn_ops.selu(np_features)
  self.assertAllClose(np_selu, tf_selu)
  self.assertShapeEqual(np_selu, tf_selu)