Example #1
 def test_tensor_float_32_global_variable(self):
     self.assertTrue(config.tensor_float_32_execution_enabled())
     self.assertTrue(test_ops.is_tensor_float32_enabled())
     config.enable_tensor_float_32_execution(False)
     self.assertFalse(config.tensor_float_32_execution_enabled())
     self.assertFalse(test_ops.is_tensor_float32_enabled())
     config.enable_tensor_float_32_execution(True)
     self.assertTrue(config.tensor_float_32_execution_enabled())
     self.assertTrue(test_ops.is_tensor_float32_enabled())
Example #2
  def test_tensor_float_32_disabled(self):
    self.assertTrue(config.tensor_float_32_execution_enabled())
    config.enable_tensor_float_32_execution(False)
    self.assertFalse(config.tensor_float_32_execution_enabled())

    x = array_ops.fill((8, 8), 1 + 2**-20)
    y = array_ops.ones((8, 8))
    out = math_ops.matmul(x, y)
    # With TF32 disabled, the 2**-20 term is preserved, so each output element
    # is exactly 8 * (1 + 2**-20).
    expected = array_ops.fill((8, 8), 8 * (1 + 2**-20))
    self.assertAllEqual(out, expected)
Example #3
 def decorated(self, *args, **kwargs):
   # f is the wrapped test method, captured from the enclosing decorator scope.
   allowed = config.tensor_float_32_execution_enabled()
   try:
     config.enable_tensor_float_32_execution(False)
     f(self, *args, **kwargs)
   finally:
     # Restore the original TF32 setting even if the test fails.
     config.enable_tensor_float_32_execution(allowed)
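The snippet above is only the inner function of a test decorator; the wrapper that supplies f is not shown. Below is a minimal self-contained sketch of how such a decorator could be assembled, assuming config is tensorflow.python.framework.config as in TensorFlow's own tests; the name run_without_tf32 is hypothetical and for illustration only.

import functools

from tensorflow.python.framework import config


def run_without_tf32(f):
  """Hypothetical decorator: runs the wrapped test with TF32 disabled."""
  @functools.wraps(f)
  def decorated(self, *args, **kwargs):
    allowed = config.tensor_float_32_execution_enabled()
    try:
      config.enable_tensor_float_32_execution(False)
      return f(self, *args, **kwargs)
    finally:
      # Always restore the caller's TF32 setting.
      config.enable_tensor_float_32_execution(allowed)
  return decorated

A test method would then be decorated with @run_without_tf32 so it always runs at full float32 precision.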
Example #4
 def setUp(self):
     self.tf32_keep_ = config.tensor_float_32_execution_enabled()
     config.enable_tensor_float_32_execution(False)
     # Increase tolerances from 1e-6 to 1e-4.
     self._atol[dtypes.float32] = 1e-4
     self._atol[dtypes.complex64] = 1e-4
     self._rtol[dtypes.float32] = 1e-4
     self._rtol[dtypes.complex64] = 1e-4
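The setUp above stashes the previous TF32 setting in self.tf32_keep_, but the matching tearDown is not shown in these examples. A minimal sketch of a restoring tearDown, assuming the usual save-in-setUp / restore-in-tearDown pattern:

 def tearDown(self):
     # Put the TF32 flag back the way the test runner found it.
     config.enable_tensor_float_32_execution(self.tf32_keep_)
     super().tearDown()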
Example #5
 def setUp(self):
     self.tf32_keep_ = config.tensor_float_32_execution_enabled()
     config.enable_tensor_float_32_execution(False)
     # Increase tolerances from 1e-6 to 1e-4.
     self._atol[dtypes.float32] = 1e-4
     self._atol[dtypes.complex64] = 1e-4
     self._rtol[dtypes.float32] = 1e-4
     self._rtol[dtypes.complex64] = 1e-4
     super(NonSquareLinearOperatorBlockDiagTest, self).setUp()
Example #6
  def test_tf32_enabled(self):
    self.assertTrue(config.tensor_float_32_execution_enabled())

    x = array_ops.fill((8, 8), 1 + 2**-20)
    y = array_ops.ones((8, 8))
    out = math_ops.matmul(x, y)
    # In TF32, each element of x is rounded to 1, so every output element is 8.
    expected = array_ops.fill((8, 8), 8)
    self.assertAllEqual(out, expected)
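Why 1 + 2**-20 is used as the probe value: float32 keeps 23 fraction bits, so the 2**-20 term survives a plain float32 matmul, while TF32 keeps only 10 fraction bits, so each input rounds to exactly 1.0 and every row sum becomes 8. Below is a standalone sketch of that rounding in plain Python; round_to_fraction_bits is an illustrative helper, not anything from TensorFlow.

def round_to_fraction_bits(x, bits):
  # Round x (assumed to lie in [1, 2)) to the given number of fraction bits.
  scale = 2 ** bits
  return 1.0 + round((x - 1.0) * scale) / scale

probe = 1 + 2**-20
print(round_to_fraction_bits(probe, 23))  # float32-like: 1.0000009536743164, term kept
print(round_to_fraction_bits(probe, 10))  # TF32-like: 1.0, term lost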
Example #7
 def setUp(self):
     self.tf32_keep_ = config.tensor_float_32_execution_enabled()
     config.enable_tensor_float_32_execution(False)
     # Loosen tolerances since we are testing with condition numbers as high as
     # 1e4.
     self._atol[dtypes.float32] = 1e-5
     self._rtol[dtypes.float32] = 1e-5
     self._atol[dtypes.float64] = 1e-10
     self._rtol[dtypes.float64] = 1e-10
     self._rtol[dtypes.complex64] = 1e-4
Example #8
 def setUp(self):
     self.tf32_keep_ = config.tensor_float_32_execution_enabled()
     config.enable_tensor_float_32_execution(False)
     # Loosen tolerances since we are testing with condition numbers as high as
     # 1e4.  This class does not use Cholesky, and thus needs even looser
     # tolerances.
     self._atol[dtypes.float32] = 1e-4
     self._rtol[dtypes.float32] = 1e-4
     self._atol[dtypes.float64] = 1e-9
     self._rtol[dtypes.float64] = 1e-9
     self._rtol[dtypes.complex64] = 2e-4
Example #9
 def setUp(self):
     self.tf32_keep_ = config.tensor_float_32_execution_enabled()
     config.enable_tensor_float_32_execution(False)
Example #10
 def setUp(self):
     self.tf32_keep_ = config.tensor_float_32_execution_enabled()
     config.enable_tensor_float_32_execution(False)
     self._atol[dtypes.complex64] = 1e-5
     self._rtol[dtypes.complex64] = 1e-5