def testConcurrentExecutesWithoutError(self):
  """Two matrix_logarithm ops evaluated in one step must agree.

  Both inputs are drawn with the same op-level seed, so the two logm
  results are expected to be bitwise identical even when the ops run
  concurrently in a single Session.run call.
  """
  with self.session(use_gpu=True) as sess:
    # matrix_logarithm only accepts complex input, hence the cast.
    input_a = math_ops.cast(
        random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
    input_b = math_ops.cast(
        random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
    log_a = gen_linalg_ops.matrix_logarithm(input_a)
    log_b = gen_linalg_ops.matrix_logarithm(input_b)
    # Fetch both ops in a single run so they may execute concurrently.
    results = sess.run([log_a, log_b])
    self.assertAllEqual(results[0], results[1])
Example #2
0
 def testConcurrentExecutesWithoutError(self):
     """Concurrent matrix_logarithm ops on identically-seeded inputs match."""
     with self.session(use_gpu=True) as sess:
         # Same seed for both draws; the cast is required because
         # matrix_logarithm operates on complex tensors.
         first = math_ops.cast(
             random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
         second = math_ops.cast(
             random_ops.random_normal([5, 5], seed=42), dtypes.complex64)
         first_log = gen_linalg_ops.matrix_logarithm(first)
         second_log = gen_linalg_ops.matrix_logarithm(second)
         # Evaluate both in one step; identical seeds imply identical results.
         first_val, second_val = self.evaluate([first_log, second_log])
         self.assertAllEqual(first_val, second_val)
 def testConcurrentExecutesWithoutError(self):
   """Stateless-RNG variant: concurrent logm ops on equal inputs agree."""
   shape = [5, 5]
   rng_seed = [42, 24]

   def _complex_input():
     # stateless_random_normal is deterministic for a fixed seed, so both
     # calls yield the same matrix; cast because logm needs complex input.
     return math_ops.cast(
         stateless_random_ops.stateless_random_normal(shape, seed=rng_seed),
         dtypes.complex64)

   lhs = _complex_input()
   rhs = _complex_input()
   # Sanity-check the determinism assumption before comparing outputs.
   self.assertAllEqual(lhs, rhs)
   lhs_log = gen_linalg_ops.matrix_logarithm(lhs)
   rhs_log = gen_linalg_ops.matrix_logarithm(rhs)
   lhs_val, rhs_val = self.evaluate([lhs_log, rhs_log])
   self.assertAllEqual(lhs_val, rhs_val)
Example #4
0
 def _verifyLogarithm(self, x, np_type):
     """Check that expm(logm(A)) round-trips back to A for dtype np_type."""
     mat = x.astype(np_type)
     with test_util.use_gpu():
         # Composing the exponential with the logarithm should recover
         # the original matrix up to numerical tolerance.
         roundtrip = linalg_impl.matrix_exponential(
             gen_linalg_ops.matrix_logarithm(mat))
         recovered = self.evaluate(roundtrip)
         self.assertAllClose(mat, recovered, rtol=1e-4, atol=1e-3)
 def _verifyLogarithm(self, x, np_type):
   """Assert that expm(logm(A)) == A (within tolerance) for dtype np_type."""
   mat = x.astype(np_type)
   with self.cached_session(use_gpu=True):
     # exp and log are inverses, so the composition must reproduce mat.
     roundtrip = linalg_impl.matrix_exponential(
         gen_linalg_ops.matrix_logarithm(mat))
     self.assertAllClose(mat, roundtrip.eval(), rtol=1e-4, atol=1e-3)
Example #6
0
 def _verifyLogarithm(self, x, np_type):
     """Round-trip check: exponentiating the logm of A must recover A."""
     mat = x.astype(np_type)
     with self.cached_session(use_gpu=True):
         # expm undoes logm, so the result should be close to the input.
         recovered = linalg_impl.matrix_exponential(
             gen_linalg_ops.matrix_logarithm(mat)).eval()
         self.assertAllClose(mat, recovered, rtol=1e-4, atol=1e-3)
 def _verifyLogarithm(self, x, np_type):
   """Verify expm(logm(A)) reproduces A for the given numpy dtype."""
   mat = x.astype(np_type)
   with test_util.use_gpu():
     # The exponential of the logarithm should give back the input matrix.
     logm = gen_linalg_ops.matrix_logarithm(mat)
     recovered = self.evaluate(linalg_impl.matrix_exponential(logm))
     self.assertAllClose(mat, recovered, rtol=1e-4, atol=1e-3)
Example #8
0
 def benchmarkMatrixLogarithmOp(self):
     """Benchmark matrix_logarithm on the CPU over each configured shape."""
     for shape in self.shapes:
         # Fresh graph per shape so timings are isolated from one another.
         with ops.Graph().as_default(), \
             session.Session(config=benchmark.benchmark_config()) as sess, \
             ops.device("/cpu:0"):
             mat = self._GenerateMatrix(shape)
             logm_op = gen_linalg_ops.matrix_logarithm(mat)
             variables.global_variables_initializer().run()
             # group() discards the output so only op execution is timed.
             self.run_op_benchmark(
                 sess,
                 control_flow_ops.group(logm_op),
                 min_iters=25,
                 name="matrix_logarithm_cpu_{shape}".format(shape=shape))
 def benchmarkMatrixLogarithmOp(self):
   """Time the matrix_logarithm op on CPU for every benchmark shape."""
   for shape in self.shapes:
     # Build each benchmark in its own graph, pinned to the CPU device.
     with ops.Graph().as_default(), \
         session.Session(config=benchmark.benchmark_config()) as sess, \
         ops.device("/cpu:0"):
       input_matrix = self._GenerateMatrix(shape)
       logarithm = gen_linalg_ops.matrix_logarithm(input_matrix)
       variables.global_variables_initializer().run()
       # Wrap in group() so the benchmark measures execution, not fetch.
       benchmark_name = "matrix_logarithm_cpu_{shape}".format(shape=shape)
       self.run_op_benchmark(
           sess,
           control_flow_ops.group(logarithm),
           min_iters=25,
           name=benchmark_name)
Example #10
0
 def testWrongDimensions(self):
     """A rank-1 tensor must be rejected: logm needs at least a matrix."""
     vector = constant_op.constant([1., 2.], dtype=dtypes.complex64)
     # Shape validation happens at graph construction time.
     with self.assertRaises(ValueError):
         gen_linalg_ops.matrix_logarithm(vector)
Example #11
0
 def testNonSquareMatrix(self):
     """matrix_logarithm must reject a non-square (2x3) input matrix."""
     non_square = np.array([[1., 2., 3.], [3., 4., 5.]], dtype=np.complex64)
     # The op's shape function should raise before any kernel runs.
     with self.assertRaises(ValueError):
         gen_linalg_ops.matrix_logarithm(non_square)
 def testWrongDimensions(self):
   """Rank-1 input is invalid: matrix_logarithm requires rank >= 2."""
   rank1_input = constant_op.constant([1., 2.], dtype=dtypes.complex64)
   # Expect a construction-time shape error, not a runtime failure.
   with self.assertRaises(ValueError):
     gen_linalg_ops.matrix_logarithm(rank1_input)
 def testNonSquareMatrix(self):
   """A 2x3 (non-square) matrix must raise ValueError from logm."""
   rectangular = np.array([[1., 2., 3.], [3., 4., 5.]], dtype=np.complex64)
   # Square-matrix validation fires during op construction.
   with self.assertRaises(ValueError):
     gen_linalg_ops.matrix_logarithm(rectangular)