import itertools

import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op, ops
from tensorflow.python.ops import (control_flow_ops, gen_linalg_ops,
                                   random_ops, variables)

def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    # Both random ops share the same op-level seed, so concurrent
    # execution must produce identical exponentials.
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    expm1 = gen_linalg_ops._matrix_exponential(matrix1)
    expm2 = gen_linalg_ops._matrix_exponential(matrix2)
    expm = sess.run([expm1, expm2])
    self.assertAllEqual(expm[0], expm[1])
def _verifyLogarithm(self, x, np_type):
  inp = x.astype(np_type)
  with self.test_session(use_gpu=True):
    # Verify that expm(logm(A)) == A.
    tf_ans = gen_linalg_ops._matrix_exponential(
        gen_linalg_ops._matrix_logarithm(inp))
    out = tf_ans.eval()
    self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
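# The concrete test cases that drive _verifyLogarithm are not part of these
# snippets. A minimal hypothetical driver, assuming a symmetric
# positive-definite input so that the matrix logarithm is real-valued:
def testVerifyLogarithmSmallMatrix(self):
  matrix = np.array([[2., 1.], [1., 2.]])
  for np_type in [np.float32, np.float64]:
    self._verifyLogarithm(matrix, np_type)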
def benchmarkMatrixExponentialOp(self):
  for shape in self.shapes:
    with ops.Graph().as_default(), \
        session.Session() as sess, \
        ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      expm = gen_linalg_ops._matrix_exponential(matrix)
      variables.global_variables_initializer().run()
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(expm),
          min_iters=25,
          name="matrix_exponential_cpu_{shape}".format(shape=shape))
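# self.shapes and self._GenerateMatrix are assumed by the benchmark above but
# not shown in these snippets. A minimal sketch (the names and shapes are
# assumptions): batched, diagonally dominant, hence well-conditioned, float32
# matrices wrapped in a Variable so the op reads from initialized state.
shapes = [(4, 4), (16, 16), (256, 256)]

def _GenerateMatrix(self, shape):
  batch_shape = shape[:-2]
  rows = shape[-1]
  matrix = (np.ones((rows, rows), dtype=np.float32) / (2.0 * rows) +
            np.eye(rows, dtype=np.float32))
  return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))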
def _verifyExponential(self, x, np_type):
  inp = x.astype(np_type)
  with self.test_session(use_gpu=True):
    tf_ans = gen_linalg_ops._matrix_exponential(inp)
    if x.size == 0:
      np_ans = np.empty(x.shape, dtype=np_type)
    elif x.ndim > 2:
      # Batched input: apply the reference expm to each inner matrix.
      np_ans = np.zeros(inp.shape, dtype=np_type)
      for i in itertools.product(*[range(dim) for dim in inp.shape[:-2]]):
        np_ans[i] = np_expm(inp[i])
    else:
      np_ans = np_expm(inp)
    out = tf_ans.eval()
    self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
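# np_expm is the NumPy-side reference used above but not shown in these
# snippets. A minimal sketch; delegating to scipy.linalg.expm is an
# assumption, and the original tests may use their own series-based helper:
def np_expm(x):
  from scipy.linalg import expm  # Pade-approximant matrix exponential.
  return expm(x)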
def testWrongDimensions(self):
  # The input to the exponential should be at least a 2-dimensional tensor.
  tensor3 = constant_op.constant([1., 2.])
  with self.assertRaises(ValueError):
    gen_linalg_ops._matrix_exponential(tensor3)
def testNonSquareMatrix(self):
  # Attempting the exponential of a non-square matrix should raise an error.
  with self.assertRaises(ValueError):
    gen_linalg_ops._matrix_exponential(
        np.array([[1., 2., 3.], [3., 4., 5.]]))
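# Assuming these snippets come from a TensorFlow 1.x test module, the usual
# entry point (hypothetical here, since the enclosing class is not shown)
# would be:
from tensorflow.python.platform import test

if __name__ == "__main__":
  test.main()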