def benchmarkMatrixExponentialOp(self):
  for shape in self.shapes:
    with ops.Graph().as_default(), \
        session.Session() as sess, \
        ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      expm = linalg_impl.matrix_exponential(matrix)
      variables.global_variables_initializer().run()
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(expm),
          min_iters=25,
          name="matrix_exponential_cpu_{shape}".format(
              shape=shape))

    if test.is_gpu_available(True):
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/gpu:0"):
        matrix = self._GenerateMatrix(shape)
        expm = linalg_impl.matrix_exponential(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(expm),
            min_iters=25,
            name="matrix_exponential_gpu_{shape}".format(
                shape=shape))
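# For quick interactive profiling outside the TF1 benchmark harness above,
# here is a minimal eager-mode timing sketch. It is illustrative, not part of
# the original tests; tf.linalg.expm is the public alias of
# linalg_impl.matrix_exponential, and the shapes are arbitrary assumptions.
import time

import numpy as np
import tensorflow as tf

for n in (16, 64, 256):
  matrix = tf.constant(np.random.randn(n, n), dtype=tf.float32)
  tf.linalg.expm(matrix)  # Warm-up call so one-time setup is not timed.
  start = time.time()
  for _ in range(25):
    tf.linalg.expm(matrix).numpy()  # .numpy() forces the result to be ready.
  print("n=%d: %.4f s/iter" % (n, (time.time() - start) / 25))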
def testConcurrentExecutesWithoutError(self):
  with self.test_session(use_gpu=True) as sess:
    matrix1 = random_ops.random_normal([5, 5], seed=42)
    matrix2 = random_ops.random_normal([5, 5], seed=42)
    expm1 = linalg_impl.matrix_exponential(matrix1)
    expm2 = linalg_impl.matrix_exponential(matrix2)
    expm = sess.run([expm1, expm2])
    self.assertAllEqual(expm[0], expm[1])
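# A TF 2 eager sketch of the same determinism property, assuming only the
# public API: stateless RNGs with equal seeds yield identical inputs, so the
# two exponentials must agree exactly. This mirrors, but is not, the test
# above.
import tensorflow as tf

m1 = tf.random.stateless_normal([5, 5], seed=[42, 0])
m2 = tf.random.stateless_normal([5, 5], seed=[42, 0])
e1 = tf.linalg.expm(m1)
e2 = tf.linalg.expm(m2)
assert bool(tf.reduce_all(e1 == e2))  # Identical seeds, identical results.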
def _verifyLogarithm(self, x, np_type):
  inp = x.astype(np_type)
  with self.cached_session(use_gpu=True):
    # Verify that expm(logm(A)) == A.
    tf_ans = linalg_impl.matrix_exponential(
        gen_linalg_ops.matrix_logarithm(inp))
    out = tf_ans.eval()
    self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
def _verifyLogarithm(self, x, np_type):
  inp = x.astype(np_type)
  with test_util.use_gpu():
    # Verify that expm(logm(A)) == A.
    tf_ans = linalg_impl.matrix_exponential(
        gen_linalg_ops.matrix_logarithm(inp))
    out = self.evaluate(tf_ans)
    self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
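# An independent cross-check of the expm(logm(A)) == A round trip using
# SciPy rather than TensorFlow; a sketch, assuming scipy is available. A
# matrix near the identity is used so that the real logarithm exists.
import numpy as np
from scipy.linalg import expm, logm

a = np.eye(4) + 0.1 * np.random.rand(4, 4)
np.testing.assert_allclose(expm(logm(a)), a, rtol=1e-4, atol=1e-3)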
def _verifyExponential(self, x, np_type):
  inp = x.astype(np_type)
  with self.test_session(use_gpu=True):
    tf_ans = linalg_impl.matrix_exponential(inp)
    if x.size == 0:
      np_ans = np.empty(x.shape, dtype=np_type)
    else:
      if x.ndim > 2:
        # Apply the NumPy reference per matrix across the batch dimensions.
        np_ans = np.zeros(inp.shape, dtype=np_type)
        for i in itertools.product(*[range(dim) for dim in inp.shape[:-2]]):
          np_ans[i] = np_expm(inp[i])
      else:
        np_ans = np_expm(inp)
    out = tf_ans.eval()
    self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
def _verifyExponential(self, x, np_type):
  inp = x.astype(np_type)
  with test_util.use_gpu():
    with ops.device("/cpu:0"):
      tf_ans = linalg_impl.matrix_exponential(inp)
    if x.size == 0:
      np_ans = np.empty(x.shape, dtype=np_type)
    else:
      if x.ndim > 2:
        # Apply the NumPy reference per matrix across the batch dimensions.
        np_ans = np.zeros(inp.shape, dtype=np_type)
        for i in itertools.product(*[range(dim) for dim in inp.shape[:-2]]):
          np_ans[i] = np_expm(inp[i])
      else:
        np_ans = np_expm(inp)
    out = self.evaluate(tf_ans)
    self.assertAllClose(np_ans, out, rtol=1e-3, atol=1e-3)
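# np_expm is referenced above but not defined in these snippets. A NumPy
# reference implementation consistent with that usage is sketched below as a
# truncated Taylor series; the 20-term cutoff is an assumption, and
# scipy.linalg.expm would serve equally well as the reference.
import math

import numpy as np


def np_expm(x):
  """Reference matrix exponential: truncated Taylor series sum_k x**k / k!."""
  return np.sum(
      [np.linalg.matrix_power(x, k) / math.factorial(k) for k in range(20)],
      axis=0)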
def testDynamic(self):
  with self.test_session(use_gpu=True) as sess:
    inp = array_ops.placeholder(ops.dtypes.float32)
    expm = linalg_impl.matrix_exponential(inp)
    matrix = np.array([[1., 2.], [3., 4.]])
    sess.run(expm, feed_dict={inp: matrix})
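# A TF 2 analogue of the placeholder-based dynamic-shape test above, offered
# as a sketch: an input_signature with unknown dimensions exercises the
# unknown-shape path of tf.linalg.expm without TF1 sessions.
import numpy as np
import tensorflow as tf


@tf.function(input_signature=[tf.TensorSpec([None, None], tf.float32)])
def expm_fn(x):
  return tf.linalg.expm(x)


print(expm_fn(np.array([[1., 2.], [3., 4.]], np.float32)))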
def testWrongDimensions(self):
  # The input to the exponential should be at least a 2-dimensional tensor.
  vector = constant_op.constant([1., 2.])
  with self.assertRaises(ValueError):
    linalg_impl.matrix_exponential(vector)
def testNonSquareMatrix(self):
  # Attempting the exponential of a non-square matrix should raise an error.
  with self.assertRaises(ValueError):
    linalg_impl.matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))
def testInfinite(self):
  # Check that the op does not loop forever on infinite inputs. (b/158433036)
  in_tensor = [[np.inf, 1.], [1., 1.]]
  result = self.evaluate(linalg_impl.matrix_exponential(in_tensor))
  self.assertTrue(np.all(np.isnan(result)))
def testInfinite(self):
  # Check that the op does not loop forever on infinite inputs. (b/158433036)
  in_tensor = np.random.rand(100, 100).astype(np.float64)
  in_tensor[0][0] = np.inf
  with self.assertRaises(errors_impl.InvalidArgumentError):
    self.evaluate(linalg_impl.matrix_exponential(in_tensor))
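# The two testInfinite variants above expect different outcomes (all-NaN
# output vs. InvalidArgumentError) because the behavior changed across
# TensorFlow versions. A caller-side guard, sketched here, sidesteps both:
import numpy as np
import tensorflow as tf

x = np.array([[np.inf, 1.], [1., 1.]], np.float32)
if not np.all(np.isfinite(x)):
  raise ValueError("matrix_exponential requires finite entries")
result = tf.linalg.expm(x)  # Not reached for this x.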