Code example #1
 def testWrongDimensions(self):
   # The input to self_adjoint_eig should be a tensor of
   # at least rank 2.
   scalar = constant_op.constant(1.)
   with self.assertRaises(ValueError):
     linalg_ops.self_adjoint_eig(scalar)
   vector = constant_op.constant([1., 2.])
   with self.assertRaises(ValueError):
     linalg_ops.self_adjoint_eig(vector)
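For contrast, a minimal sketch of the accepted case (assuming the same constant_op / linalg_ops imports as the test above): a rank-2 symmetric matrix passes the shape check and yields eigenvalues in ascending order plus eigenvectors.

# Sketch only: a rank-2 input is accepted.
matrix = constant_op.constant([[2., 1.],
                               [1., 2.]])
e, v = linalg_ops.self_adjoint_eig(matrix)        # eigenvalues ascending, eigenvectors in the columns of v
e_only = linalg_ops.self_adjoint_eigvals(matrix)  # eigenvalues only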
Code example #2
  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    if dtype_.is_complex:
      a += 1j * np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    a += np.conj(a.T)
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      atol = 1e-4
    else:
      atol = 1e-12
    np_e, np_v = np.linalg.eigh(a)
    with self.test_session():
      if compute_v_:
        tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))

        # Check that V*diag(E)*V^T is close to A.
        a_ev = math_ops.matmul(
            math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
            tf_v,
            adjoint_b=True)
        self.assertAllClose(a_ev.eval(), a, atol=atol)

        # Compare to numpy.linalg.eigh.
        CompareEigenDecompositions(self, np_e, np_v,
                                   tf_e.eval(), tf_v.eval(), atol)
      else:
        tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
        self.assertAllClose(
            np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
Code example #3
 def Test(self):
     np.random.seed(1)
     n = shape_[-1]
     batch_shape = shape_[:-2]
     a = np.random.uniform(low=-1.0, high=1.0,
                           size=n * n).reshape([n, n]).astype(dtype_)
     a += a.T
     a = np.tile(a, batch_shape + (1, 1))
     # Optimal stepsize for central difference is O(epsilon^{1/3}).
     epsilon = np.finfo(dtype_).eps
     delta = 0.1 * epsilon**(1.0 / 3.0)
     # tolerance obtained by looking at actual differences using
     # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
     if dtype_ == np.float32:
         tol = 1e-2
     else:
         tol = 1e-7
     with self.test_session():
         tf_a = constant_op.constant(a)
         tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
         for b in tf_e, tf_v:
             x_init = np.random.uniform(low=-1.0, high=1.0, size=n *
                                        n).reshape([n, n]).astype(dtype_)
             x_init += x_init.T
             x_init = np.tile(x_init, batch_shape + (1, 1))
             theoretical, numerical = gradient_checker.compute_gradient(
                 tf_a,
                 tf_a.get_shape().as_list(),
                 b,
                 b.get_shape().as_list(),
                 x_init_value=x_init,
                 delta=delta)
             self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Code example #4
  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype
    a = np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    if dtype_.is_complex:
      a += 1j * np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
    a += np.conj(a.T)
    a = np.tile(a, batch_shape + (1, 1))
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      atol = 1e-4
    else:
      atol = 1e-12
    np_e, np_v = np.linalg.eigh(a)
    with self.session(use_gpu=True):
      if compute_v_:
        tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))

        # Check that V*diag(E)*V^T is close to A.
        a_ev = math_ops.matmul(
            math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
            tf_v,
            adjoint_b=True)
        self.assertAllClose(self.evaluate(a_ev), a, atol=atol)

        # Compare to numpy.linalg.eigh.
        CompareEigenDecompositions(self, np_e, np_v, self.evaluate(tf_e),
                                   self.evaluate(tf_v), atol)
      else:
        tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
        self.assertAllClose(
            np.sort(np_e, -1), np.sort(self.evaluate(tf_e), -1), atol=atol)
Code example #5
def _psd_mask(x):
    """Computes whether each square matrix in the input is positive semi-definite.

    Args:
      x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

    Returns:
      mask: A floating-point `Tensor` of shape `[B1, ... Bn]`.  Each
        scalar is 1 if the corresponding matrix was PSD, otherwise 0.
    """
    # Allegedly
    # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
    # it is more efficient to test for positive semi-definiteness by
    # trying to compute the Cholesky decomposition -- the matrix is PSD
    # if you succeed and not PSD if you fail.  However, TensorFlow's
    # Cholesky raises an exception if _any_ of the input matrices are
    # not PSD, from which I don't know how to extract _which ones_, so I
    # proceed by explicitly computing all the eigenvalues and checking
    # whether they are all positive or not.
    #
    # Also, as was discussed in the answer, it is somewhat dangerous to
    # treat SPD-ness as binary in floating-point arithmetic. Cholesky
    # factorization can complete and 'look' like everything is fine
    # (e.g., O(1) entries and a diagonal of all ones) but the matrix can
    # have an exponential condition number.
    eigenvalues, _ = linalg_ops.self_adjoint_eig(x)
    return math_ops.cast(math_ops.reduce_min(eigenvalues, axis=-1) >= 0,
                         dtype=x.dtype)
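A minimal usage sketch (the names below are illustrative, not from the library): in eager mode or inside a session, _psd_mask returns 1 for the PSD matrix and 0 for the indefinite one.

# Usage sketch: one PSD matrix and one indefinite matrix.
batch = constant_op.constant([[[2., 0.], [0., 3.]],    # eigenvalues 2, 3 -> PSD
                              [[0., 1.], [1., 0.]]])   # eigenvalues -1, 1 -> not PSD
mask = _psd_mask(batch)  # expected: [1., 0.]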
Code example #6
 def Test(self):
   np.random.seed(1)
   n = shape_[-1]
   batch_shape = shape_[:-2]
   a = np.random.uniform(
       low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
   a += np.conj(a.T)
   a = np.tile(a, batch_shape + (1, 1))
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(dtype_).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   # tolerance obtained by looking at actual differences using
   # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
   if dtype_ == np.float32:
     tol = 1e-2
   else:
     tol = 1e-7
   with self.test_session():
     tf_a = constant_op.constant(a)
     tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
     for b in tf_e, tf_v:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(dtype_)
       x_init += np.conj(x_init.T)
       x_init = np.tile(x_init, batch_shape + (1, 1))
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Code example #7
    def Test(self):
        np.random.seed(1)
        n = shape_[-1]
        batch_shape = shape_[:-2]
        a = np.random.uniform(low=-1.0, high=1.0,
                              size=n * n).reshape([n, n]).astype(dtype_)
        a += a.T
        a = np.tile(a, batch_shape + (1, 1))
        if dtype_ == np.float32 or dtype_ == np.complex64:
            atol = 1e-4
        else:
            atol = 1e-12
        for compute_v in False, True:
            np_e, np_v = np.linalg.eig(a)
            with self.test_session():
                if compute_v:
                    tf_e, tf_v = linalg_ops.self_adjoint_eig(
                        constant_op.constant(a))

                    # Check that V*diag(E)*V^T is close to A.
                    a_ev = math_ops.matmul(math_ops.matmul(
                        tf_v, array_ops.matrix_diag(tf_e)),
                                           tf_v,
                                           adjoint_b=True)
                    self.assertAllClose(a_ev.eval(), a, atol=atol)

                    # Compare to numpy.linalg.eig.
                    CompareEigenDecompositions(self, np_e, np_v, tf_e.eval(),
                                               tf_v.eval(), atol)
                else:
                    tf_e = linalg_ops.self_adjoint_eigvals(
                        constant_op.constant(a))
                    self.assertAllClose(np.sort(np_e, -1),
                                        np.sort(tf_e.eval(), -1),
                                        atol=atol)
Code example #8
File: utils.py Project: Utsal20/poGANmon
def posdef_inv_eig(tensor, identity, damping):
    """Computes inverse(tensor + damping * identity) with eigendecomposition."""
    eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(tensor +
                                                            damping * identity)
    return math_ops.matmul(eigenvectors / eigenvalues,
                           eigenvectors,
                           transpose_b=True)
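Why dividing the eigenvector matrix by the eigenvalues yields the inverse: broadcasting scales each column of V by 1/e_i, so the product is V diag(1/e) V^T = (V diag(e) V^T)^{-1}. A small NumPy sketch of the same identity (illustrative only, not part of the library):

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 3.0]])                        # symmetric
damping = 0.1
e, V = np.linalg.eigh(a + damping * np.eye(2))    # a + damping*I = V diag(e) V^T
inv = (V / e) @ V.T                               # V diag(1/e) V^T, via broadcasting over columns
np.testing.assert_allclose(inv, np.linalg.inv(a + damping * np.eye(2)), atol=1e-12)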
Code example #9
def posdef_inv_eig(tensor, identity, damping):
    """Computes inverse(tensor + damping * identity) with eigendecomposition."""
    eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(
        tensor + damping * identity)
    # TODO(GD): it's a little hacky
    eigenvalues = gen_math_ops.maximum(eigenvalues, damping)
    return math_ops.matmul(
        eigenvectors / eigenvalues, eigenvectors, transpose_b=True)
Code example #10
 def Test(self):
     np.random.seed(1)
     n = shape_[-1]
     batch_shape = shape_[:-2]
     np_dtype = dtype_.as_numpy_dtype
     a = np.random.uniform(low=-1.0, high=1.0,
                           size=n * n).reshape([n, n]).astype(np_dtype)
     if dtype_.is_complex:
         a += 1j * np.random.uniform(low=-1.0, high=1.0, size=n *
                                     n).reshape([n, n]).astype(np_dtype)
     a += np.conj(a.T)
     a = np.tile(a, batch_shape + (1, 1))
     # Optimal stepsize for central difference is O(epsilon^{1/3}).
     epsilon = np.finfo(np_dtype).eps
     delta = 0.1 * epsilon**(1.0 / 3.0)
     # tolerance obtained by looking at actual differences using
     # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
     if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
         tol = 1e-2
     else:
         tol = 1e-7
     with self.session(use_gpu=True):
         tf_a = constant_op.constant(a)
         if compute_v_:
             tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
             # (complex) Eigenvectors are only unique up to an arbitrary phase
             # We normalize the vectors such that the first component has phase 0.
             top_rows = tf_v[..., 0:1, :]
             if tf_a.dtype.is_complex:
                 angle = -math_ops.angle(top_rows)
                 phase = math_ops.complex(math_ops.cos(angle),
                                          math_ops.sin(angle))
             else:
                 phase = math_ops.sign(top_rows)
             tf_v *= phase
             outputs = [tf_e, tf_v]
         else:
             tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
             outputs = [tf_e]
         for b in outputs:
             x_init = np.random.uniform(low=-1.0, high=1.0, size=n *
                                        n).reshape([n, n]).astype(np_dtype)
             if dtype_.is_complex:
                 x_init += 1j * np.random.uniform(
                     low=-1.0, high=1.0, size=n * n).reshape(
                         [n, n]).astype(np_dtype)
             x_init += np.conj(x_init.T)
             x_init = np.tile(x_init, batch_shape + (1, 1))
             theoretical, numerical = gradient_checker.compute_gradient(
                 tf_a,
                 tf_a.get_shape().as_list(),
                 b,
                 b.get_shape().as_list(),
                 x_init_value=x_init,
                 delta=delta)
             self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Code example #11
 def make_eigen_basis_update_ops(self):
     ops = []
     with variable_scope.variable_scope(self._var_scope):
         for damping, eigen_basis in self._eigen_basis.items():
             new_value, new_basis = linalg_ops.self_adjoint_eig(
                 self._cov +
                 damping * linalg_ops.eye(self._cov.shape.as_list()[0]))
             ops.append(eigen_basis.assign(new_basis))
             ops.append(self._eigen_value[damping].assign(new_value))
         return control_flow_ops.group(*ops)
Code example #12
 def Test(self):
   np.random.seed(1)
   n = shape_[-1]
   batch_shape = shape_[:-2]
   np_dtype = dtype_.as_numpy_dtype
   a = np.random.uniform(
       low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   if dtype_.is_complex:
     a += 1j * np.random.uniform(
         low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   a += np.conj(a.T)
   a = np.tile(a, batch_shape + (1, 1))
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(np_dtype).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   # tolerance obtained by looking at actual differences using
   # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
   if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
     tol = 1e-2
   else:
     tol = 1e-7
   with self.session(use_gpu=True):
     tf_a = constant_op.constant(a)
     if compute_v_:
       tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
       # (complex) Eigenvectors are only unique up to an arbitrary phase
       # We normalize the vectors such that the first component has phase 0.
       top_rows = tf_v[..., 0:1, :]
       if tf_a.dtype.is_complex:
         angle = -math_ops.angle(top_rows)
         phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
       else:
         phase = math_ops.sign(top_rows)
       tf_v *= phase
       outputs = [tf_e, tf_v]
     else:
       tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
       outputs = [tf_e]
     for b in outputs:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       if dtype_.is_complex:
         x_init += 1j * np.random.uniform(
             low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       x_init += np.conj(x_init.T)
       x_init = np.tile(x_init, batch_shape + (1, 1))
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Code example #13
 def testConcurrentExecutesWithoutError(self):
   all_ops = []
   with self.session(use_gpu=True) as sess:
     for compute_v_ in True, False:
       matrix1 = random_ops.random_normal([5, 5], seed=42)
       matrix2 = random_ops.random_normal([5, 5], seed=42)
       if compute_v_:
         e1, v1 = linalg_ops.self_adjoint_eig(matrix1)
         e2, v2 = linalg_ops.self_adjoint_eig(matrix2)
         all_ops += [e1, v1, e2, v2]
       else:
         e1 = linalg_ops.self_adjoint_eigvals(matrix1)
         e2 = linalg_ops.self_adjoint_eigvals(matrix2)
         all_ops += [e1, e2]
     val = sess.run(all_ops)
     self.assertAllEqual(val[0], val[2])
     # The algorithm is slightly different for compute_v being True and False,
     # so require approximate equality only here.
     self.assertAllClose(val[2], val[4])
     self.assertAllEqual(val[4], val[5])
     self.assertAllEqual(val[1], val[3])
Code example #14
 def testConcurrentExecutesWithoutError(self):
     all_ops = []
     with self.session(use_gpu=True) as sess:
         for compute_v_ in True, False:
             matrix1 = random_ops.random_normal([5, 5], seed=42)
             matrix2 = random_ops.random_normal([5, 5], seed=42)
             if compute_v_:
                 e1, v1 = linalg_ops.self_adjoint_eig(matrix1)
                 e2, v2 = linalg_ops.self_adjoint_eig(matrix2)
                 all_ops += [e1, v1, e2, v2]
             else:
                 e1 = linalg_ops.self_adjoint_eigvals(matrix1)
                 e2 = linalg_ops.self_adjoint_eigvals(matrix2)
                 all_ops += [e1, e2]
         val = self.evaluate(all_ops)
         self.assertAllEqual(val[0], val[2])
         # The algorithm is slightly different for compute_v being True and False,
         # so require approximate equality only here.
         self.assertAllClose(val[2], val[4])
         self.assertAllEqual(val[4], val[5])
         self.assertAllEqual(val[1], val[3])
Code example #15
 def Compute(x):
   e, v = linalg_ops.self_adjoint_eig(x)
   # (complex) Eigenvectors are only unique up to an arbitrary phase
   # We normalize the vectors such that the first component has phase 0.
   top_rows = v[..., 0:1, :]
   if dtype_.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   v *= phase
   return e, v
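The same phase normalization in NumPy, as a sketch (assumes eigenvectors are the columns of v, as in the TensorFlow op, and that no first component is exactly zero): multiplying each column by exp(-i*angle) of its first entry makes that entry real and non-negative, so decompositions from different implementations become directly comparable.

import numpy as np

def normalize_phase(v):
  # Scale each eigenvector (column) so its first component has phase 0
  # (sign +1 in the real case). Sketch only.
  top_rows = v[..., 0:1, :]
  if np.iscomplexobj(v):
    phase = np.exp(-1j * np.angle(top_rows))
  else:
    phase = np.sign(top_rows)
  return v * phase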
Code example #16
    def get_eigendecomp(self):
        """Creates or retrieves eigendecomposition of self._cov."""
        # Unlike get_inverse and get_matpower this doesn't retrieve a stored
        # variable, but instead always computes a fresh version from the current
        # value of get_cov().
        if not self._eigendecomp:
            eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(self._cov)

            # The matrix self._cov is positive semidefinite by construction, but the
            # numerical eigenvalues could be negative due to numerical errors, so here
            # we clip them to be at least FLAGS.eigenvalue_clipping_threshold
            clipped_eigenvalues = math_ops.maximum(
                eigenvalues, EIGENVALUE_CLIPPING_THRESHOLD)
            self._eigendecomp = (clipped_eigenvalues, eigenvectors)

        return self._eigendecomp
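The clipping step in isolation, as a NumPy sketch (the threshold value below is assumed for illustration only; the surrounding code uses the EIGENVALUE_CLIPPING_THRESHOLD constant):

import numpy as np

EIGENVALUE_CLIPPING_THRESHOLD = 1e-6            # assumed value, for illustration only
eigenvalues = np.array([-3e-8, 0.0, 0.5, 2.0])  # tiny negatives from round-off
clipped = np.maximum(eigenvalues, EIGENVALUE_CLIPPING_THRESHOLD)
# -> [1e-06, 1e-06, 0.5, 2.0]; keeps later inversions and logs well defined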
Code example #17
    def register_eigendecomp(self):
        """Registers an eigendecomposition.
        Unlike register_damp_inverse and register_matpower this doesn't create
        any variables or inverse ops.  Instead it merely makes tensors containing
        the eigendecomposition available to anyone that wants them.  They will be
        recomputed (once) for each session.run() call (when they are needed by some op).
        """
        if not self._eigendecomp:
            eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(self._cov)

            # The matrix self._cov is positive semidefinite by construction, but the
            # numerical eigenvalues could be negative due to numerical errors, so here
            # we clip them to be at least FLAGS.eigenvalue_clipping_threshold
            clipped_eigenvalues = math_ops.maximum(
                eigenvalues, EIGENVALUE_CLIPPING_THRESHOLD)
            self._eigendecomp = (clipped_eigenvalues, eigenvectors)
Code example #18
 def testMatrixThatFailsWhenFlushingDenormsToZero(self):
   # Test a 32x32 matrix which is known to fail if denorm floats are flushed to
   # zero.
   matrix = np.genfromtxt(
       test.test_src_dir_path(
           "python/kernel_tests/testdata/"
           "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
   self.assertEqual(matrix.shape, (32, 32))
   matrix_tensor = constant_op.constant(matrix)
   with self.session(use_gpu=True) as sess:
     (e, v) = sess.run(linalg_ops.self_adjoint_eig(matrix_tensor))
     self.assertEqual(e.size, 32)
     self.assertAllClose(
         np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
     self.assertAllClose(matrix,
                         np.matmul(np.matmul(v, np.diag(e)), v.transpose()))
Code example #19
 def testMatrixThatFailsWhenFlushingDenormsToZero(self):
   # Test a 32x32 matrix which is known to fail if denorm floats are flushed to
   # zero.
   matrix = np.genfromtxt(
       test.test_src_dir_path(
           "python/kernel_tests/testdata/"
           "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
   self.assertEqual(matrix.shape, (32, 32))
   matrix_tensor = constant_op.constant(matrix)
   with self.session(use_gpu=True) as sess:
     (e, v) = self.evaluate(linalg_ops.self_adjoint_eig(matrix_tensor))
     self.assertEqual(e.size, 32)
     self.assertAllClose(
         np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
     self.assertAllClose(matrix,
                         np.matmul(np.matmul(v, np.diag(e)), v.transpose()))
Code example #20
  def get_eigendecomp(self):
    """Creates or retrieves eigendecomposition of self._cov."""
    # Unlike get_inverse and get_matpower this doesn't retrieve a stored
    # variable, but instead always computes a fresh version from the current
    # value of get_cov().
    if not self._eigendecomp:
      eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(self._cov)

      # The matrix self._cov is positive semidefinite by construction, but the
      # numerical eigenvalues could be negative due to numerical errors, so here
      # we clip them to be at least FLAGS.eigenvalue_clipping_threshold
      clipped_eigenvalues = math_ops.maximum(eigenvalues,
                                             EIGENVALUE_CLIPPING_THRESHOLD)
      self._eigendecomp = (clipped_eigenvalues, eigenvectors)

    return self._eigendecomp
Code example #21
  def _test(self, dtype, shape):
    np.random.seed(1)
    x_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
    x_np = x_np + np.swapaxes(x_np, -1, -2)
    n = shape[-1]

    e_np, _ = np.linalg.eigh(x_np)
    with self.cached_session() as sess:
      x_tf = array_ops.placeholder(dtype)
      with self.test_scope():
        e, v = linalg_ops.self_adjoint_eig(x_tf)
      e_val, v_val = sess.run([e, v], feed_dict={x_tf: x_np})

      v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
      self.assertAlmostEqual(np.mean(v_diff**2), 0.0, delta=1e-6)
      self.assertAlmostEqual(np.mean((e_val - e_np)**2), 0.0, delta=1e-6)
Code example #22
  def register_eigendecomp(self):
    """Registers an eigendecomposition.

    Unlike register_damp_inverse and register_matpower this doesn't create
    any variables or inverse ops.  Instead it merely makes tensors containing
    the eigendecomposition available to anyone that wants them.  They will be
    recomputed (once) for each session.run() call (when they are needed by some op).
    """
    if not self._eigendecomp:
      eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(self._cov)

      # The matrix self._cov is positive semidefinite by construction, but the
      # numerical eigenvalues could be negative due to numerical errors, so here
      # we clip them to be at least FLAGS.eigenvalue_clipping_threshold
      clipped_eigenvalues = math_ops.maximum(eigenvalues,
                                             EIGENVALUE_CLIPPING_THRESHOLD)
      self._eigendecomp = (clipped_eigenvalues, eigenvectors)
Code example #23
    def _test(self, dtype, shape):
        np.random.seed(1)
        x_np = np.random.uniform(
            low=-1.0, high=1.0,
            size=np.prod(shape)).reshape(shape).astype(dtype)
        x_np = x_np + np.swapaxes(x_np, -1, -2)
        n = shape[-1]

        e_np, _ = np.linalg.eigh(x_np)
        with self.session() as sess:
            x_tf = array_ops.placeholder(dtype)
            with self.test_scope():
                e, v = linalg_ops.self_adjoint_eig(x_tf)
            e_val, v_val = sess.run([e, v], feed_dict={x_tf: x_np})

            v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
            self.assertAlmostEqual(np.mean(v_diff**2), 0.0, delta=1e-6)
            self.assertAlmostEqual(np.mean((e_val - e_np)**2), 0.0, delta=1e-6)
Code example #24
 def register_eigen_basis(self, damping):
     """ With current value of covariance matrix, initialize Eigen-basis.
     :return: None
     """
     with variable_scope.variable_scope(self._var_scope):
         initial_values, initial_basis = \
             linalg_ops.self_adjoint_eig(self._cov +
                                         damping * linalg_ops.eye(self._cov.shape.as_list()[0]))
         eigen_basis = variable_scope.get_variable("basis",
                                                   initializer=initial_basis,
                                                   trainable=False,
                                                   dtype=self._dtype)
         eigen_value = variable_scope.get_variable("values",
                                                   initializer=initial_values,
                                                   trainable=False,
                                                   dtype=self._dtype)
         self._eigen_basis[damping] = eigen_basis
         self._eigen_value[damping] = eigen_value
Code example #25
File: linalg_grad.py Project: 1000sprites/tensorflow
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  compute_v = op.get_attr("compute_v")
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e, grad_v]):
    if compute_v:
      v = op.outputs[1]
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) +
              f * math_ops.matmul(v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      _, v = linalg_ops.self_adjoint_eig(op.inputs[0])
      grad_a = math_ops.matmul(v,
                               math_ops.matmul(
                                   array_ops.matrix_diag(grad_e),
                                   v,
                                   adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle
    grad_a = array_ops.matrix_band_part(
        grad_a + math_ops.conj(array_ops.matrix_transpose(grad_a)), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
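A NumPy restatement of the compute_v branch above, as a sketch (single real symmetric matrix, distinct eigenvalues assumed; grad_e and grad_v are the incoming gradients for the eigenvalues and eigenvectors):

import numpy as np

def self_adjoint_eig_grad_reference(a, grad_e, grad_v):
  # Sketch of the formula implemented above for one real symmetric matrix:
  # grad_a = V (diag(grad_e) + F * (V^T grad_v)) V^T, with
  # F[i, j] = 1 / (e[j] - e[i]) off the diagonal and 0 on it.
  e, v = np.linalg.eigh(a)
  n = len(e)
  f = 1.0 / (e[None, :] - e[:, None] + np.eye(n))  # add eye to avoid 0/0 on the diagonal
  f = f * (1.0 - np.eye(n))                        # zero the diagonal, like matrix_set_diag
  grad_a = v @ (np.diag(grad_e) + f * (v.T @ grad_v)) @ v.T
  # Symmetrize and keep the lower triangle, mirroring the forward op's
  # dependence on only the lower-triangular part of a.
  grad_a = np.tril(grad_a + grad_a.T)
  grad_a[np.diag_indices(n)] *= 0.5
  return grad_a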
Code example #26
def posdef_inv_eig(tensor, identity, damping):
    """Computes inverse(tensor + damping * identity) with eigendecomposition."""
    # # this works
    # with tf.device('/cpu:0'):
    #     eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(
    #         tensor + damping * identity)

    # # this doesn't work
    # eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(
    #     tensor + damping * identity)

    # this works
    eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(
        tf.to_double(tensor + damping * identity))
    eigenvalues, eigenvectors = tf.to_float(eigenvalues), tf.to_float(
        eigenvectors)

    # TODO(GD): it's a little hacky
    eigenvalues = gen_math_ops.maximum(eigenvalues, damping)
    return math_ops.matmul(eigenvectors / eigenvalues,
                           eigenvectors,
                           transpose_b=True)
Code example #27
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
    """Gradient for SelfAdjointEigV2."""
    e = op.outputs[0]
    compute_v = op.get_attr("compute_v")
    # a = op.inputs[0], which satisfies
    # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
    with ops.control_dependencies([grad_e, grad_v]):
        if compute_v:
            v = op.outputs[1]
            # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
            # Notice that because of the term involving f, the gradient becomes
            # infinite (or NaN in practice) when eigenvalues are not unique.
            # Mathematically this should not be surprising, since for (k-fold)
            # degenerate eigenvalues, the corresponding eigenvectors are only defined
            # up to arbitrary rotation in a (k-dimensional) subspace.
            f = array_ops.matrix_set_diag(
                math_ops.reciprocal(
                    array_ops.expand_dims(e, -2) -
                    array_ops.expand_dims(e, -1)), array_ops.zeros_like(e))
            grad_a = math_ops.matmul(
                v,
                math_ops.matmul(array_ops.matrix_diag(grad_e) +
                                f * math_ops.matmul(v, grad_v, adjoint_a=True),
                                v,
                                adjoint_b=True))
        else:
            _, v = linalg_ops.self_adjoint_eig(op.inputs[0])
            grad_a = math_ops.matmul(
                v,
                math_ops.matmul(array_ops.matrix_diag(grad_e),
                                v,
                                adjoint_b=True))
        # The forward op only depends on the lower triangular part of a, so here we
        # symmetrize and take the lower triangle
        grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a),
                                            -1, 0)
        grad_a = array_ops.matrix_set_diag(
            grad_a, 0.5 * array_ops.matrix_diag_part(grad_a))
        return grad_a
Code example #28
 def loop_fn(i):
     return (linalg_ops.self_adjoint_eig(array_ops.gather(x, i)),
             linalg_ops.self_adjoint_eigvals(array_ops.gather(x, i)))
Code example #29
def posdef_eig_self_adjoint(mat):
    """Computes eigendecomposition using self_adjoint_eig."""
    evals, evecs = linalg_ops.self_adjoint_eig(mat)
    evals = math_ops.abs(evals)  # Should be equivalent to svd approach.

    return evals, evecs
Code example #30
 def register_eigendecomp(self):
   """Registers that an eigendecomposition is needed by a FisherBlock."""
   if not self._eigendecomp:
     self._eigendecomp = linalg_ops.self_adjoint_eig(self._cov)
Code example #31
File: utils.py Project: abidrahmank/tensorflow
def posdef_inv_eig(tensor, identity, damping):
  """Computes inverse(tensor + damping * identity) with eigendecomposition."""
  eigenvalues, eigenvectors = linalg_ops.self_adjoint_eig(
      tensor + damping * identity)
  return math_ops.matmul(
      eigenvectors / eigenvalues, eigenvectors, transpose_b=True)
Code example #32
File: utils.py Project: DILASSS/tensorflow
def posdef_eig_self_adjoint(mat):
  """Computes eigendecomposition using self_adjoint_eig."""
  evals, evecs = linalg_ops.self_adjoint_eig(mat)
  evals = math_ops.abs(evals)  # Should be equivalent to svd approach.

  return evals, evecs