Example #1
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
    """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :,
  :])`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that the order of output
  arguments here is `s`, `u`, `v` when `compute_uv` is `True`, as opposed to
  `u`, `s`, `v` for numpy.linalg.svd.
  @end_compatibility
  """
    # pylint: disable=protected-access
    s, u, v = gen_linalg_ops._svd(tensor,
                                  compute_uv=compute_uv,
                                  full_matrices=full_matrices,
                                  name=name)
    # pylint: enable=protected-access
    if compute_uv:
        return math_ops.real(s), u, v
    else:
        return math_ops.real(s)
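A quick way to sanity-check the identity in the docstring is to rebuild `a` from the three factors. A minimal sketch, assuming the public TF 2.x entry point `tf.linalg.svd` (which, like this function, returns `v` rather than its adjoint, so the reconstruction uses `adjoint_b=True`):

import tensorflow as tf

a = tf.random.normal([3, 5, 4])   # batch of M=5 by N=4 matrices
s, u, v = tf.linalg.svd(a)        # s: [3, 4], u: [3, 5, 4], v: [3, 4, 4]
a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
print(tf.reduce_max(tf.abs(a - a_approx)))  # ~1e-6: a is recovered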
Example #2
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :,
  :])`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that the order of output
  arguments here is `s`, `u`, `v` when `compute_uv` is `True`, as opposed to
  `u`, `s`, `v` for numpy.linalg.svd.
  @end_compatibility
  """
  # pylint: disable=protected-access
  s, u, v = gen_linalg_ops._svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices)
  # pylint: enable=protected-access
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
Example #3
  def _compareRealImag(self, cplx, use_gpu):
    np_real, np_imag = np.real(cplx), np.imag(cplx)
    np_zeros = np_real * 0

    with test_util.device(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(cplx)
      tf_real = math_ops.real(inx)
      tf_imag = math_ops.imag(inx)
      tf_real_real = math_ops.real(tf_real)
      tf_imag_real = math_ops.imag(tf_real)
      self.assertAllEqual(np_real, self.evaluate(tf_real))
      self.assertAllEqual(np_imag, self.evaluate(tf_imag))
      self.assertAllEqual(np_real, self.evaluate(tf_real_real))
      self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
Example #4
 def _compareRealImag(self, cplx, use_gpu):
   np_real, np_imag = np.real(cplx), np.imag(cplx)
   np_zeros = np_real * 0
   with self.test_session(use_gpu=use_gpu,
                          force_gpu=use_gpu and test_util.is_gpu_available()):
     inx = ops.convert_to_tensor(cplx)
     tf_real = math_ops.real(inx)
     tf_imag = math_ops.imag(inx)
     tf_real_real = math_ops.real(tf_real)
     tf_imag_real = math_ops.imag(tf_real)
     self.assertAllEqual(np_real, self.evaluate(tf_real))
     self.assertAllEqual(np_imag, self.evaluate(tf_imag))
     self.assertAllEqual(np_real, self.evaluate(tf_real_real))
     self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
Example #5
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, 'm')
        m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
        m_t = state_ops.assign(m,
                               m * coefficients['beta_1_t'],
                               use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * conj(g_t))
        v = self.get_slot(var, 'v')
        v_scaled_g_values = (
            grad * math_ops.conj(grad)) * coefficients['one_minus_beta_2_t']
        v_t = state_ops.assign(v,
                               v * coefficients['beta_2_t'],
                               use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)

        if not self.amsgrad:
            v_sqrt = math_ops.sqrt(v_t)
            var_update = state_ops.assign_sub(
                var,
                coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
                use_locking=self._use_locking)
            return control_flow_ops.group(*[var_update, m_t, v_t])
        else:
            cxmax = lambda a, b: math_ops.cast(
                math_ops.maximum(math_ops.real(a), math_ops.real(b)), var_dtype
            )
            v_hat = self.get_slot(var, 'vhat')
            v_hat_t = cxmax(v_hat, v_t)
            with ops.control_dependencies([v_hat_t]):
                v_hat_t = state_ops.assign(v_hat,
                                           v_hat_t,
                                           use_locking=self._use_locking)
            v_hat_sqrt = math_ops.sqrt(v_hat_t)
            var_update = state_ops.assign_sub(
                var,
                coefficients['lr'] * m_t /
                (v_hat_sqrt + coefficients['epsilon']),
                use_locking=self._use_locking)
            return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])
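For reference, the moment updates in the comments above have the following dense form; a minimal numpy sketch under assumed hyperparameter values (the amsgrad branch is omitted):

import numpy as np

beta1, beta2, lr, eps = 0.9, 0.999, 1e-3, 1e-7    # illustrative values
g = np.random.randn(4)                             # gradient g_t
m, v, var = np.zeros(4), np.zeros(4), np.random.randn(4)
m = beta1 * m + (1 - beta1) * g                    # m_t = beta1*m + (1-beta1)*g_t
v = beta2 * v + (1 - beta2) * np.real(g * np.conj(g))  # v_t uses g_t*conj(g_t)
var -= lr * m / (np.sqrt(v) + eps)                 # variable update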
Example #6
  def _operator_and_matrix(self, build_info, dtype, use_placeholder):
    shape = build_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = math_ops.ifft2d(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = math_ops.fft2d(h_c)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum, input_output_dtype=dtype)

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat
Example #7
 def f(inx):
   inx.set_shape(x.shape)
   # func is a forward RFFT function (batched or unbatched).
   z = func(inx)
   # loss = sum(|z|^2)
   loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
   return loss
Example #8
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
  A = ... # shape 10 x 10
  det = tf.exp(tf.linalg.logdet(A))  # scalar
  ```

  Args:
    matrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op`.  Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  # This uses the property that log(det(A)) = 2*sum(log(real(diag(C))))
  # where C is the cholesky decomposition of A.
  with ops.name_scope(name, 'logdet', [matrix]):
    chol = gen_linalg_ops.cholesky(matrix)
    return 2.0 * math_ops.reduce_sum(
        math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
        axis=[-1])
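The numpy claim in the docstring can be spot-checked numerically; a sketch, assuming an SPD input built from a random factor:

import numpy as np
import tensorflow as tf

x = np.random.randn(10, 10)
spd = x @ x.T + 10.0 * np.eye(10)      # symmetric positive definite
sign, np_ld = np.linalg.slogdet(spd)   # sign is +1 for SPD matrices
tf_ld = tf.linalg.logdet(tf.constant(spd))
np.testing.assert_allclose(np_ld, tf_ld.numpy(), rtol=1e-8)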
Example #9
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
  A = ... # shape 10 x 10
  det = tf.exp(tf.logdet(A))  # scalar
  ```

  Args:
    matrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op`.  Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  # This uses the property that log(det(A)) = 2*sum(log(real(diag(C))))
  # where C is the cholesky decomposition of A.
  with ops.name_scope(name, 'logdet', [matrix]):
    chol = gen_linalg_ops.cholesky(matrix)
    return 2.0 * math_ops.reduce_sum(
        math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
        reduction_indices=[-1])
Example #10
def _dct2_1d(signals, name=None):
  """Computes the type II 1D Discrete Cosine Transform (DCT) of `signals`.

  Args:
    signals: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the DCT of `signals`.

  """
  with ops.name_scope(name, 'dct', [signals]):
    # We use the FFT to compute the DCT and TensorFlow only supports float32 for
    # FFTs at the moment.
    signals = ops.convert_to_tensor(signals, dtype=dtypes.float32)

    axis_dim = signals.shape[-1].value or array_ops.shape(signals)[-1]
    axis_dim_float = math_ops.to_float(axis_dim)
    scale = 2.0 * math_ops.exp(math_ops.complex(
        0.0, -math.pi * math_ops.range(axis_dim_float) /
        (2.0 * axis_dim_float)))

    rfft = spectral_ops.rfft(signals, fft_length=[2 * axis_dim])[..., :axis_dim]
    dct2 = math_ops.real(rfft * scale)
    return dct2
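The RFFT trick used above can be checked against a direct evaluation of the unnormalized Type-II DCT; a numpy-only sketch of the same identity:

import numpy as np

x = np.random.rand(8)
n = x.shape[-1]
k = np.arange(n)
scale = 2.0 * np.exp(-1j * np.pi * k / (2.0 * n))
dct2 = np.real(np.fft.rfft(x, n=2 * n)[:n] * scale)
# Direct O(n^2) Type-II DCT, as in scipy.fftpack.dct with norm=None.
ref = np.array([2.0 * np.sum(x * np.cos(np.pi * (np.arange(n) + 0.5) * kk / n))
                for kk in range(n)])
np.testing.assert_allclose(dct2, ref, atol=1e-10)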
Example #11
  def test_defining_spd_operator_by_taking_real_part(self):
    with self.cached_session() as sess:
      # S is real and positive.
      s = linear_operator_test_util.random_uniform(
          shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)

      # Let S = S1 + S2, the Hermitian and anti-hermitian parts.
      # S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
      # where ^H is the Hermitian transpose of the function:
      #    f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
      # We want to isolate S1, since
      #   S1 is Hermitian by construction
      #   S1 is real since S is
      #   S1 is positive since it is the sum of two positive kernels

      # IDFT[S] = IDFT[S1] + IDFT[S2]
      #         =      H1  +      H2
      # where H1 is real since it is Hermitian,
      # and H2 is imaginary since it is anti-Hermitian.
      ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))

      # Throw away H2, keep H1.
      real_ifft_s = math_ops.real(ifft_s)

      # This is the perfect spectrum!
      # spectrum = DFT[H1]
      #          = S1,
      fft_real_ifft_s = fft_ops.fft3d(
          math_ops.cast(real_ifft_s, dtypes.complex64))

      # S1 is Hermitian ==> operator is real.
      # S1 is real ==> operator is self-adjoint.
      # S1 is positive ==> operator is positive-definite.
      operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)

      # Allow for complex output so we can check operator has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)
      matrix, matrix_t = sess.run([
          operator.to_dense(),
          array_ops.matrix_transpose(operator.to_dense())
      ])
      operator.assert_positive_definite().run()  # Should not fail.
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
      self.assertAllClose(matrix, matrix_t)

      # Just to test the theory, get S2 as well.
      # This should create an imaginary operator.
      # S2 is anti-Hermitian ==> operator is imaginary.
      # S2 is real ==> operator is self-adjoint.
      imag_ifft_s = math_ops.imag(ifft_s)
      fft_imag_ifft_s = fft_ops.fft3d(
          1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
      operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)

      matrix, matrix_h = sess.run([
          operator_imag.to_dense(),
          array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
      ])
      self.assertAllClose(matrix, matrix_h)
      np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
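The decomposition walked through in the comments is easy to demonstrate in 1-D with plain numpy (a standalone sketch, not part of the test):

import numpy as np

s = np.random.uniform(1.0, 2.0, size=8)  # real, positive spectrum
h = np.fft.ifft(s)                       # h = H1 + H2
h1 = np.real(h)                          # keep the Hermitian part H1
s1 = np.fft.fft(h1)                      # spectrum of a real kernel
# s1 is Hermitian: s1[k] == conj(s1[(N - k) % N]).
np.testing.assert_allclose(s1, np.conj(np.roll(s1[::-1], 1)), atol=1e-12)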
Example #12
    def _checkGradComplex(self,
                          func,
                          x,
                          y,
                          result_is_complex=True,
                          rtol=1e-2,
                          atol=1e-2):
        with self.cached_session(use_gpu=True):
            inx = ops.convert_to_tensor(x)
            iny = ops.convert_to_tensor(y)
            # func is a forward or inverse, real or complex, batched or unbatched FFT
            # function with a complex input.
            z = func(math_ops.complex(inx, iny))
            # loss = sum(|z|^2)
            loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))

            ((x_jacob_t, x_jacob_n),
             (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
                 [inx, iny], [list(x.shape), list(y.shape)],
                 loss, [1],
                 x_init_value=[x, y],
                 delta=1e-2)

        self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
        self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)
Example #13
 def testRealReal(self):
   for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
                 dtypes_lib.float64):
     with self.subTest(dtype=dtype):
       x = array_ops.placeholder(dtype)
       y = math_ops.real(x)
       self.assertEqual(x, y)
Example #14
  def _trace(self):
    # The diagonal of the [[nested] block] circulant operator is the mean of
    # the spectrum.
    # Proof:  For the [0,...,0] element, this follows from the IDFT formula.
    # Then the result follows since all diagonal elements are the same.

    # Therefore, the trace is the sum of the spectrum.

    # Get shape of diag along with the axis over which to reduce the spectrum.
    # We will reduce the spectrum over all block indices.
    if self.spectrum.get_shape().is_fully_defined():
      spec_rank = self.spectrum.get_shape().ndims
      axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32)
    else:
      spec_rank = array_ops.rank(self.spectrum)
      axis = math_ops.range(spec_rank - self.block_depth, spec_rank)

    # Real diag part "re_d".
    # Suppose spectrum.shape = [B1,...,Bb, N1, N2]
    # self.shape = [B1,...,Bb, N, N], with N1 * N2 = N.
    # re_d_value.shape = [B1,...,Bb]
    re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis)

    if not self.dtype.is_complex:
      return math_ops.cast(re_d_value, self.dtype)

    # Imaginary part, "im_d".
    if self.is_self_adjoint:
      im_d_value = 0.
    else:
      im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis)

    return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype)
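The trace-equals-sum-of-spectrum claim can be verified for the 1-D case by building the dense circulant from its spectrum; a numpy sketch:

import numpy as np

n = 5
spectrum = np.fft.fft(np.random.rand(n))    # spectrum of a real kernel
f = np.fft.fft(np.eye(n))                   # dense DFT matrix
c = f.conj().T @ np.diag(spectrum) @ f / n  # circulant C = F^H diag(s) F / n
# Each diagonal entry of C is mean(spectrum), so trace(C) == sum(spectrum).
np.testing.assert_allclose(np.trace(c), np.sum(spectrum), atol=1e-10)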
Example #15
  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.cached_session():
      inp = ops.convert_to_tensor(data)
      xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)

      def vec(x):  # Reshape to a vector
        return array_ops.reshape(x, [-1])

      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return math_ops.complex(r, i)

      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example #17
    def _operator_and_matrix(self, build_info, dtype, use_placeholder):
        shape = build_info.shape
        # For this test class, we are creating Hermitian spectrums.
        # We also want the spectrum to have eigenvalues bounded away from zero.
        #
        # pre_spectrum is bounded away from zero.
        pre_spectrum = linear_operator_test_util.random_uniform(
            shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
        pre_spectrum_c = _to_complex(pre_spectrum)

        # Real{IFFT[pre_spectrum]}
        #  = IFFT[EvenPartOf[pre_spectrum]]
        # is the IFFT of something that is also bounded away from zero.
        # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
        pre_h = math_ops.ifft2d(pre_spectrum_c)

        # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
        # So we will make spectrum = FFT[h], for real valued h.
        h = math_ops.real(pre_h)
        h_c = _to_complex(h)

        spectrum = math_ops.fft2d(h_c)

        lin_op_spectrum = spectrum

        if use_placeholder:
            lin_op_spectrum = array_ops.placeholder_with_default(spectrum,
                                                                 shape=None)

        operator = linalg.LinearOperatorCirculant2D(lin_op_spectrum,
                                                    input_output_dtype=dtype)

        mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

        return operator, mat
Example #19
  def operator_and_matrix(self,
                          shape_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    shape = shape_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft2d(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = fft_ops.fft2d(h_c)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)

    self.assertEqual(
        operator.parameters,
        {
            "input_output_dtype": dtype,
            "is_non_singular": None,
            "is_positive_definite": (
                True if ensure_self_adjoint_and_pd else None),
            "is_self_adjoint": (
                True if ensure_self_adjoint_and_pd else None),
            "is_square": True,
            "name": "LinearOperatorCirculant2D",
            "spectrum": lin_op_spectrum,
        })

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat
Example #20
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
    """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.

  Currently only Type II is supported. Implemented using a length `2N` padded
  @{tf.spectral.rfft}, as described here: https://dsp.stackexchange.com/a/10606

  @compatibility(scipy)
  Equivalent to scipy.fftpack.dct for the Type-II DCT.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    type: The DCT type to perform. Must be 2.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.

  Raises:
    ValueError: If `type` is not `2`, `n` is not `None`, `axis` is not `-1`, or
      `norm` is not `None` or `'ortho'`.

  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
  """
    _validate_dct_arguments(type, n, axis, norm)
    with _ops.name_scope(name, "dct", [input]):
        # We use the RFFT to compute the DCT and TensorFlow only supports float32
        # for FFTs at the moment.
        input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)

        axis_dim = input.shape[-1].value or _array_ops.shape(input)[-1]
        axis_dim_float = _math_ops.to_float(axis_dim)
        scale = 2.0 * _math_ops.exp(
            _math_ops.complex(
                0.0, -_math.pi * _math_ops.range(axis_dim_float) /
                (2.0 * axis_dim_float)))

        # TODO(rjryan): Benchmark performance and memory usage of the various
        # approaches to computing a DCT via the RFFT.
        dct2 = _math_ops.real(
            rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)

        if norm == "ortho":
            n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
            n2 = n1 * _math_ops.sqrt(2.0)
            # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
            weights = _array_ops.pad(_array_ops.expand_dims(n1, 0),
                                     [[0, axis_dim - 1]],
                                     constant_values=n2)
            dct2 *= weights

        return dct2
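The scipy equivalence stated in the docstring can be spot-checked as below; the sketch assumes the modern `tf.signal.dct` entry point rather than the `tf.spectral` one used here:

import numpy as np
import tensorflow as tf
from scipy.fftpack import dct as scipy_dct

signal = np.random.rand(64).astype(np.float32)
np.testing.assert_allclose(
    scipy_dct(signal, type=2, norm=None),                # unnormalized DCT-II
    tf.signal.dct(tf.constant(signal), type=2).numpy(),
    rtol=1e-4, atol=1e-4)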
Example #21
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
    """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :,
  :])`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.
  """
    # pylint: disable=protected-access
    s, u, v = gen_linalg_ops._svd(tensor,
                                  compute_uv=compute_uv,
                                  full_matrices=full_matrices)
    # pylint: enable=protected-access
    if compute_uv:
        return math_ops.real(s), u, v
    else:
        return math_ops.real(s)
Example #22
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.

  Currently only Type II is supported. Implemented using a length `2N` padded
  @{tf.spectral.rfft}, as described here: https://dsp.stackexchange.com/a/10606

  @compatibility(scipy)
  Equivalent to scipy.fftpack.dct for the Type-II DCT.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    type: The DCT type to perform. Must be 2.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.

  Raises:
    ValueError: If `type` is not `2`, `n` is not `None`, `axis` is not `-1`, or
      `norm` is not `None` or `'ortho'`.

  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
  """
  _validate_dct_arguments(type, n, axis, norm)
  with _ops.name_scope(name, "dct", [input]):
    # We use the RFFT to compute the DCT and TensorFlow only supports float32
    # for FFTs at the moment.
    input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)

    axis_dim = input.shape[-1].value or _array_ops.shape(input)[-1]
    axis_dim_float = _math_ops.to_float(axis_dim)
    scale = 2.0 * _math_ops.exp(_math_ops.complex(
        0.0, -_math.pi * _math_ops.range(axis_dim_float) /
        (2.0 * axis_dim_float)))

    # TODO(rjryan): Benchmark performance and memory usage of the various
    # approaches to computing a DCT via the RFFT.
    dct2 = _math_ops.real(
        rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)

    if norm == "ortho":
      n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
      n2 = n1 * _math_ops.sqrt(2.0)
      # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
      weights = _array_ops.pad(
          _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
          constant_values=n2)
      dct2 *= weights

    return dct2
Example #23
def _ComplexGrad(op, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
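In eager mode the effect of this gradient is easy to observe: for `loss = |z|^2` with `z = complex(x, y)`, the incoming complex gradient is `2x + 2iy`, so its real part `2x` flows to `x` and its imaginary part `2y` flows to `y`. A minimal TF 2.x sketch:

import tensorflow as tf

x = tf.Variable(1.5)
y = tf.Variable(-0.5)
with tf.GradientTape() as tape:
  z = tf.complex(x, y)
  loss = tf.math.real(z * tf.math.conj(z))  # |z|^2 = x^2 + y^2
dx, dy = tape.gradient(loss, [x, y])
print(dx.numpy(), dy.numpy())  # 3.0 -1.0, i.e. 2x and 2y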
Example #24
 def f(inx, iny):
   inx.set_shape(x.shape)
   iny.set_shape(y.shape)
   # func is a forward or inverse, real or complex, batched or unbatched
   # FFT function with a complex input.
   z = func(math_ops.complex(inx, iny))
   # loss = sum(|z|^2)
   loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
   return loss
Example #25
 def testRealImagNumericType(self):
   for use_gpu in [True, False]:
     for value in [1., 1j, 1. + 1j]:
       np_real, np_imag = np.real(value), np.imag(value)
       with test_util.device(use_gpu=use_gpu):
         tf_real = math_ops.real(value)
         tf_imag = math_ops.imag(value)
         self.assertAllEqual(np_real, self.evaluate(tf_real))
         self.assertAllEqual(np_imag, self.evaluate(tf_imag))
Example #28
def _AngleGrad(op, grad):
  """Returns -grad / (Im(x) + iRe(x))"""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    re = math_ops.real(x)
    im = math_ops.imag(x)
    z = math_ops.reciprocal(math_ops.complex(im, re))
    zero = constant_op.constant(0, dtype=grad.dtype)
    complex_grad = math_ops.complex(grad, zero)
    return -complex_grad * z
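The formula matches the usual atan2 partials: angle(x) = atan2(Im(x), Re(x)), with d/dRe = -Im/|x|^2 and d/dIm = Re/|x|^2, and packing these as (d/dRe) + i*(d/dIm) gives exactly -1/(Im + i*Re). A numeric sketch, assuming TF 2.x eager execution:

import tensorflow as tf

x = tf.Variable(tf.complex(3.0, 4.0))
with tf.GradientTape() as tape:
  theta = tf.math.angle(x)
g = tape.gradient(theta, x)
print(g.numpy())  # (-4 + 3j) / 25 = -0.16 + 0.12j, i.e. -1 / (4 + 3j)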
Example #29
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :,
  :])`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.
  """
  # pylint: disable=protected-access
  s, u, v = gen_linalg_ops._svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices)
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
Example #30
def _AngleGrad(op, grad):
    """Returns -grad / (Im(x) + iRe(x))"""
    x = op.inputs[0]
    with ops.control_dependencies([grad]):
        re = math_ops.real(x)
        im = math_ops.imag(x)
        z = math_ops.reciprocal(math_ops.complex(im, re))
        zero = constant_op.constant(0, dtype=grad.dtype)
        complex_grad = math_ops.complex(grad, zero)
        return -complex_grad * z
Example #31
 def _assert_positive_definite(self):
   # This operator has the action  Ax = F^H D F x,
   # where D is the diagonal matrix with self.spectrum on the diag.  Therefore,
   # <x, Ax> = <Fx, DFx>,
   # Since F is bijective, the condition for positive definite is the same as
   # for a diagonal matrix, i.e. real part of spectrum is positive.
   message = (
       "Not positive definite:  Real part of spectrum was not all positive.")
   return check_ops.assert_positive(
       math_ops.real(self.spectrum), message=message)
Example #32
  def _checkGradReal(self, func, x, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      # func is a forward RFFT function (batched or unbatched).
      z = func(inx)
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
      x_jacob_t, x_jacob_n = test.compute_gradient(
          inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
Example #33
    def _assert_positive_definite(self):
        if self.dtype.is_complex:
            message = (
                "Diagonal operator had diagonal entries with non-positive real part, "
                "thus was not positive definite.")
        else:
            message = (
                "Real diagonal operator had non-positive diagonal entries, "
                "thus was not positive definite.")

        return check_ops.assert_positive(math_ops.real(self._diag),
                                         message=message)
Example #34
def modrelu(z, b, comp):
    """modReLU: rectifies the magnitude of `z` while preserving its phase."""
    if comp:
        # Complex input: compute |z| from the real and imaginary parts; the
        # small epsilon guards the division by |z| below.
        z_norm = math_ops.sqrt(
            math_ops.square(math_ops.real(z)) +
            math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        # relu(|z| + b), lifted back to a complex dtype.
        step2 = math_ops.complex(nn_ops.relu(step1),
                                 array_ops.zeros_like(z_norm))
        # Unit-phase factor z / |z|.
        step3 = z / math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)
    # Output: relu(|z| + b) * (z / |z|).
    return math_ops.multiply(step3, step2)
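Usage sketch for the helper above (assumes `modrelu` and a TF 2.x runtime are in scope; shapes are illustrative). modReLU shifts the magnitude by a per-feature bias, rectifies it, and preserves the phase:

import tensorflow as tf

z = tf.complex(tf.random.normal([4, 8]), tf.random.normal([4, 8]))
b = tf.zeros([8])               # per-feature magnitude bias
out = modrelu(z, b, comp=True)
# Where |z| + b > 0, angle(out) == angle(z); elsewhere the output is ~0.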
Example #35
def _EigGrad(op, grad_e, grad_v):
    """Gradient for Eig.

  Based on eq. 4.77 from the paper by
  Christoph Boeddeker et al.,
  https://arxiv.org/abs/1701.00392.
  See also
  "Computation of eigenvalue and eigenvector derivatives
  for a general complex-valued eigensystem" by Nico van der Aa.
  For now, only the distinct-eigenvalue case is considered.
  """
    e = op.outputs[0]
    compute_v = op.get_attr("compute_v")
    # a = op.inputs[0], which satisfies
    # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,:,i]
    with ops.control_dependencies([grad_e, grad_v]):
        if compute_v:
            v = op.outputs[1]
            vt = _linalg.adjoint(v)
            # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
            # Notice that because of the term involving f, the gradient becomes
            # infinite (or NaN in practice) when eigenvalues are not unique.
            # Mathematically this should not be surprising, since for (k-fold)
            # degenerate eigenvalues, the corresponding eigenvectors are only defined
            # up to arbitrary rotation in a (k-dimensional) subspace.
            f = array_ops.matrix_set_diag(
                _SafeReciprocal(
                    array_ops.expand_dims(e, -2) -
                    array_ops.expand_dims(e, -1)), array_ops.zeros_like(e))
            f = math_ops.conj(f)
            vgv = math_ops.matmul(vt, grad_v)
            mid = array_ops.matrix_diag(grad_e)
            diag_grad_part = array_ops.matrix_diag(
                array_ops.matrix_diag_part(
                    math_ops.cast(math_ops.real(vgv), vgv.dtype)))
            mid += f * (
                vgv - math_ops.matmul(math_ops.matmul(vt, v), diag_grad_part))
            # vt is formally invertible as long as the original matrix is
            # diagonalizable. However, in practice, vt may be ill-conditioned
            # when the original matrix is close to a non-diagonalizable one.
            grad_a = linalg_ops.matrix_solve(vt, math_ops.matmul(mid, vt))
        else:
            _, v = linalg_ops.eig(op.inputs[0])
            vt = _linalg.adjoint(v)
            # vt is formally invertible as long as the original matrix is
            # diagonalizable. However, in practice, vt may be ill-conditioned
            # when the original matrix is close to a non-diagonalizable one.
            grad_a = linalg_ops.matrix_solve(
                vt, math_ops.matmul(array_ops.matrix_diag(grad_e), vt))
        return math_ops.cast(grad_a, op.inputs[0].dtype)
Example #36
  def _assert_positive_definite(self):
    if self.dtype.is_complex:
      message = (
          "Diagonal operator had diagonal entries with non-positive real part, "
          "thus was not positive definite.")
    else:
      message = (
          "Real diagonal operator had non-positive diagonal entries, "
          "thus was not positive definite.")

    return check_ops.assert_positive(
        math_ops.real(self._diag),
        message=message)
Example #37
 def _checkGrad(self, func, x, y, use_gpu=False):
   with self.test_session(use_gpu=use_gpu):
     inx = ops.convert_to_tensor(x)
     iny = ops.convert_to_tensor(y)
     # func is a forward or inverse FFT function (batched or unbatched)
     z = func(math_ops.complex(inx, iny))
     # loss = sum(|z|^2)
     loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
     ((x_jacob_t, x_jacob_n),
      (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
          [inx, iny], [list(x.shape), list(y.shape)],
          loss, [1],
          x_init_value=[x, y],
          delta=1e-2)
   self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
   self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
Example #38
 def _checkGrad(self, func, x, y, use_gpu=False):
     with self.test_session(use_gpu=use_gpu):
         inx = ops.convert_to_tensor(x)
         iny = ops.convert_to_tensor(y)
         # func is a forward or inverse FFT function (batched or unbatched)
         z = func(math_ops.complex(inx, iny))
         # loss = sum(|z|^2)
         loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
         ((x_jacob_t, x_jacob_n),
          (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
              [inx, iny], [list(x.shape), list(y.shape)],
              loss, [1],
              x_init_value=[x, y],
              delta=1e-2)
     self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
     self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
Example #39
def real(val):
    """Returns real parts of all elements in `a`.

  Uses `tf.real`.

  Args:
    val: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.

  Returns:
    An ndarray with the same shape as `val`.
  """
    val = asarray(val)
    # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
    # return an ndarray.
    return np_utils.tensor_to_ndarray(math_ops.real(val.data))
Example #40
      def Compute(x):
        e, v = linalg_ops.eig(x)

        # We sort eigenvalues by e.real+e.imag to have consistent
        # order between runs
        b_dims = len(e.shape) - 1
        idx = sort_ops.argsort(math_ops.real(e) + math_ops.imag(e), axis=-1)
        e = array_ops.gather(e, idx, batch_dims=b_dims)
        v = array_ops.gather(v, idx, batch_dims=b_dims)

        # (complex) Eigenvectors are only unique up to an arbitrary phase
        # We normalize the vectors such that the first component has phase 0.
        top_rows = v[..., 0:1, :]
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
        v *= phase
        return e, v
Example #41
 def _compareGradient(self, x):
   # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
   # complex numbers. Then, we extract real and imag parts and
   # compute the squared sum. This is obviously the same as sum(real
   # * real) + sum(imag * imag). We just want to make sure the
   # gradient function is checked.
   with self.cached_session():
     inx = ops.convert_to_tensor(x)
     real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
     real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
     cplx = math_ops.complex(real, imag)
     cplx = math_ops.conj(cplx)
     loss = math_ops.reduce_sum(math_ops.square(
         math_ops.real(cplx))) + math_ops.reduce_sum(
             math_ops.square(math_ops.imag(cplx)))
     epsilon = 1e-3
     jacob_t, jacob_n = gradient_checker.compute_gradient(
         inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
   self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example #42
  def _checkGradComplex(self, func, x, y, result_is_complex=True,
                        rtol=1e-2, atol=1e-2):
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      # func is a forward or inverse, real or complex, batched or unbatched FFT
      # function with a complex input.
      z = func(math_ops.complex(inx, iny))
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))

      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
           [inx, iny], [list(x.shape), list(y.shape)],
           loss, [1],
           x_init_value=[x, y],
           delta=1e-2)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
    self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)
Example #43
    def _operator_and_mat_and_feed_dict(self, build_info, dtype,
                                        use_placeholder):
        shape = build_info.shape
        # For this test class, we are creating Hermitian spectrums.
        # We also want the spectrum to have eigenvalues bounded away from zero.
        #
        # pre_spectrum is bounded away from zero.
        pre_spectrum = linear_operator_test_util.random_uniform(
            shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
        pre_spectrum_c = _to_complex(pre_spectrum)

        # Real{IFFT[pre_spectrum]}
        #  = IFFT[EvenPartOf[pre_spectrum]]
        # is the IFFT of something that is also bounded away from zero.
        # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
        pre_h = math_ops.ifft2d(pre_spectrum_c)

        # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
        # So we will make spectrum = FFT[h], for real valued h.
        h = math_ops.real(pre_h)
        h_c = _to_complex(h)

        spectrum = math_ops.fft2d(h_c)

        if use_placeholder:
            spectrum_ph = array_ops.placeholder(dtypes.complex64)
            # Evaluate here because (i) you cannot feed a tensor, and (ii)
            # it is random and we want the same value used for both mat and feed_dict.
            spectrum = spectrum.eval()
            operator = linalg.LinearOperatorCirculant2D(
                spectrum_ph, input_output_dtype=dtype)
            feed_dict = {spectrum_ph: spectrum}
        else:
            operator = linalg.LinearOperatorCirculant2D(
                spectrum, input_output_dtype=dtype)
            feed_dict = None

        mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

        return operator, mat, feed_dict
Example #44
  def _operator_and_mat_and_feed_dict(self, build_info, dtype, use_placeholder):
    shape = build_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = math_ops.ifft2d(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = math_ops.fft2d(h_c)

    if use_placeholder:
      spectrum_ph = array_ops.placeholder(dtypes.complex64)
      # Evaluate here because (i) you cannot feed a tensor, and (ii)
      # it is random and we want the same value used for both mat and feed_dict.
      spectrum = spectrum.eval()
      operator = linalg.LinearOperatorCirculant2D(
          spectrum_ph, input_output_dtype=dtype)
      feed_dict = {spectrum_ph: spectrum}
    else:
      operator = linalg.LinearOperatorCirculant2D(
          spectrum, input_output_dtype=dtype)
      feed_dict = None

    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)

    return operator, mat, feed_dict
Example #45
 def testRealReal(self):
   for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
                 dtypes_lib.float64):
     x = array_ops.placeholder(dtype)
     y = math_ops.real(x)
     self.assertEqual(x, y)
Example #46
  def _Grad(op, grad):
    """A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
    fft_length = op.inputs[1]
    input_shape = array_ops.shape(op.inputs[0])
    is_even = math_ops.cast(1 - (fft_length[-1] % 2), dtypes.complex64)

    def _TileForBroadcasting(matrix, t):
      expanded = array_ops.reshape(
          matrix,
          array_ops.concat([
              array_ops.ones([array_ops.rank(t) - 2], dtypes.int32),
              array_ops.shape(matrix)
          ], 0))
      return array_ops.tile(
          expanded, array_ops.concat([array_ops.shape(t)[:-2], [1, 1]], 0))

    def _MaskMatrix(length):
      # TODO(rjryan): Speed up computation of twiddle factors using the
      # following recurrence relation and cache them across invocations of RFFT.
      #
      # t_n = exp(sqrt(-1) * pi * n^2 / line_len)
      # for n = 0, 1,..., line_len-1.
      # For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
      a = array_ops.tile(
          array_ops.expand_dims(math_ops.range(length), 0), (length, 1))
      b = array_ops.transpose(a, [1, 0])
      return math_ops.exp(-2j * np.pi * math_ops.cast(a * b, dtypes.complex64) /
                          math_ops.cast(length, dtypes.complex64))

    def _YMMask(length):
      """A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
      return math_ops.cast(1 - 2 * (math_ops.range(length) % 2),
                           dtypes.complex64)

    y0 = grad[..., 0:1]
    if rank == 1:
      ym = grad[..., -1:]
      extra_terms = y0 + is_even * ym * _YMMask(input_shape[-1])
    elif rank == 2:
      # Create a mask matrix for y0 and ym.
      base_mask = _MaskMatrix(input_shape[-2])

      # Tile base_mask to match y0 in shape so that we can batch-matmul the
      # inner 2 dimensions.
      tiled_mask = _TileForBroadcasting(base_mask, y0)

      y0_term = math_ops.matmul(tiled_mask, math_ops.conj(y0))
      extra_terms = y0_term

      ym = grad[..., -1:]
      ym_term = math_ops.matmul(tiled_mask, math_ops.conj(ym))

      inner_dim = input_shape[-1]
      ym_term = array_ops.tile(
          ym_term,
          array_ops.concat([
              array_ops.ones([array_ops.rank(grad) - 1], dtypes.int32),
              [inner_dim]
          ], 0)) * _YMMask(inner_dim)

      extra_terms += is_even * ym_term

    # The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
    # factor, plus some additional terms to make up for the components dropped
    # due to Hermitian symmetry.
    input_size = math_ops.to_float(_FFTSizeForGrad(op.inputs[0], rank))
    irfft = irfft_fn(grad, fft_length)
    return 0.5 * (irfft * input_size + math_ops.real(extra_terms)), None
Example #47
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  r"""Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
   transpose(conj(v[..., :, :]))`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that
    * The order of output arguments here is `s`, `u`, `v` when `compute_uv` is
      `True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd.
    * full_matrices is `False` by default as opposed to `True` for
       numpy.linalg.svd.
    * tf.linalg.svd uses the standard definition of the SVD
      \\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are
      the columns of `u`, while the right singular vectors of `a` are the
      columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint
      \\(V^H\\) as the third output argument.
  ```python
  import tensorflow as tf
  import numpy as np
  s, u, v = tf.linalg.svd(a)
  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
  u, s, v_adj = np.linalg.svd(a, full_matrices=False)
  np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
  # tf_a_approx and np_a_approx should be numerically close.
  ```
  @end_compatibility
  """
  s, u, v = gen_linalg_ops.svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
Example #48
def _abs_square(x):
  if x.dtype.is_complex:
    return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
  else:
    return math_ops.square(x)
Example #49
 def _assert_positive_definite(self):
   return check_ops.assert_positive(
       math_ops.real(self.multiplier),
       message="LinearOperator was not positive definite.")
Example #50
def _ComplexGrad(_, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  return math_ops.real(grad), math_ops.imag(grad)