def testStripDefaultAttrsInconsistentConsumerDefaults(self):
    if ops._USE_C_API: return  # TODO(skyewm): get this working

    export_dir = self._get_export_dir(
        "test_strip_default_attrs_no_consumer_defaults")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Add a graph with two float32 variables and a Complex Op composing them
    # with strip_default_attrs enabled. This must remove the following
    # defaults for the "Complex" Op:
    #   o "T"    : float32.   (input type)
    #   o "Tout" : complex64. (output type)
    with session.Session(graph=ops.Graph()) as sess:
      real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
      imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")
      sess.run(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(
          sess, ["foo"], strip_default_attrs=True)

    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)

    # Update the Op registry to remove defaults for all attrs("T", "Tout") from
    # the "Complex" OpDef.
    complex_op_def = op_def_registry.get_registered_ops()["Complex"]
    original_complex_op_def = op_def_pb2.OpDef()
    original_complex_op_def.CopyFrom(complex_op_def)
    for attr_def in complex_op_def.attr:
      attr_def.ClearField("default_value")

    # Loading the SavedModel via the loader must fail because the SavedModel
    # does not have any attr values for the "Complex" node and the current
    # op registry does not have any default values for the "Complex" op.
    sess = session.Session(graph=ops.Graph())
    with self.assertRaisesRegexp(
        ValueError,
        "Expected one attr with name .*T(out)?.* in name: \"complex\".*"):
      loader.load(sess, ["foo"], export_dir)

    # Update the Op registry to change the defaults for attr "Tout"
    # (complex64 -> complex128).
    complex_op_def.CopyFrom(original_complex_op_def)
    for attr_def in complex_op_def.attr:
      if attr_def.name == "Tout":
        attr_def.default_value.type = types_pb2.DT_COMPLEX128

    # Loading the SavedModel via the loader must set "Tout" attr_value for the
    # "Complex" node according to the latest defaults (complex128). This is
    # expected to fail the model import as there is no OpKernel registered to
    # handle attrs "T" (float32) and "Tout" (complex128).
    sess = session.Session(graph=ops.Graph())
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        ".*No OpKernel was registered to support Op \'Complex\' with these "
        "attrs..*"):
      loader.load(sess, ["foo"], export_dir)
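
The behavior exercised above reduces to one rule: a node attr is stripped only when its value equals the default recorded in the corresponding OpDef, and the loader later re-fills it from the registry. A minimal illustrative sketch of that rule (not TensorFlow's internal helper; `strip_default_valued_attrs` is a name chosen here):

def strip_default_valued_attrs(node_def, op_def):
  """Illustrative sketch: drop node attrs that equal their OpDef defaults."""
  for attr_def in op_def.attr:
    if not attr_def.HasField("default_value"):
      continue  # No registered default, so this attr is never stripped.
    name = attr_def.name
    if name in node_def.attr and node_def.attr[name] == attr_def.default_value:
      del node_def.attr[name]  # Value matches the registered default.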
Example #2
def _AngleGrad(op, grad):
  """Returns -grad / (Im(x) + iRe(x))"""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    re = math_ops.real(x)
    im = math_ops.imag(x)
    z = math_ops.reciprocal(math_ops.complex(im, re))
    zero = constant_op.constant(0, dtype=grad.dtype)
    complex_grad = math_ops.complex(grad, zero)
    return -complex_grad * z
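
A quick numerical check of the identity behind `_AngleGrad`: for x = a + ib, angle(x) = atan2(b, a) has partials d/da = -b / (a^2 + b^2) and d/db = a / (a^2 + b^2), and packing them as (d/da) + i*(d/db) equals -1 / (b + ia), the factor multiplied into `grad` above. A small NumPy sketch with arbitrary values:

import numpy as np

a, b = 3.0, -2.0                      # arbitrary point x = a + ib
d_da = -b / (a**2 + b**2)             # d angle / d Re(x)
d_db = a / (a**2 + b**2)              # d angle / d Im(x)
# Packing the two partials as real and imaginary parts reproduces -1 / (b + ia).
np.testing.assert_allclose(d_da + 1j * d_db, -1.0 / (b + 1j * a))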
Example #3
  def testDefaultAttrStripping(self):
    """Verifies that default attributes are stripped from a graph def."""

    # Complex Op has 2 attributes with defaults:
    #   o "T"    : float32.
    #   o "Tout" : complex64.

    # When inputs to the Complex Op are float32 instances, "T" maps to float32
    # and "Tout" maps to complex64. Since these attr values map to their
    # defaults, they must be stripped unless stripping of default attrs is
    # disabled.
    with self.cached_session():
      real_num = constant_op.constant(1.0, dtype=dtypes.float32, name="real")
      imag_num = constant_op.constant(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")

      # strip_default_attrs is enabled.
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=True)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertNotIn("T", node_def.attr)
      self.assertNotIn("Tout", node_def.attr)
      self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)

      # strip_default_attrs is disabled.
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=False)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertIn("T", node_def.attr)
      self.assertIn("Tout", node_def.attr)
      self.assertFalse(meta_graph_def.meta_info_def.stripped_default_attrs)

    # When inputs to the Complex Op are float64 instances, "T" maps to float64
    # and "Tout" maps to complex128. Since these attr values don't map to their
    # defaults, they must not be stripped.
    with self.session(graph=ops.Graph()):
      real_num = constant_op.constant(1.0, dtype=dtypes.float64, name="real")
      imag_num = constant_op.constant(2.0, dtype=dtypes.float64, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")
      meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
          graph_def=ops.get_default_graph().as_graph_def(),
          strip_default_attrs=True)
      node_def = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
      self.assertEqual(node_def.attr["T"].type, dtypes.float64)
      self.assertEqual(node_def.attr["Tout"].type, dtypes.complex128)
      self.assertTrue(meta_graph_def.meta_info_def.stripped_default_attrs)
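
The "T"/"Tout" defaults referenced in the comments above live in the registered OpDef for "Complex". They can be inspected the same way the first example in this listing does, via `op_def_registry`; a small standalone sketch:

from tensorflow.python.framework import op_def_registry

complex_op_def = op_def_registry.get_registered_ops()["Complex"]
for attr_def in complex_op_def.attr:
  # Prints the DataType enum of each default: DT_FLOAT for "T",
  # DT_COMPLEX64 for "Tout".
  print(attr_def.name, attr_def.default_value.type)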
Example #4
def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32,
                   seed=None):
  """Tensor with (possibly complex) Uniform entries.

  Samples are distributed like

  ```
  Uniform[minval, maxval], if dtype is real,
  X + iY,  where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    minval:  `0-D` `Tensor` giving the minimum values.
    maxval:  `0-D` `Tensor` giving the maximum values.
    dtype:  `TensorFlow` `dtype` or Python dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)

  with ops.name_scope("random_uniform"):
    samples = random_ops.random_uniform(
        shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 12345
      more_samples = random_ops.random_uniform(
          shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
          seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
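
Hypothetical usage of the helper above (assuming the imports used by the example): when a complex dtype is requested, the real and imaginary parts are drawn independently from the same Uniform[minval, maxval) distribution.

# Produces a complex64 tensor of shape [2, 3] with Uniform[-1, 1) parts.
z = random_uniform([2, 3], minval=-1., maxval=1., dtype=dtypes.complex64, seed=42)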
Example #5
  def _trace(self):
    # The diagonal of the [[nested] block] circulant operator is the mean of
    # the spectrum.
    # Proof:  For the [0,...,0] element, this follows from the IDFT formula.
    # Then the result follows since all diagonal elements are the same.

    # Therefore, the trace is the sum of the spectrum.

    # Get shape of diag along with the axis over which to reduce the spectrum.
    # We will reduce the spectrum over all block indices.
    if self.spectrum.get_shape().is_fully_defined():
      spec_rank = self.spectrum.get_shape().ndims
      axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32)
    else:
      spec_rank = array_ops.rank(self.spectrum)
      axis = math_ops.range(spec_rank - self.block_depth, spec_rank)

    # Real diag part "re_d".
    # Suppose spectrum.shape = [B1,...,Bb, N1, N2]
    # self.shape = [B1,...,Bb, N, N], with N1 * N2 = N.
    # re_d_value.shape = [B1,...,Bb]
    re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis)

    if not self.dtype.is_complex:
      return math_ops.cast(re_d_value, self.dtype)

    # Imaginary part, "im_d".
    if self.is_self_adjoint:
      im_d_value = 0.
    else:
      im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis)

    return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype)
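
The claim in the comments above (the diagonal of a circulant operator equals the mean of its spectrum, so the trace equals the sum of the spectrum) is easy to verify numerically with NumPy; the spectrum below is an arbitrary example:

import numpy as np

spectrum = np.array([4., 1., 2., 1.])              # arbitrary eigenvalue spectrum
first_col = np.fft.ifft(spectrum)                  # IDFT gives the first column
# Circulant matrix C[i, j] = first_col[(i - j) mod N]; every diagonal entry is
# first_col[0], i.e. the mean of the spectrum.
circ = np.stack([np.roll(first_col, k) for k in range(len(spectrum))], axis=1)
np.testing.assert_allclose(np.trace(circ), spectrum.sum())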
Example #6
 def test_assert_positive_definite_does_not_raise_if_pd_and_complex(self):
   with self.test_session():
     x = [1., 2.]
     y = [1., 0.]
     diag = math_ops.complex(x, y)  # Re[diag] > 0.
     # Should not fail
     linalg.LinearOperatorDiag(diag).assert_positive_definite().run()
Example #7
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY,  where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    mean:  `Tensor` giving mean of normal to sample from.
    stddev:  `Tensor` giving stdev of normal to sample from.
    dtype:  `TensorFlow` `dtype` or numpy dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)

  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
Example #8
 def test_complex_tensor_with_imag_zero_doesnt_raise(self):
   x = ops.convert_to_tensor([1., 0, 3])
   y = ops.convert_to_tensor([0., 0, 0])
   z = math_ops.complex(x, y)
   with self.cached_session():
     # Should not raise.
     linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Example #9
 def test_complex_tensor_with_nonzero_imag_raises(self):
   x = ops.convert_to_tensor([1., 2, 0])
   y = ops.convert_to_tensor([1., 2, 0])
   z = math_ops.complex(x, y)
   with self.cached_session():
     with self.assertRaisesOpError("ABC123"):
       linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Example #10
 def test_assert_non_singular_does_not_raise_for_complex_nonsingular(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 1.]
     diag = math_ops.complex(x, y)
     # Should not raise.
     linalg.LinearOperatorDiag(diag).assert_non_singular().run()
Example #11
  def _test_unary_cwise_ops(self, ops, is_complex):
    for op in ops:
      with backprop.GradientTape(persistent=True) as g:
        x = random_ops.random_uniform([3, 5])
        g.watch(x)
        if is_complex:
          y = random_ops.random_uniform([3, 5])
          g.watch(y)
          x = math_ops.complex(x, y)

      # pylint: disable=cell-var-from-loop
      output_dtypes = []

      def loop_fn(i):
        with g:
          x1 = array_ops.gather(x, i)
          y1 = op(x1)
          outputs = [op(x), y1]
          if y1.dtype == dtypes.float32:
            loss = math_ops.reduce_sum(y1 * y1)
          else:
            loss = None
        if loss is not None:
          grad = g.gradient(loss, x1)
          if grad is not None:
            outputs.append(grad)
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs

      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
Example #12
 def test_zero_complex_tensor_raises(self):
   x = ops.convert_to_tensor([1., 2, 0])
   y = ops.convert_to_tensor([1., 2, 0])
   z = math_ops.complex(x, y)
   with self.test_session():
     with self.assertRaisesOpError("ABC123"):
       linear_operator_util.assert_no_entries_with_modulus_zero(
           z, message="ABC123").run()
Example #13
 def test_nonzero_complex_tensor_doesnt_raise(self):
   x = ops.convert_to_tensor([1., 0, 3])
   y = ops.convert_to_tensor([1., 2, 0])
   z = math_ops.complex(x, y)
   with self.cached_session():
     # Should not raise.
     linear_operator_util.assert_no_entries_with_modulus_zero(
         z, message="ABC123").run()
Example #14
 def test_assert_self_adjoint_does_not_raise_for_diag_with_zero_imag(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 0.]
     diag = math_ops.complex(x, y)
     operator = linalg.LinearOperatorDiag(diag)
     # Should not raise
     operator.assert_self_adjoint().run()
Example #15
 def test_assert_self_adjoint_raises_if_diag_has_complex_part(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 1.]
     diag = math_ops.complex(x, y)
     operator = linalg.LinearOperatorDiag(diag)
     with self.assertRaisesOpError("imaginary.*not self-adjoint"):
       operator.assert_self_adjoint().run()
Example #16
 def _compareBroadcastGradient(self, x):
   x_ = ops.convert_to_tensor(x)
   epsilon = 1e-3
   with self.cached_session():
     for args in [(x_, 0.), (0., x_)]:
       z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
       jacob_t, jacob_n = gradient_checker.compute_gradient(
           x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
       self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example #17
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.

  Currently only Type II is supported. Implemented using a length `2N` padded
  @{tf.spectral.rfft}, as described here: https://dsp.stackexchange.com/a/10606

  @compatibility(scipy)
  Equivalent to scipy.fftpack.dct for the Type-II DCT.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32` `Tensor` containing the signals to
      take the DCT of.
    type: The DCT type to perform. Must be 2.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.

  Raises:
    ValueError: If `type` is not `2`, `n` is not `None`, `axis` is not `-1`, or
      `norm` is not `None` or `'ortho'`.

  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
  """
  _validate_dct_arguments(type, n, axis, norm)
  with _ops.name_scope(name, "dct", [input]):
    # We use the RFFT to compute the DCT and TensorFlow only supports float32
    # for FFTs at the moment.
    input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)

    axis_dim = input.shape[-1].value or _array_ops.shape(input)[-1]
    axis_dim_float = _math_ops.to_float(axis_dim)
    scale = 2.0 * _math_ops.exp(_math_ops.complex(
        0.0, -_math.pi * _math_ops.range(axis_dim_float) /
        (2.0 * axis_dim_float)))

    # TODO(rjryan): Benchmark performance and memory usage of the various
    # approaches to computing a DCT via the RFFT.
    dct2 = _math_ops.real(
        rfft(input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)

    if norm == "ortho":
      n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
      n2 = n1 * _math_ops.sqrt(2.0)
      # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
      weights = _array_ops.pad(
          _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
          constant_values=n2)
      dct2 *= weights

    return dct2
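
The padded-RFFT trick in `dct` relies on the identity Re(2 * exp(-i*pi*k/(2N)) * RFFT(x zero-padded to 2N)[k]) == unnormalized DCT-II(x)[k]. A short NumPy check of that identity on random data:

import numpy as np

x = np.random.RandomState(0).randn(8).astype(np.float32)
N = x.shape[0]
k = np.arange(N)
scale = 2.0 * np.exp(-1j * np.pi * k / (2.0 * N))
via_rfft = np.real(np.fft.rfft(x, n=2 * N)[:N] * scale)
# Unnormalized DCT-II: y[k] = 2 * sum_n x[n] * cos(pi * k * (2n + 1) / (2N)).
direct = np.array([
    2.0 * np.sum(x * np.cos(np.pi * kk * (2 * np.arange(N) + 1) / (2.0 * N)))
    for kk in k])
np.testing.assert_allclose(via_rfft, direct, rtol=1e-5, atol=1e-5)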
Example #18
 def _compareMake(self, real, imag, use_gpu):
   np_ans = real + (1j) * imag
   with self.test_session(use_gpu=use_gpu,
                          force_gpu=use_gpu and test_util.is_gpu_available()):
     real = ops.convert_to_tensor(real)
     imag = ops.convert_to_tensor(imag)
     tf_ans = math_ops.complex(real, imag)
     out = self.evaluate(tf_ans)
   self.assertAllEqual(np_ans, out)
   self.assertShapeEqual(np_ans, tf_ans)
Example #19
  def _compareMake(self, real, imag, use_gpu):
    np_ans = real + (1j) * imag

    with test_util.device(use_gpu=use_gpu):
      real = ops.convert_to_tensor(real)
      imag = ops.convert_to_tensor(imag)
      tf_ans = math_ops.complex(real, imag)
      out = self.evaluate(tf_ans)

    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)
Example #20
  def test_assert_positive_definite_raises_for_negative_real_eigvalues(self):
    with self.test_session():
      diag_x = [1.0, -2.0]
      diag_y = [0., 0.]  # Imaginary eigenvalues should not matter.
      diag = math_ops.complex(diag_x, diag_y)
      operator = linalg.LinearOperatorDiag(diag)

      # is_self_adjoint should not be auto-set for complex diag.
      self.assertTrue(operator.is_self_adjoint is None)
      with self.assertRaisesOpError("non-positive real.*not positive definite"):
        operator.assert_positive_definite().run()
Example #21
 def Test(self):
   np.random.seed(1)
   n = shape_[-1]
   batch_shape = shape_[:-2]
   np_dtype = dtype_.as_numpy_dtype
   a = np.random.uniform(
       low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   if dtype_.is_complex:
     a += 1j * np.random.uniform(
         low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   a += np.conj(a.T)
   a = np.tile(a, batch_shape + (1, 1))
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(np_dtype).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   # tolerance obtained by looking at actual differences using
   # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
   if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
     tol = 1e-2
   else:
     tol = 1e-7
   with self.session(use_gpu=True):
     tf_a = constant_op.constant(a)
     if compute_v_:
       tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
        # (complex) Eigenvectors are only unique up to an arbitrary phase.
       # We normalize the vectors such that the first component has phase 0.
       top_rows = tf_v[..., 0:1, :]
       if tf_a.dtype.is_complex:
         angle = -math_ops.angle(top_rows)
         phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
       else:
         phase = math_ops.sign(top_rows)
       tf_v *= phase
       outputs = [tf_e, tf_v]
     else:
       tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
       outputs = [tf_e]
     for b in outputs:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       if dtype_.is_complex:
         x_init += 1j * np.random.uniform(
             low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       x_init += np.conj(x_init.T)
       x_init = np.tile(x_init, batch_shape + (1, 1))
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Example #22
 def Compute(x):
   e, v = linalg_ops.self_adjoint_eig(x)
   # (complex) Eigenvectors are only unique up to an arbitrary phase.
   # We normalize the vectors such that the first component has phase 0.
   top_rows = v[..., 0:1, :]
   if dtype_.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   v *= phase
   return e, v
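
The phase normalization used in `Test` and `Compute` above can be illustrated in plain NumPy: multiplying each eigenvector column by exp(-i*angle(top_row)) forces the first entry of every column to have phase 0, removing the arbitrary per-column phase of complex eigenvectors. The random Hermitian matrix below is only for illustration:

import numpy as np

rng = np.random.RandomState(0)
a = rng.randn(4, 4) + 1j * rng.randn(4, 4)
a = a + a.conj().T                                  # Hermitian test matrix
_, v = np.linalg.eigh(a)
top_rows = v[0:1, :]                                # first component of each eigenvector
phase = np.exp(-1j * np.angle(top_rows))
v = v * phase
# After normalization the first row is (numerically) real and non-negative.
np.testing.assert_allclose(v[0, :].imag, 0.0, atol=1e-12)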
Example #23
 def _checkGrad(self, func, x, y, use_gpu=False):
   with self.test_session(use_gpu=use_gpu):
     inx = ops.convert_to_tensor(x)
     iny = ops.convert_to_tensor(y)
     # func is a forward or inverse FFT function (batched or unbatched)
     z = func(math_ops.complex(inx, iny))
     # loss = sum(|z|^2)
     loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
     ((x_jacob_t, x_jacob_n),
      (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
          [inx, iny], [list(x.shape), list(y.shape)],
          loss, [1],
          x_init_value=[x, y],
          delta=1e-2)
   self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
   self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
Example #24
    def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
        np.random.seed(7)
        if dtype in (dtypes.complex64, dtypes.complex128):
            value = math_ops.complex(
                self._biasedRandN(shape, bias=bias, sigma=sigma), self._biasedRandN(shape, bias=bias, sigma=sigma)
            )
        else:
            value = ops.convert_to_tensor(self._biasedRandN(shape, bias=bias), dtype=dtype)

        with self.test_session(use_gpu=True):
            if dtype in (dtypes.complex64, dtypes.complex128):
                output = math_ops.complex_abs(value)
            else:
                output = math_ops.abs(value)
            error = gradient_checker.compute_gradient_error(value, shape, output, output.get_shape().as_list())
        self.assertLess(error, max_error)
Example #25
 def _NormalizingSvd(tf_a):
   tf_s, tf_u, tf_v = linalg_ops.svd(tf_a, compute_uv=True, full_matrices=True)
   # Singular vectors are only unique up to an arbitrary phase. We normalize
   # the vectors such that the first component of u (if m >= n) or v (if n > m)
   # has phase 0.
   m = tf_a.shape[-2]
   n = tf_a.shape[-1]
   if m >= n:
     top_rows = tf_u[..., 0:1, :]
   else:
     top_rows = tf_v[..., 0:1, :]
   if tf_u.dtype.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   tf_u *= phase[..., :m]
   tf_v *= phase[..., :n]
   return tf_s, tf_u, tf_v
Example #26
 def _compareGradient(self, x):
   # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
   # complex numbers. Then, we extract real and imag parts and
   # compute the squared sum. This is obviously the same as sum(real
   # * real) + sum(imag * imag). We just want to make sure the
   # gradient function is checked.
   with self.cached_session():
     inx = ops.convert_to_tensor(x)
     real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
     real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
     cplx = math_ops.complex(real, imag)
     cplx = math_ops.conj(cplx)
     loss = math_ops.reduce_sum(math_ops.square(
         math_ops.real(cplx))) + math_ops.reduce_sum(
             math_ops.square(math_ops.imag(cplx)))
     epsilon = 1e-3
     jacob_t, jacob_n = gradient_checker.compute_gradient(
         inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
   self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example #27
  def _checkGradComplex(self, func, x, y, result_is_complex=True,
                        rtol=1e-2, atol=1e-2):
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      # func is a forward or inverse, real or complex, batched or unbatched FFT
      # function with a complex input.
      z = func(math_ops.complex(inx, iny))
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))

      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
           [inx, iny], [list(x.shape), list(y.shape)],
           loss, [1],
           x_init_value=[x, y],
           delta=1e-2)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
    self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)
Example #28
def _FFT2DGrad(_, grad):
    size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
    return math_ops.ifft2d(grad) * math_ops.complex(size, 0.)
Example #29
def _ImagGrad(_, grad):
    """Returns 'grad' as the imaginary part and set the real part 0."""
    zero = constant_op.constant(0, dtype=grad.dtype)
    return math_ops.complex(zero, grad)
Example #30
def _IFFT2DGrad(_, grad):
    rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
    return math_ops.fft2d(grad) * math_ops.complex(rsize, 0.)
Example #31
def _FFT3DGrad(_, grad):
    size = math_ops.cast(array_ops.size(grad), dtypes.float32)
    return math_ops.ifft3d(grad) * math_ops.complex(size, 0.)
Example #32
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
Example #33
def _IFFT3DGrad(_, grad):
    rsize = 1. / math_ops.cast(array_ops.size(grad), dtypes.float32)
    return math_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
Example #34
 def complex_dataset_factory():
     return dataset_ops.Dataset.from_tensor_slices(
         math_ops.complex(random_input, complex_component))
Example #35
 def __call__(self):
     return math_ops.complex(constant_op.constant(1.),
                             constant_op.constant(2.),
                             name="complex")
Example #36
def _BatchIFFT3DGrad(_, grad):
    rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
    return math_ops.batch_fft3d(grad) * math_ops.complex(rsize, 0.)
Example #37
def auto_correlation(
    x,
    axis=-1,
    max_lags=None,
    center=True,
    normalize=True,
    name="auto_correlation"):
  """Auto correlation along one axis.

  Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
  `RXX` may be defined as  (with `E` expectation and `Conj` complex conjugate)

  ```
  RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
  W[n]   := (X[n] - MU) / S,
  MU     := E{ X[0] },
  S**2   := E{ (X[0] - MU) Conj(X[0] - MU) }.
  ```

  This function takes the viewpoint that `x` is (along one axis) a finite
  sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
  estimate of `RXX[m]` as follows:

  After extending `x` from length `L` to `inf` by zero padding, the auto
  correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as

  ```
  rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
  w[n]   := (x[n] - mu) / s,
  mu     := L**-1 sum_n x[n],
  s**2   := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
  ```

  The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
  often set `max_lags` small enough so that the entire output is meaningful.

  Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
  `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
  contains a slight bias, which goes to zero as `len(x) - m --> infinity`.

  Args:
    x:  `float32` or `complex64` `Tensor`.
    axis:  Python `int`. The axis number along which to compute correlation.
      Other dimensions index different batch members.
    max_lags:  Positive `int` tensor.  The maximum value of `m` to consider
      (in equation above).  If `max_lags >= x.shape[axis]`, we effectively
      re-set `max_lags` to `x.shape[axis] - 1`.
    center:  Python `bool`.  If `False`, do not subtract the mean estimate `mu`
      from `x[n]` when forming `w[n]`.
    normalize:  Python `bool`.  If `False`, do not divide by the variance
      estimate `s**2` when forming `w[n]`.
    name:  `String` name to prepend to created ops.

  Returns:
    `rxx`: `Tensor` of same `dtype` as `x`.  `rxx.shape[i] = x.shape[i]` for
      `i != axis`, and `rxx.shape[axis] = max_lags + 1`.

  Raises:
    TypeError:  If `x` is not a supported type.
  """
  # Implementation details:
  # Extend length N / 2 1-D array x to length N by zero padding onto the end.
  # Then, set
  #   F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
  # It is not hard to see that
  #   F[x]_k Conj(F[x]_k) = F[R]_k, where
  #   R_m := sum_n x_n Conj(x_{(n - m) mod N}).
  # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].

  # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
  # based version of estimating RXX.
  # Note that this is a special case of the Wiener-Khinchin Theorem.
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    # Rotate dimensions of x in order to put axis at the rightmost dim.
    # FFT op requires this.
    rank = util.prefer_static_rank(x)
    if axis < 0:
      axis = rank + axis
    shift = rank - 1 - axis
    # Suppose x.shape[axis] = T, so there are T "time" steps.
    #   ==> x_rotated.shape = B + [T],
    # where B is x_rotated's batch shape.
    x_rotated = util.rotate_transpose(x, shift)

    if center:
      x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True)

    # x_len = N / 2 from above explanation.  The length of x along axis.
    # Get a value for x_len that works in all cases.
    x_len = util.prefer_static_shape(x_rotated)[-1]

    # TODO(langmore) Investigate whether this zero padding helps or hurts.  At
    # the moment it is necessary so that all FFT implementations work.
    # Zero pad to the next power of 2 greater than 2 * x_len, which equals
    # 2**(ceil(Log_2(2 * x_len))).  Note: Log_2(X) = Log_e(X) / Log_e(2).
    x_len_float64 = math_ops.cast(x_len, np.float64)
    target_length = math_ops.pow(
        np.float64(2.),
        math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.)))
    pad_length = math_ops.cast(target_length - x_len_float64, np.int32)

    # We should have:
    # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
    #                     = B + [T + pad_length]
    x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)

    dtype = x.dtype
    if not dtype.is_complex:
      if not dtype.is_floating:
        raise TypeError("Argument x must have either float or complex dtype"
                        " found: {}".format(dtype))
      x_rotated_pad = math_ops.complex(x_rotated_pad,
                                       dtype.real_dtype.as_numpy_dtype(0.))

    # Autocorrelation is IFFT of power-spectral density (up to some scaling).
    fft_x_rotated_pad = spectral_ops.fft(x_rotated_pad)
    spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad)
    # shifted_product is R[m] from above detailed explanation.
    # It is the inner product sum_n X[n] * Conj(X[n - m]).
    shifted_product = spectral_ops.ifft(spectral_density)

    # Cast back to real-valued if x was real to begin with.
    shifted_product = math_ops.cast(shifted_product, dtype)

    # Figure out if we can deduce the final static shape, and set max_lags.
    # Use x_rotated as a reference, because it has the time dimension in the far
    # right, and was created before we performed all sorts of crazy shape
    # manipulations.
    know_static_shape = True
    if not x_rotated.shape.is_fully_defined():
      know_static_shape = False
    if max_lags is None:
      max_lags = x_len - 1
    else:
      max_lags = ops.convert_to_tensor(max_lags, name="max_lags")
      max_lags_ = tensor_util.constant_value(max_lags)
      if max_lags_ is None or not know_static_shape:
        know_static_shape = False
        max_lags = math_ops.minimum(x_len - 1, max_lags)
      else:
        max_lags = min(x_len - 1, max_lags_)

    # Chop off the padding.
    # We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
    shifted_product_chopped = shifted_product[..., :max_lags + 1]

    # If possible, set shape.
    if know_static_shape:
      chopped_shape = x_rotated.shape.as_list()
      chopped_shape[-1] = min(x_len, max_lags + 1)
      shifted_product_chopped.set_shape(chopped_shape)

    # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]).  The
    # other terms were zeros arising only due to zero padding.
    # `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
    # E[X[n] Conj(X[n - m])].
    x_len = math_ops.cast(x_len, dtype.real_dtype)
    max_lags = math_ops.cast(max_lags, dtype.real_dtype)
    denominator = x_len - math_ops.range(0., max_lags + 1.)
    denominator = math_ops.cast(denominator, dtype)
    shifted_product_rotated = shifted_product_chopped / denominator

    if normalize:
      shifted_product_rotated /= shifted_product_rotated[..., :1]

    # Transpose dimensions back to those of x.
    return util.rotate_transpose(shifted_product_rotated, -shift)
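
The zero-pad plus FFT/IFFT identity described in the implementation comments (a special case of the Wiener-Khinchin theorem) can be checked directly in NumPy: after padding x to at least twice its length, IFFT(FFT(x) * conj(FFT(x)))[m] equals the lag-m inner product sum_n x[n] * conj(x[n - m]) with no circular wrap-around:

import numpy as np

x = np.random.RandomState(0).randn(16)
f = np.fft.fft(x, n=2 * len(x))                     # zero-pad to 2L, then FFT
shifted_product = np.fft.ifft(f * np.conj(f)).real[:len(x)]
# Direct lag-m sums over the valid (non-wrapped) overlap.
direct = np.array([np.sum(x[m:] * x[:len(x) - m]) for m in range(len(x))])
np.testing.assert_allclose(shifted_product, direct, atol=1e-10)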
Example #38
def _ImagGrad(_, grad):
  """Returns 'grad' as the imaginary part and set the real part 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(zero, grad)
Example #39
def _BatchFFT3DGrad(_, grad):
    size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
    return math_ops.batch_ifft3d(grad) * math_ops.complex(size, 0.)
Example #40
def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) * math_ops.sign(
      op.inputs[0]))
Example #41
 def f0(i, j):
   return math_ops.complex(i, j, name="double_nested_complex")