  def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
    """Creates a circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way.  This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape:  Python list.  Desired shape of returned matrix.
      dtype:  Type to cast the returned matrix to.

    Returns:
      Circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    if not domain_dimension:
      return array_ops.zeros(shape, dtype)

    # Explicitly compute the action of spectrum on basis vectors.
    matrix_rows = []
    for m in range(domain_dimension):
      x = np.zeros([domain_dimension])
      # x is a basis vector.
      x[m] = 1.0
      fft_x = fft_ops.fft(x.astype(np.complex64))
      h_convolve_x = fft_ops.ifft(spectrum * fft_x)
      matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
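For reference, a small self-contained NumPy sketch of the same construction (illustrative only, not part of the test class): column m of the circulant matrix is IFFT[spectrum * FFT[e_m]], which by the DFT shift theorem is the kernel h = IFFT[spectrum] circularly shifted by m.

import numpy as np

spectrum = np.fft.fft(np.array([1.0, 2.0, 3.0, 4.0]))  # spectrum of a real kernel

n = spectrum.shape[-1]
# Column-by-column construction, mirroring _spectrum_to_circulant_1d above.
columns = []
for m in range(n):
    e_m = np.zeros(n)
    e_m[m] = 1.0
    columns.append(np.fft.ifft(spectrum * np.fft.fft(e_m)))
matrix = np.stack(columns, axis=-1)

# Direct construction: circulant matrix whose first column is h = IFFT[spectrum].
h = np.fft.ifft(spectrum)
direct = np.stack([np.roll(h, m) for m in range(n)], axis=-1)

np.testing.assert_allclose(matrix, direct, atol=1e-12)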
  def operator_and_matrix(
      self, shape_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = shape_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)
    pre_spectrum = math_ops.cast(math_ops.abs(pre_spectrum), dtype=dtype)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = fft_ops.fft(h_c)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        input_output_dtype=dtype,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
    )

    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)

    return operator, mat
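As a quick illustration of the property the comments above rely on (a spectrum is Hermitian iff it is the DFT of a real convolution kernel), here is a minimal NumPy sketch, separate from the test code:

import numpy as np

h = np.random.randn(8)        # real convolution kernel
spectrum = np.fft.fft(h)      # spectrum = FFT[h]
n = len(spectrum)

# Hermitian symmetry: S[k] == conj(S[(-k) mod n]) for every k.
print(np.allclose(spectrum, np.conj(spectrum[(-np.arange(n)) % n])))  # True
# Equivalently, IFFT[spectrum] is real up to round-off.
print(np.allclose(np.fft.ifft(spectrum).imag, 0.0))                   # True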
Example #5
def ifft(input, name=None):
    # Thin wrapper that forwards to fft_ops.ifft.
    return fft_ops.ifft(input, name)
Example #6
def auto_correlation(
    x,
    axis=-1,
    max_lags=None,
    center=True,
    normalize=True,
    name="auto_correlation"):
  """Auto correlation along one axis.

  Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
  `RXX` may be defined as  (with `E` expectation and `Conj` complex conjugate)

  ```
  RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
  W[n]   := (X[n] - MU) / S,
  MU     := E{ X[0] },
  S**2   := E{ (X[0] - MU) Conj(X[0] - MU) }.
  ```

  This function takes the viewpoint that `x` is (along one axis) a finite
  sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
  estimate of `RXX[m]` as follows:

  After extending `x` from length `L` to `inf` by zero padding, the auto
  correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as

  ```
  rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
  w[n]   := (x[n] - mu) / s,
  mu     := L**-1 sum_n x[n],
  s**2   := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
  ```

  The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
  often set `max_lags` small enough so that the entire output is meaningful.

  Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
  `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
  contains a slight bias, which goes to zero as `len(x) - m --> infinity`.

  Args:
    x:  `float32` or `complex64` `Tensor`.
    axis:  Python `int`. The axis number along which to compute correlation.
      Other dimensions index different batch members.
    max_lags:  Positive `int` tensor.  The maximum value of `m` to consider
      (in equation above).  If `max_lags >= x.shape[axis]`, we effectively
      re-set `max_lags` to `x.shape[axis] - 1`.
    center:  Python `bool`.  If `False`, do not subtract the mean estimate `mu`
      from `x[n]` when forming `w[n]`.
    normalize:  Python `bool`.  If `False`, do not divide by the variance
      estimate `s**2` when forming `w[n]`.
    name:  `String` name to prepend to created ops.

  Returns:
    `rxx`: `Tensor` of same `dtype` as `x`.  `rxx.shape[i] = x.shape[i]` for
      `i != axis`, and `rxx.shape[axis] = max_lags + 1`.

  Raises:
    TypeError:  If `x` is not a supported type.
  """
  # Implementation details:
  # Extend length N / 2 1-D array x to length N by zero padding onto the end.
  # Then, set
  #   F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
  # It is not hard to see that
  #   F[x]_k Conj(F[x]_k) = F[R]_k, where
  #   R_m := sum_n x_n Conj(x_{(n - m) mod N}).
  # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].

  # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
  # based version of estimating RXX.
  # Note that this is a special case of the Wiener-Khinchin Theorem.
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    # Rotate dimensions of x in order to put axis at the rightmost dim.
    # FFT op requires this.
    rank = util.prefer_static_rank(x)
    if axis < 0:
      axis = rank + axis
    shift = rank - 1 - axis
    # Suppose x.shape[axis] = T, so there are T "time" steps.
    #   ==> x_rotated.shape = B + [T],
    # where B is x_rotated's batch shape.
    x_rotated = util.rotate_transpose(x, shift)

    if center:
      x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True)

    # x_len = N / 2 from above explanation.  The length of x along axis.
    # Get a value for x_len that works in all cases.
    x_len = util.prefer_static_shape(x_rotated)[-1]

    # TODO(langmore) Investigate whether this zero padding helps or hurts.  At
    # the moment it is necessary so that all FFT implementations work.
    # Zero pad to the next power of 2 greater than 2 * x_len, which equals
    # 2**(ceil(Log_2(2 * x_len))).  Note: Log_2(X) = Log_e(X) / Log_e(2).
    x_len_float64 = math_ops.cast(x_len, np.float64)
    target_length = math_ops.pow(
        np.float64(2.),
        math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.)))
    pad_length = math_ops.cast(target_length - x_len_float64, np.int32)

    # We should have:
    # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
    #                     = B + [T + pad_length]
    x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)

    dtype = x.dtype
    if not dtype.is_complex:
      if not dtype.is_floating:
        raise TypeError("Argument x must have either float or complex dtype"
                        " found: {}".format(dtype))
      x_rotated_pad = math_ops.complex(x_rotated_pad,
                                       dtype.real_dtype.as_numpy_dtype(0.))

    # Autocorrelation is IFFT of power-spectral density (up to some scaling).
    fft_x_rotated_pad = fft_ops.fft(x_rotated_pad)
    spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad)
    # shifted_product is R[m] from above detailed explanation.
    # It is the inner product sum_n X[n] * Conj(X[n - m]).
    shifted_product = fft_ops.ifft(spectral_density)

    # Cast back to real-valued if x was real to begin with.
    shifted_product = math_ops.cast(shifted_product, dtype)

    # Figure out if we can deduce the final static shape, and set max_lags.
    # Use x_rotated as a reference, because it has the time dimension in the far
    # right, and was created before we performed all sorts of crazy shape
    # manipulations.
    know_static_shape = True
    if not x_rotated.shape.is_fully_defined():
      know_static_shape = False
    if max_lags is None:
      max_lags = x_len - 1
    else:
      max_lags = ops.convert_to_tensor(max_lags, name="max_lags")
      max_lags_ = tensor_util.constant_value(max_lags)
      if max_lags_ is None or not know_static_shape:
        know_static_shape = False
        max_lags = math_ops.minimum(x_len - 1, max_lags)
      else:
        max_lags = min(x_len - 1, max_lags_)

    # Chop off the padding.
    # We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags + 1]
    shifted_product_chopped = shifted_product[..., :max_lags + 1]

    # If possible, set shape.
    if know_static_shape:
      chopped_shape = x_rotated.shape.as_list()
      chopped_shape[-1] = min(x_len, max_lags + 1)
      shifted_product_chopped.set_shape(chopped_shape)

    # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]).  The
    # other terms were zeros arising only due to zero padding.
    # `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
    # E[X[n] Conj(X[n - m])].
    x_len = math_ops.cast(x_len, dtype.real_dtype)
    max_lags = math_ops.cast(max_lags, dtype.real_dtype)
    denominator = x_len - math_ops.range(0., max_lags + 1.)
    denominator = math_ops.cast(denominator, dtype)
    shifted_product_rotated = shifted_product_chopped / denominator

    if normalize:
      shifted_product_rotated /= shifted_product_rotated[..., :1]

    # Transpose dimensions back to those of x.
    return util.rotate_transpose(shifted_product_rotated, -shift)
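Below is an illustrative NumPy cross-check of the estimator described above (`auto_correlation_np` is a hypothetical helper, assuming a 1-D real input with `center=True` and `normalize=False`): zero pad the centered signal, form the power spectral density with an FFT, IFFT back, and divide lag `m` by `L - m`.

import numpy as np

def auto_correlation_np(x, max_lags):
    length = len(x)
    w = x - x.mean()
    # Pad to at least 2 * length so the circular correlation has no wrap-around.
    n_fft = int(2 ** np.ceil(np.log2(2 * length)))
    f = np.fft.fft(w, n_fft)
    r = np.fft.ifft(f * np.conj(f)).real[:max_lags + 1]
    return r / (length - np.arange(max_lags + 1))

x = np.random.randn(64)
w = x - x.mean()
# Direct O(length * max_lags) estimate of rxx[m] for comparison.
direct = np.array(
    [np.dot(w[m:], w[:len(w) - m]) / (len(w) - m) for m in range(5)])
np.testing.assert_allclose(auto_correlation_np(x, max_lags=4), direct, atol=1e-10)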
def training(samples, labels, canvas):
    """Trains the spectral filter vector V on (samples, labels), saves it to disk,
    and plots the training loss and the resulting filter on the given canvas."""
    shape_1 = np.shape(samples)[0]
    shape_2 = np.shape(samples)[1]

    for i in range(len(labels)):
        labels[i] = int(labels[i])

    # Keep labels as a flat float array of length shape_1 so it matches the
    # 1-D `target` placeholder below.
    labels = np.array(labels, dtype=np.float32)

    tf.compat.v1.disable_eager_execution()

    # Define the trainable value V and the computation graph.
    # Note that the Fourier transform must be taken along rows, since the input
    # is a matrix of samples (a batch).

    # Define the "weights".
    V = tf.Variable(shape_2 * [1.0], dtype=tf.float32, name="filtr_vector", trainable=True)

    # Define the input x and the output node built around tf.norm(...).

    batch_size = 16
    # The dataset is kept as a list so that it is easy to shuffle.

    x = tf.compat.v1.placeholder(tf.float32, [batch_size, shape_2], "data")

    output = tf.norm(tf.abs(ifft(tf.multiply(fft(tf.complex(x / tf.norm(x, axis=0), 0.)), tf.complex(V, 0.)))),
                     axis=1) / tf.norm(x)

    # Define the label vector for the batch and the loss function.
    target = tf.compat.v1.placeholder(tf.float32, batch_size, "labels")
    cost = tf.reduce_mean(tf.pow(tf.pow(output, 2) - target, 2))

    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)

    gvs = optimizer.compute_gradients(cost)
    # Clip the gradients in case they blow up.
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]

    train_op = optimizer.apply_gradients(capped_gvs)

    # Track the training time.
    start_time = time.time()

    with tf.compat.v1.Session() as sess:
        sess.run(V.initializer)

        epoch = 8000

        losses = np.zeros(epoch)

        for i in range(epoch):

            if batch_size == shape_1:
                batch_of_samples = samples
                batch_of_labels = labels
            else:
                # Take a contiguous window of batch_size samples starting at a
                # random index.
                random_start = np.random.randint(0, shape_1 - batch_size)
                batch_of_samples = samples[random_start:random_start + batch_size]
                batch_of_labels = labels[random_start:random_start + batch_size]

            losses[i] = sess.run(cost, {x: batch_of_samples, target: batch_of_labels})
            sess.run(train_op, {x: batch_of_samples, target: batch_of_labels})

        tmp_V = V.eval()

    # Record the elapsed time.
    elapsed_time = time.time() - start_time

    # Save the trained filter vector to disk.

    path_for_filtr = "D:\\samples_for_kursach\\filter_vector"
    write_filtr(tmp_V, path_for_filtr)

    # Now visualize how the training went.
    canvas.axes.set_xticks([])
    canvas.axes.set_yticks([])
    canvas.axes.set_title("time spent on training: " + str(elapsed_time)[:5] + "  sec" + "        batch size " + str(batch_size))

    canvas.axes = canvas.fig.add_subplot(121)
    canvas.plot_linear_signal(np.arange(epoch), losses)
    canvas.axes.set_xlabel('number of epochs')
    canvas.axes.set_ylabel('Loss')
    # canvas.axes.set_yscale("log")

    canvas.axes = canvas.fig.add_subplot(122)

    disc = 1000
    freq = np.fft.fftfreq(shape_2, disc)

    # Keep only the non-negative frequencies and the corresponding leading
    # entries of the trained filter vector.
    output_V = tmp_V[:-int(shape_2 / 2)]
    freq = freq[freq >= 0]

    canvas.plot_linear_signal(freq * disc, output_V)
    canvas.axes.set_xlabel('Hz')
    canvas.axes.set_ylabel('filter vector')

    # Save the figure to a PDF.

    file_with_id2 = open(path_for_filtr + "\\unic_tmp_id.txt", "r")
    id = file_with_id2.read()
    file_with_id2.close()

    id = int(id) - 1

    tmp_pdf_file = PdfPages(path_for_filtr + "\\vec_" + str(id) + ".pdf")
    tmp_pdf_file.savefig(canvas.fig)
    tmp_pdf_file.close()
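For clarity, here is a rough NumPy sketch (illustrative only; `forward` and the random data are hypothetical, not part of this module) of the quantity the graph above optimizes: each batch is column-normalized, filtered element-wise by V in the frequency domain, and the per-row norm of the filtered signal, rescaled by the overall batch norm, is squared and fit to the labels.

import numpy as np

def forward(batch, V):
    # batch: [batch_size, shape_2] real samples; V: [shape_2] filter weights.
    x_norm = batch / np.linalg.norm(batch, axis=0)   # per-column scaling, as in x / tf.norm(x, axis=0)
    filtered = np.fft.ifft(np.fft.fft(x_norm, axis=1) * V, axis=1)
    return np.linalg.norm(np.abs(filtered), axis=1) / np.linalg.norm(batch)

# Hypothetical usage with random data in place of real samples and labels.
batch = np.random.randn(16, 128).astype(np.float32)
labels = np.random.randint(0, 2, size=16).astype(np.float32)
V = np.ones(128, dtype=np.float32)
loss = np.mean((forward(batch, V) ** 2 - labels) ** 2)  # same squared error as `cost`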