Code example #1
  def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
    """Creates a circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way.  This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape:  Python list.  Desired shape of returned matrix.
      dtype:  Type to cast the returned matrix to.

    Returns:
      Circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    if not domain_dimension:
      return array_ops.zeros(shape, dtype)

    # Explicitly compute the action of spectrum on basis vectors.
    matrix_rows = []
    for m in range(domain_dimension):
      x = np.zeros([domain_dimension])
      # x is a basis vector.
      x[m] = 1.0
      # NOTE: this assumes `spectrum` is complex64; the variant in Code
      # example #2 casts to `spectrum.dtype` instead.
      fft_x = fft_ops.fft(x.astype(np.complex64))
      h_convolve_x = fft_ops.ifft(spectrum * fft_x)
      matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
Code example #2
    def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
        """Creates a circulant matrix from a spectrum.

        Intentionally done in an explicit yet inefficient way.  This provides
        a cross check to the main code that uses fancy reshapes.

        Args:
          spectrum: Float or complex `Tensor`.
          shape:  Python list.  Desired shape of returned matrix.
          dtype:  Type to cast the returned matrix to.

        Returns:
          Circulant (batch) matrix of desired `dtype`.
        """
        spectrum = _to_complex(spectrum)
        spectrum_shape = self._shape_to_spectrum_shape(shape)
        domain_dimension = spectrum_shape[-1]
        if not domain_dimension:
            return array_ops.zeros(shape, dtype)

        # Explicitly compute the action of spectrum on basis vectors.
        matrix_rows = []
        for m in range(domain_dimension):
            x = np.zeros([domain_dimension])
            # x is a basis vector.
            x[m] = 1.0
            fft_x = fft_ops.fft(math_ops.cast(x, spectrum.dtype))
            h_convolve_x = fft_ops.ifft(spectrum * fft_x)
            matrix_rows.append(h_convolve_x)
        matrix = array_ops.stack(matrix_rows, axis=-1)
        return math_ops.cast(matrix, dtype)
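
The two variants above build the matrix column-by-column from the action of the spectrum on basis vectors. A minimal NumPy/SciPy sketch of the same idea (my own cross-check, not part of the TensorFlow tests) confirms that the result is exactly the circulant matrix whose first column is the IFFT of the spectrum:

import numpy as np
from scipy.linalg import circulant

spectrum = np.fft.fft(np.array([1., 2., 3., 4.]))  # any length-4 spectrum
n = spectrum.shape[-1]

# Column m of the matrix is IFFT(spectrum * FFT(e_m)), with e_m a basis vector.
cols = []
for m in range(n):
    e_m = np.zeros(n)
    e_m[m] = 1.0
    cols.append(np.fft.ifft(spectrum * np.fft.fft(e_m)))
matrix = np.stack(cols, axis=-1)

# Same matrix, built directly as a circulant with first column IFFT(spectrum).
np.testing.assert_allclose(matrix, circulant(np.fft.ifft(spectrum)), atol=1e-12)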
Code example #3
    def _matmul(self, x, adjoint=False, adjoint_arg=False):
        # Given a Toeplitz matrix, we can embed it in a circulant matrix to
        # perform efficient matrix multiplications. Given a Toeplitz matrix
        # with first row [t_0, t_1, ..., t_{n-1}] and first column
        # [t_0, t_{-1}, ..., t_{-(n-1)}], let C be the circulant matrix with
        # first column [t_0, t_{-1}, ..., t_{-(n-1)}, 0, t_{n-1}, ..., t_1].
        # Also append `n` zeros to our input vector `x`, making it a vector of
        # length `2n` (call it `y`). It can be shown that the first `n`
        # entries of `Cy` equal the Toeplitz multiplication `Tx`. See:
        # http://math.mit.edu/icg/resources/teaching/18.085-spring2015/toeplitz.pdf
        # for more details.
        x = linalg.adjoint(x) if adjoint_arg else x
        expanded_x = array_ops.concat([x, array_ops.zeros_like(x)], axis=-2)
        col = ops.convert_to_tensor(self.col)
        row = ops.convert_to_tensor(self.row)
        circulant_col = array_ops.concat([
            col,
            array_ops.zeros_like(col[..., 0:1]),
            array_ops.reverse(row[..., 1:], axis=[-1])
        ], axis=-1)
        circulant = linear_operator_circulant.LinearOperatorCirculant(
            fft_ops.fft(_to_complex(circulant_col)),
            input_output_dtype=row.dtype)
        result = circulant.matmul(expanded_x,
                                  adjoint=adjoint,
                                  adjoint_arg=False)

        shape = self._shape_tensor(row=row, col=col)
        return math_ops.cast(
            result[..., :self._domain_dimension_tensor(shape=shape), :],
            self.dtype)
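
The embedding described in the comment is easy to check numerically. A short NumPy/SciPy sketch (my own illustration, assuming SciPy is available) builds the 2n x 2n circulant from `col` and `row` exactly as above and verifies that the first n entries of C @ y reproduce the Toeplitz product:

import numpy as np
from scipy.linalg import circulant, toeplitz

col = np.array([1., 2., 3., 4.])  # [t_0, t_{-1}, t_{-2}, t_{-3}]
row = np.array([1., 5., 6., 7.])  # [t_0, t_1, t_2, t_3]; row[0] == col[0]
n = col.shape[0]

T = toeplitz(col, row)

# First column [t_0, t_{-1}, ..., t_{-(n-1)}, 0, t_{n-1}, ..., t_1].
circulant_col = np.concatenate([col, [0.], row[1:][::-1]])
C = circulant(circulant_col)

x = np.random.randn(n)
y = np.concatenate([x, np.zeros(n)])  # adjoin n zeros
np.testing.assert_allclose((C @ y)[:n], T @ x, atol=1e-12)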
Code example #4
  def __init__(self,
               col,
               row,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorToeplitz"):
    r"""Initialize a `LinearOperatorToeplitz`.

    Args:
      col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0`, `N >= 0`.
        The first column of the operator. Allowed dtypes: `float16`, `float32`,
          `float64`, `complex64`, `complex128`. Note that the first entry of
          `col` is assumed to be the same as the first entry of `row`.
      row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0`, `N >= 0`.
        The first row of the operator. Allowed dtypes: `float16`, `float32`,
          `float64`, `complex64`, `complex128`. Note that the first entry of
          `row` is assumed to be the same as the first entry of `col`.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its Hermitian
        transpose.  If `col.dtype` is real, this is auto-set to `True`.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.
    """

    with ops.name_scope(name, values=[row, col]):
      self._row = ops.convert_to_tensor(row, name="row")
      self._col = ops.convert_to_tensor(col, name="col")
      self._check_row_col(self._row, self._col)

      circulant_col = array_ops.concat(
          [self._col,
           array_ops.zeros_like(self._col[..., 0:1]),
           array_ops.reverse(self._row[..., 1:], axis=[-1])], axis=-1)

      # To be used for matmul.
      self._circulant = linear_operator_circulant.LinearOperatorCirculant(
          fft_ops.fft(_to_complex(circulant_col)),
          input_output_dtype=self._row.dtype)

      if is_square is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("Only square Toeplitz operators currently supported.")
      is_square = True

      super(LinearOperatorToeplitz, self).__init__(
          dtype=self._row.dtype,
          graph_parents=[self._row, self._col],
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)
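
For reference, this operator is exposed in the public API as tf.linalg.LinearOperatorToeplitz. A hedged usage sketch (assuming a TF 2.x install; the values are illustrative only):

import tensorflow as tf

col = tf.constant([1., 2., 3.])
row = tf.constant([1., 4., 5.])  # first entries of col and row must agree
operator = tf.linalg.LinearOperatorToeplitz(col, row)

x = tf.ones([3, 1])
print(operator.matmul(x))    # uses the circulant embedding internally
print(operator.to_dense())   # dense [3, 3] Toeplitz matrix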
Code example #5
  def test_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    with self.cached_session():
      # Make spectrum the FFT of a real convolution kernel h.  This ensures
      # that spectrum is Hermitian.
      h = linear_operator_test_util.random_normal(shape=(3, 4))
      spectrum = fft_ops.fft(math_ops.cast(h, dtypes.complex64))
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3 * 4)
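
The property this test relies on can be checked directly in NumPy (a standalone sketch, not part of the test suite): the DFT of a real kernel is conjugate-symmetric, and the circulant matrix it induces is real up to roundoff.

import numpy as np

h = np.random.randn(4)
spectrum = np.fft.fft(h)

# Hermitian symmetry: spectrum[k] == conj(spectrum[(-k) % n]).
np.testing.assert_allclose(
    spectrum, np.conj(spectrum[-np.arange(4)]), atol=1e-12)

# The induced circulant matrix has first column IFFT(spectrum) == h, which is
# real, so every column (a circular shift of h) is real as well.
first_col = np.fft.ifft(spectrum)
np.testing.assert_allclose(np.imag(first_col), 0., atol=1e-12)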
Code example #6
  def test_defining_operator_using_real_convolution_kernel(self):
    with self.cached_session():
      convolution_kernel = [1., 2., 1.]
      spectrum = fft_ops.fft(
          math_ops.cast(convolution_kernel, dtypes.complex64))

      # spectrum is shape [3] ==> operator is shape [3, 3]
      # spectrum is Hermitian ==> operator is real.
      operator = linalg.LinearOperatorCirculant(spectrum)

      # Allow for complex output so we can make sure it has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)

      matrix = operator.to_dense().eval()
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
Code example #7
    def _operator_and_matrix(self,
                             build_info,
                             dtype,
                             use_placeholder,
                             ensure_self_adjoint_and_pd=False):
        shape = build_info.shape
        # For this test class, we are creating Hermitian spectra.
        # We also want the spectrum to have eigenvalues bounded away from zero.
        #
        # pre_spectrum is bounded away from zero.
        pre_spectrum = linear_operator_test_util.random_uniform(
            shape=self._shape_to_spectrum_shape(shape),
            dtype=dtype,
            minval=1.,
            maxval=2.)
        pre_spectrum = math_ops.cast(math_ops.abs(pre_spectrum), dtype=dtype)
        pre_spectrum_c = _to_complex(pre_spectrum)

        # Real{IFFT[pre_spectrum]}
        #  = IFFT[EvenPartOf[pre_spectrum]]
        # is the IFFT of something that is also bounded away from zero.
        # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
        pre_h = fft_ops.ifft(pre_spectrum_c)

        # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
        # So we will make spectrum = FFT[h], for real valued h.
        h = math_ops.real(pre_h)
        h_c = _to_complex(h)

        spectrum = fft_ops.fft(h_c)

        lin_op_spectrum = spectrum

        if use_placeholder:
            lin_op_spectrum = array_ops.placeholder_with_default(spectrum,
                                                                 shape=None)

        operator = linalg.LinearOperatorCirculant(
            lin_op_spectrum,
            input_output_dtype=dtype,
            is_positive_definite=True if ensure_self_adjoint_and_pd else None,
            is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        )

        mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)

        return operator, mat
Code example #8
  def operator_and_matrix(
      self, shape_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    shape = shape_info.shape
    # For this test class, we are creating Hermitian spectra.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtype,
        minval=1.,
        maxval=2.)
    pre_spectrum = math_ops.cast(math_ops.abs(pre_spectrum), dtype=dtype)
    pre_spectrum_c = _to_complex(pre_spectrum)

    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft(pre_spectrum_c)

    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)

    spectrum = fft_ops.fft(h_c)

    lin_op_spectrum = spectrum

    if use_placeholder:
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)

    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        input_output_dtype=dtype,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
    )

    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)

    return operator, mat
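
The construction used in both variants above can be sanity-checked with plain NumPy (my own sketch): a positive pre-spectrum, round-tripped through IFFT, real part, and FFT, lands on the even part of the pre-spectrum, so it stays real and bounded away from zero.

import numpy as np

pre_spectrum = np.random.uniform(1., 2., size=8)  # bounded away from zero
h = np.real(np.fft.ifft(pre_spectrum))            # real convolution kernel
spectrum = np.fft.fft(h)                          # Hermitian spectrum

# FFT[Real[IFFT[s]]] is the even part (s[k] + s[-k]) / 2 of s.
expected = (pre_spectrum + pre_spectrum[-np.arange(8)]) / 2.
np.testing.assert_allclose(spectrum, expected, atol=1e-12)

# Eigenvalues of the circulant operator are the spectrum entries; they remain
# in [1, 2], so the operator is self-adjoint and positive definite.
assert np.all(np.real(spectrum) >= 1.) and np.all(np.real(spectrum) <= 2.)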
Code example #9
def auto_correlation(
    x,
    axis=-1,
    max_lags=None,
    center=True,
    normalize=True,
    name="auto_correlation"):
  """Auto correlation along one axis.

  Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
  `RXX` may be defined as  (with `E` expectation and `Conj` complex conjugate)

  ```
  RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
  W[n]   := (X[n] - MU) / S,
  MU     := E{ X[0] },
  S**2   := E{ (X[0] - MU) Conj(X[0] - MU) }.
  ```

  This function takes the viewpoint that `x` is (along one axis) a finite
  sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
  estimate of `RXX[m]` as follows:

  After extending `x` from length `L` to `inf` by zero padding, the auto
  correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as

  ```
  rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
  w[n]   := (x[n] - mu) / s,
  mu     := L**-1 sum_n x[n],
  s**2   := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
  ```

  The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
  often set `max_lags` small enough so that the entire output is meaningful.

  Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
  `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
  contains a slight bias, which goes to zero as `len(x) - m --> infinity`.

  Args:
    x:  `float32` or `complex64` `Tensor`.
    axis:  Python `int`. The axis number along which to compute correlation.
      Other dimensions index different batch members.
    max_lags:  Positive `int` tensor.  The maximum value of `m` to consider
      (in equation above).  If `max_lags >= x.shape[axis]`, we effectively
      re-set `max_lags` to `x.shape[axis] - 1`.
    center:  Python `bool`.  If `False`, do not subtract the mean estimate `mu`
      from `x[n]` when forming `w[n]`.
    normalize:  Python `bool`.  If `False`, do not divide by the variance
      estimate `s**2` when forming `w[n]`.
    name:  `String` name to prepend to created ops.

  Returns:
    `rxx`: `Tensor` of same `dtype` as `x`.  `rxx.shape[i] = x.shape[i]` for
      `i != axis`, and `rxx.shape[axis] = max_lags + 1`.

  Raises:
    TypeError:  If `x` is not a supported type.
  """
  # Implementation details:
  # Extend length N / 2 1-D array x to length N by zero padding onto the end.
  # Then, set
  #   F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
  # It is not hard to see that
  #   F[x]_k Conj(F[x]_k) = F[R]_k, where
  #   R_m := sum_n x_n Conj(x_{(n - m) mod N}).
  # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].

  # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
  # based version of estimating RXX.
  # Note that this is a special case of the Wiener-Khinchin Theorem.
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    # Rotate dimensions of x in order to put axis at the rightmost dim.
    # FFT op requires this.
    rank = util.prefer_static_rank(x)
    if axis < 0:
      axis = rank + axis
    shift = rank - 1 - axis
    # Suppose x.shape[axis] = T, so there are T "time" steps.
    #   ==> x_rotated.shape = B + [T],
    # where B is x_rotated's batch shape.
    x_rotated = util.rotate_transpose(x, shift)

    if center:
      x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True)

    # x_len = N / 2 from above explanation.  The length of x along axis.
    # Get a value for x_len that works in all cases.
    x_len = util.prefer_static_shape(x_rotated)[-1]

    # TODO(langmore) Investigate whether this zero padding helps or hurts.  At
    # the moment it is necessary so that all FFT implementations work.
    # Zero pad to the next power of 2 greater than 2 * x_len, which equals
    # 2**(ceil(Log_2(2 * x_len))).  Note: Log_2(X) = Log_e(X) / Log_e(2).
    x_len_float64 = math_ops.cast(x_len, np.float64)
    target_length = math_ops.pow(
        np.float64(2.),
        math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.)))
    pad_length = math_ops.cast(target_length - x_len_float64, np.int32)

    # We should have:
    # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
    #                     = B + [T + pad_length]
    x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)

    dtype = x.dtype
    if not dtype.is_complex:
      if not dtype.is_floating:
        raise TypeError("Argument x must have either float or complex dtype"
                        " found: {}".format(dtype))
      x_rotated_pad = math_ops.complex(x_rotated_pad,
                                       dtype.real_dtype.as_numpy_dtype(0.))

    # Autocorrelation is IFFT of power-spectral density (up to some scaling).
    fft_x_rotated_pad = fft_ops.fft(x_rotated_pad)
    spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad)
    # shifted_product is R[m] from above detailed explanation.
    # It is the inner product sum_n X[n] * Conj(X[n - m]).
    shifted_product = fft_ops.ifft(spectral_density)

    # Cast back to real-valued if x was real to begin with.
    shifted_product = math_ops.cast(shifted_product, dtype)

    # Figure out if we can deduce the final static shape, and set max_lags.
    # Use x_rotated as a reference, because it has the time dimension in the far
    # right, and was created before we performed all sorts of crazy shape
    # manipulations.
    know_static_shape = True
    if not x_rotated.shape.is_fully_defined():
      know_static_shape = False
    if max_lags is None:
      max_lags = x_len - 1
    else:
      max_lags = ops.convert_to_tensor(max_lags, name="max_lags")
      max_lags_ = tensor_util.constant_value(max_lags)
      if max_lags_ is None or not know_static_shape:
        know_static_shape = False
        max_lags = math_ops.minimum(x_len - 1, max_lags)
      else:
        max_lags = min(x_len - 1, max_lags_)

    # Chop off the padding.
    # We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags + 1]
    shifted_product_chopped = shifted_product[..., :max_lags + 1]

    # If possible, set shape.
    if know_static_shape:
      chopped_shape = x_rotated.shape.as_list()
      chopped_shape[-1] = min(x_len, max_lags + 1)
      shifted_product_chopped.set_shape(chopped_shape)

    # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]).  The
    # other terms were zeros arising only due to zero padding.
    # `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
    # E[X[n] Conj(X[n - m])].
    x_len = math_ops.cast(x_len, dtype.real_dtype)
    max_lags = math_ops.cast(max_lags, dtype.real_dtype)
    denominator = x_len - math_ops.range(0., max_lags + 1.)
    denominator = math_ops.cast(denominator, dtype)
    shifted_product_rotated = shifted_product_chopped / denominator

    if normalize:
      shifted_product_rotated /= shifted_product_rotated[..., :1]

    # Transpose dimensions back to those of x.
    return util.rotate_transpose(shifted_product_rotated, -shift)
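
As a cross-check on the FFT route (a standalone NumPy sketch, not library code), the zero-padded IFFT-of-power-spectral-density estimate agrees with the direct O(n^2) lag sum, here with center=True and normalize=False:

import numpy as np

x = np.random.randn(32)
n, max_lags = x.shape[0], 5
w = x - x.mean()  # center

# Direct estimate: rxx[m] = (n - m)^-1 sum_k w[k + m] * conj(w[k]).
direct = np.array(
    [np.sum(w[m:] * np.conj(w[:n - m])) / (n - m) for m in range(max_lags + 1)])

# FFT estimate: zero pad to length 2n, then IFFT the power spectral density.
w_pad = np.concatenate([w, np.zeros(n)])
rxx = np.real(np.fft.ifft(np.abs(np.fft.fft(w_pad)) ** 2))[:max_lags + 1]
rxx /= n - np.arange(max_lags + 1)

np.testing.assert_allclose(rxx, direct, atol=1e-10)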
Code example #10
import time

import numpy as np
import tensorflow as tf
from matplotlib.backends.backend_pdf import PdfPages

# The bare fft/ifft names below are assumed to refer to tf.signal;
# `write_filtr` and `canvas` are project-local helpers defined elsewhere.
fft = tf.signal.fft
ifft = tf.signal.ifft


def training(samples, labels, canvas):
    shape_1 = np.shape(samples)[0]
    shape_2 = np.shape(samples)[1]

    for i in range(len(labels)):
        labels[i] = int(labels[i])

    # Keep labels 1-D to match the `target` placeholder below (the original
    # reshape to (shape_1, 1) discarded its result).
    labels = np.array(labels, dtype=np.float32)

    tf.compat.v1.disable_eager_execution()

    # Define the trainable value V and the computation graph.
    # Note that the Fourier transform must be taken along rows, since the
    # input is a matrix of samples (a batch).

    # задаем "веса"
    V = tf.Variable(shape_2 * [1.0], dtype=tf.float32, name="filtr_vector", trainable=True)

    # Define the input x and the output node built with tf.norm(...).

    batch_size = 16
    # Keep the dataset as a list so it is convenient to shuffle.

    x = tf.compat.v1.placeholder(tf.float32, [batch_size, shape_2], "data")

    normalized_x = tf.complex(x / tf.norm(x, axis=0), 0.)
    output = tf.norm(
        tf.abs(ifft(tf.multiply(fft(normalized_x), tf.complex(V, 0.)))),
        axis=1) / tf.norm(x)

    # Define the batch's label column and the loss function.
    target = tf.compat.v1.placeholder(tf.float32, batch_size, "labels")
    cost = tf.reduce_mean(tf.pow(tf.pow(output, 2) - target, 2))

    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)

    gvs = optimizer.compute_gradients(cost)
    # Clip the gradients in case they blow up.
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]

    train_op = optimizer.apply_gradients(capped_gvs)

    # Used to measure training time.
    start_time = time.time()

    with tf.compat.v1.Session() as sess:
        sess.run(V.initializer)

        epoch = 8000

        losses = np.zeros(epoch)

        for i in range(epoch):
            if batch_size == shape_1:
                batch_of_samples = samples
                batch_of_labels = labels
            else:
                # Pick a random contiguous window of batch_size samples.
                start = np.random.randint(0, shape_1 - batch_size)
                batch_of_samples = samples[start:start + batch_size]
                batch_of_labels = labels[start:start + batch_size]

            losses[i] = sess.run(cost, {x: batch_of_samples, target: batch_of_labels})
            sess.run(train_op, {x: batch_of_samples, target: batch_of_labels})

        tmp_V = V.eval()

    # Record the elapsed time.
    elapsed_time = time.time() - start_time

    # Write the trained vector to disk.
    path_for_filtr = "D:\\samples_for_kursach\\filter_vector"
    write_filtr(tmp_V, path_for_filtr)

    # Now visualize how the training went.
    canvas.axes.set_xticks([])
    canvas.axes.set_yticks([])
    canvas.axes.set_title("time spent on training: " + str(elapsed_time)[:5] + "  sek" + "        batch size "+ str(batch_size))

    canvas.axes = canvas.fig.add_subplot(121)
    canvas.plot_linear_signal(np.arange(epoch), losses)
    canvas.axes.set_xlabel('number of epochs')
    canvas.axes.set_ylabel('Loss')
    # canvas.axes.set_yscale("log")

    canvas.axes = canvas.fig.add_subplot(122)

    disc = 1000
    freq = np.fft.fftfreq(shape_2, disc)

    # Keep only the non-negative frequencies and the matching first half of
    # the filter vector.
    output_V = tmp_V[:-int(shape_2 / 2)]
    freq = freq[freq >= 0]

    canvas.plot_linear_signal(freq * disc, output_V)
    canvas.axes.set_xlabel('Hz')
    canvas.axes.set_ylabel('filter vector')

    # Save the figure to a PDF.

    with open(path_for_filtr + "\\unic_tmp_id.txt", "r") as file_with_id2:
        vec_id = int(file_with_id2.read()) - 1

    tmp_pdf_file = PdfPages(path_for_filtr + "\\vec_" + str(vec_id) + ".pdf")
    tmp_pdf_file.savefig(canvas.fig)
    tmp_pdf_file.close()
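
For comparison, the same frequency-domain filter idea written against the TF 2.x eager API (a hedged sketch with an illustrative, simplified objective; the per-column input normalization of the original is omitted, and all names here are my own):

import tensorflow as tf

def make_train_step(v, optimizer):
    @tf.function
    def step(x, target):
        with tf.GradientTape() as tape:
            # Multiply the batch spectra by the trainable filter vector.
            x_f = tf.signal.fft(tf.complex(x, 0.))
            out = tf.norm(tf.abs(tf.signal.ifft(x_f * tf.complex(v, 0.))), axis=1)
            loss = tf.reduce_mean((out ** 2 - target) ** 2)
        grads = tape.gradient(loss, [v])
        # Clip the gradients in case they blow up, as in the original.
        grads = [tf.clip_by_value(g, -1., 1.) for g in grads]
        optimizer.apply_gradients(zip(grads, [v]))
        return loss
    return step

v = tf.Variable(tf.ones([128]), name="filtr_vector")
train_step = make_train_step(v, tf.keras.optimizers.SGD(learning_rate=0.1))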