Example #1
import time

import numpy as np
from nitime import algorithms as alg


def arco(f, p):
    # Fit AR(p) coefficients via Yule-Walker: directly for a 1-D signal,
    # column by column for a 2-D array (all-zero columns get zero coefficients).
    # p (the AR order) is taken as an argument; it was undefined in the original excerpt.
    if len(f.shape) <= 1:
        coefs, _ = alg.AR_est_YW(f, p)
    else:
        coefs = np.array([
            alg.AR_est_YW(f[:, i], p)[0]
            if sum(abs(f[:, i])) > 1e-5 else np.zeros(p)
            for i in range(f.shape[-1])
        ])
    return coefs.transpose(), time.time()
Example #2
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from nitime import algorithms as alg
from nitime import utils


def armdl_func(sys, h, w, fs=1.0):
    # Repeatedly fit an AR(15) model to the latest realization and generate the
    # next one, h-1 times in total (the original hard-coded 9 iterations, i.e.
    # h == 10). fs is the sampling frequency; it was not defined in the original
    # excerpt and is assumed here as a keyword argument.
    x_c = np.zeros((h, w))
    x_c[0] = sys
    npts1 = w
    for i in range(h - 1):
        xi = x_c[i]
        c_e, sigma_e = alg.AR_est_YW(xi, 15)
        xo, _, _ = utils.ar_generator(N=npts1, sigma=sigma_e, coefs=c_e,
                                      drop_transients=45)
        x_c[i + 1][:] = xo

    plt.figure()
    plt.plot(x_c[h - 1], label='estimated process')
    plt.plot(sys, 'k', label='original process')
    plt.legend()
    err = x_c[h - 1] - sys
    mse = np.dot(err, err) / w
    plt.title('MSE = %1.3e' % mse)
    plt.grid(True)
    plt.show()

    f_sys, Pxx_sys = signal.periodogram(x_c[h - 1], fs)
    f_syswel, Pxx_syswel = signal.welch(sys, fs)
    plt.figure()
    plt.semilogy(f_sys[1:], Pxx_sys[1:], label='estimated process')
    plt.semilogy(f_syswel[1:], Pxx_syswel[1:], 'k', label='original process')
    plt.legend()
    plt.title('PSD')
    plt.grid(True)
    plt.show()
Example #3
def test_AR_est_consistency():
    order = 10  # some even number
    ak = _random_poles(order // 2)
    x, v, _ = utils.ar_generator(N=512, coefs=-ak[1:], drop_transients=100)
    ak_yw, ssq_yw = tsa.AR_est_YW(x, order)
    ak_ld, ssq_ld = tsa.AR_est_LD(x, order)
    npt.assert_almost_equal(ak_yw, ak_ld)
    npt.assert_almost_equal(ssq_yw, ssq_ld)
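_random_poles is a helper defined elsewhere in nitime's test module and is not shown in this excerpt. A purely illustrative stand-in (an assumption, not nitime's actual implementation) could draw stable complex-conjugate pole pairs and expand them into AR polynomial coefficients:

import numpy as np

def _random_poles_sketch(n_pairs, seed=0):
    # Hypothetical helper: draw n_pairs complex poles strictly inside the unit
    # circle, pair each with its conjugate (so the AR process is real-valued
    # and stationary), and expand them into monic polynomial coefficients.
    rng = np.random.default_rng(seed)
    radius = rng.uniform(0.05, 0.95, n_pairs)
    angle = rng.uniform(0.0, np.pi, n_pairs)
    poles = radius * np.exp(1j * angle)
    poles = np.concatenate([poles, poles.conj()])
    return np.real(np.poly(poles))

The test above would then read ak = _random_poles_sketch(order // 2) and pass coefs=-ak[1:] to utils.ar_generator, exactly as in the excerpt.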
Example #4
def autogressiveModelParameters(epoch):
    feature = []
    # Fit an AR model of order 11 to each of the 14 channels (the choice of
    # order 11 is not explained in the original source).
    for i in range(14):
        coeff, sig = alg.AR_est_YW(np.array(epoch[i]), 11)
        feature.append(coeff)

    return feature
Example #5
def emg_arc(signal, order=4):
    # Clamp the requested AR order so it stays strictly below the signal length.
    if order >= len(signal):
        rd = len(signal) - 1
    else:
        rd = order
    arc, ars = alg.AR_est_YW(signal, rd)
    arc = np.array(arc)
    return arc
Example #6
def test_AR_YW():
    arsig, _, _ = utils.ar_generator(N=512)
    avg_pwr = (arsig * arsig.conjugate()).mean()
    order = 8
    ak, sigma_v = tsa.AR_est_YW(arsig, order)
    w, psd = tsa.AR_psd(ak, sigma_v)
    # the psd is a one-sided power spectral density, which has been
    # multiplied by 2 to preserve the property that
    # 1/2pi int_{-pi}^{pi} Sxx(w) dw = Rxx(0)

    # evaluate this integral numerically from 0 to pi
    dw = np.pi / len(psd)
    avg_pwr_est = np.trapz(psd, dx=dw) / (2 * np.pi)
    # consistency on the order of 10**0 is pretty good for this test
    npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)

    # Test for providing the autocovariance as an input:
    ak, sigma_v = tsa.AR_est_YW(arsig, order, utils.autocov(arsig))
    w, psd = tsa.AR_psd(ak, sigma_v)
    avg_pwr_est = np.trapz(psd, dx=dw) / (2 * np.pi)
    npt.assert_almost_equal(avg_pwr, avg_pwr_est, decimal=0)
Example #7
def emg_arc(signal, order):
    arc, ars = alg.AR_est_YW(signal, order)
    arc = np.array(arc)
    return arc
Example #8
def mssa(ys, M=None, nMC=0, f=0.3):
    '''Multi-channel singular spectrum analysis (MSSA)

    Multivariate generalization of SSA [2]_, using the original algorithm of [1]_.
    Each variable is called a channel, hence the name.

    Parameters
    ----------

    ys : array
          multiple time series (dimension: length of time series x total number of time series)

    M : int
       window size (embedding dimension; default: 10% of the length of the series)

    nMC : int
       number of iterations of the Monte-Carlo process [default: 0, i.e. no Monte-Carlo significance test]

    f : float
       fraction (0 < f <= 1) of good data points required for identifying significant PCs [default: 0.3]

    Returns
    -------
    res : dict
        Containing:

        - eigvals : array of the eigenvalue spectrum

        - eigvecs : matrix of eigenvectors

        - q05 : the 5th percentile of the Monte-Carlo eigenvalues

        - q95 : the 95th percentile of the Monte-Carlo eigenvalues

        - PC : matrix of principal components (2D array, (N-M+1) x (nrec*M))

        - RC : array of reconstructed components (3D array, nrec x N x (nrec*M))

    References
    ----------
    [1]_ Vautard, R., and M. Ghil (1989), Singular spectrum analysis in nonlinear
    dynamics, with applications to paleoclimatic time series, Physica D, 35,
    395–424.

    [2]_ Jiang, N., J. D. Neelin, and M. Ghil (1995), Quasi-quadrennial and
    quasi-biennial variability in the equatorial Pacific, Clim. Dyn., 12, 101–112.

    See Also
    --------

    pyleoclim.utils.decomposition.ssa : Singular Spectrum Analysis (single channel)

    A minimal usage sketch follows the function body below.
    '''
    N = len(ys[:, 0])
    nrec = len(ys[0, :])
    if M is None:
        M = int(N / 10)
    Y = np.zeros((N - M + 1, nrec * M))
    for irec in np.arange(nrec):
        for m in np.arange(0, M):
            Y[:, m + irec * M] = ys[m:N - M + 1 + m, irec]

    C = np.dot(np.nan_to_num(np.transpose(Y)), np.nan_to_num(Y)) / (N - M + 1)
    D, eigvecs = eigh(C)

    sort_tmp = np.sort(D)
    eigvals = sort_tmp[::-1]
    sortarg = np.argsort(-D)

    eigvecs = eigvecs[:, sortarg]

    # test the significance using Monte-Carlo
    Ym = np.zeros((N - M + 1, nrec * M))
    noise = np.zeros((nrec, N, nMC))
    for irec in np.arange(nrec):
        noise[irec, 0, :] = ys[0, irec]
    eigvals_R = np.zeros((nrec * M, nMC))
    # estimate coefficients of AR(1) processes, then generate AR(1) time series (noise)
    # TODO : update to use ar1_sim(), as in ssa()
    for irec in np.arange(nrec):
        Xs = ys[:, irec]
        coefs_est, var_est = alg.AR_est_YW(Xs[~np.isnan(Xs)], 1)
        sigma_est = np.sqrt(var_est)

        for jt in range(1, N):
            noise[irec, jt, :] = coefs_est * noise[irec, jt - 1, :] + sigma_est * np.random.randn(1, nMC)

    for m in range(nMC):
        for irec in np.arange(nrec):
            noise[irec, :, m] = (noise[irec, :, m] - np.mean(noise[irec, :, m])) / (
                np.std(noise[irec, :, m], ddof=1))
            for im in np.arange(0, M):
                Ym[:, im + irec * M] = noise[irec, im:N - M + 1 + im, m]
        Cn = np.dot(np.nan_to_num(np.transpose(Ym)), np.nan_to_num(Ym)) / (N - M + 1)
        # eigvals_R[:,m] = np.diag(np.dot(np.dot(eigvecs,Cn),np.transpose(eigvecs)))
        eigvals_R[:, m] = np.diag(np.dot(np.dot(np.transpose(eigvecs), Cn), eigvecs))

    eigvals95 = np.percentile(eigvals_R, 95, axis=1)
    eigvals05 = np.percentile(eigvals_R, 5, axis=1)


    # determine principal component time series
    PC = np.zeros((N - M + 1, nrec * M))
    PC[:, :] = np.nan
    for k in np.arange(nrec * M):
        for i in np.arange(0, N - M + 1):
            #   modify for nan
            prod = Y[i, :] * eigvecs[:, k]
            ngood = sum(~np.isnan(prod))
            #   must have at least m*f good points
            if ngood >= M * f:
                PC[i, k] = sum(prod[~np.isnan(prod)])  # the columns of this matrix are Ak(t), k=1 to M (T-PCs)

    # compute reconstructed timeseries
    Np = N - M + 1

    RC = np.zeros((nrec, N, nrec * M))

    for k in np.arange(nrec):
        for im in np.arange(M):
            x2 = np.dot(np.expand_dims(PC[:, im], axis=1), np.expand_dims(eigvecs[0 + k * M:M + k * M, im], axis=0))
            x2 = np.flipud(x2)

            for n in np.arange(N):
                RC[k, n, im] = np.diagonal(x2, offset=-(Np - 1 - n)).mean()
    res = {'eigvals': eigvals, 'eigvecs': eigvecs, 'q05': eigvals05, 'q95': eigvals95, 'PC': PC, 'RC': RC}

    return res
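A minimal usage sketch for the mssa() example above; numpy, nitime's algorithms module (alg) and scipy.linalg's eigh are assumed to be imported at module level, as in the original source, and the synthetic two-channel input and parameter values are purely illustrative:

import numpy as np

rng = np.random.default_rng(0)
t = np.arange(500)
# two noisy channels sharing a common 50-sample oscillation
ys = np.column_stack([np.sin(2 * np.pi * t / 50) + 0.5 * rng.standard_normal(t.size),
                      np.cos(2 * np.pi * t / 50) + 0.5 * rng.standard_normal(t.size)])

res = mssa(ys, M=40, nMC=20)     # 20 Monte-Carlo surrogates, illustrative only
print(res['eigvals'][:5])        # leading eigenvalues
print(res['PC'].shape, res['RC'].shape)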
Example #9
#
# plt.plot(a1,a2,'or')
# plt.grid()
# plt.title('pole positions, #poles=20')
# plt.xlabel('Re{z}')
# plt.ylabel('Im{z}')
# x = np.linspace(-1.0, 1.0, 100)
# y = np.linspace(-1.0, 1.0, 100)
# X, Y = np.meshgrid(x,y)
# F = X**2 + Y**2 - 1
# plt.contour(X,Y,F,[0])
# plt.savefig('polozaji_polov.png')
# plt.show()

# Fit an AR model with st_koef coefficients to the first half of the CO2
# concentration series (koncentracija) and predict from it with the helper
# aproksimacija(), which is defined elsewhere in the original script.
polovica = int(len(koncentracija) / 2)

podatki = koncentracija[:polovica]
st_koef = 20

a, b = alg.AR_est_YW(np.array(podatki), st_koef)

# y2 = aproksimacija(x, a, ii)
y3 = aproksimacija(podatki, a, polovica)

plt.plot(koncentracija, label='measurement')
plt.plot(y3, label='prediction N=' + str(st_koef))
plt.legend(title=r'$CO_2$')
plt.grid()
plt.savefig('napoved_co2.png')
plt.show()
Example #10
fig01 = plot_tseries(ts_x, label='AR signal')
fig01 = plot_tseries(ts_noise, fig=fig01, label='Noise')
fig01.axes[0].legend()
"""

.. image:: fig/ar_est_1var_01.png


Now we estimate the model parameters back from the signal, using two different
estimation algorithms (the Yule-Walker fit appears below; a Levinson-Durbin
sketch follows this block).


"""

coefs_est, sigma_est = alg.AR_est_YW(X, 2)
# no rigorous purpose behind 100 transients
X_hat, _, _ = utils.ar_generator(N=npts,
                                 sigma=sigma_est,
                                 coefs=coefs_est,
                                 drop_transients=100,
                                 v=noise)
fig02 = plt.figure()
ax = fig02.add_subplot(111)
ax.plot(np.arange(100, len(X_hat) + 100), X_hat, label='estimated process')
ax.plot(X, 'g--', label='original process')
ax.legend()
err = X_hat - X[100:]
mse = np.dot(err, err) / len(X_hat)
ax.set_title('Mean Square Error: %1.3e' % mse)
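The text above mentions two estimation algorithms, but only the Yule-Walker fit appears in this excerpt. A minimal sketch of the second fit, using nitime's Levinson-Durbin estimator with the same variable names as above (the comparison figure is omitted here):

# Levinson-Durbin fit of the same AR(2) model, for comparison with the
# Yule-Walker estimate above.
coefs_est_ld, sigma_est_ld = alg.AR_est_LD(X, 2)
X_hat_ld, _, _ = utils.ar_generator(N=npts,
                                    sigma=sigma_est_ld,
                                    coefs=coefs_est_ld,
                                    drop_transients=100,
                                    v=noise)
err_ld = X_hat_ld - X[100:]
print('LD MSE: %1.3e' % (np.dot(err_ld, err_ld) / len(X_hat_ld)))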
Example #11
st_tock = 512                      # number of sample points

delta_tocke = 2 * np.pi / st_tock  # sample spacing
print(1 / delta_tocke)
x = np.arange(0, 2 * np.pi, delta_tocke)


delta = 15        # frequency separation between the two sinusoids
osnovna = 75      # base frequency
st_polov = 20     # number of AR poles

signal = np.sin(osnovna * x) + np.sin((osnovna + delta) * x)

print(delta_tocke*osnovna/2/np.pi,delta_tocke*(osnovna+delta)/2/np.pi)
a, b = alg.AR_est_YW(signal, st_polov)
spekter = alg.autoregressive.AR_psd(a, b)



normalizacija_spekter = spekter[1] / sum(spekter[1])


plt.plot(spekter[0] / 2 / np.pi, normalizacija_spekter, 'o-')
plt.ylabel('power spectral density')
plt.xlabel('frequency')
plt.grid()
plt.savefig('sin_2_frekvenci_osnovno')
plt.show()

print(len(signal))
Example #12
def mssa(data, M, MC=1000, f=0.3):
    '''Multi-channel SSA analysis
    (applicable to data containing missing values);
    significance is tested with a Monte-Carlo method.

    Input:
    data: multiple time series (dimension: length of time series x total number of time series)
    M: window size
    MC: number of iterations of the Monte-Carlo process
    f: fraction (0 < f <= 1) of good data points required for identifying
    significant PCs [default: 0.3]

    Output:
    deval: eigenvalue spectrum
    eig_vec: matrix of eigenvectors
    q95: the 95th percentile of the Monte-Carlo eigenvalues
    q05: the 5th percentile of the Monte-Carlo eigenvalues
    PC: matrix of principal components
    RC: matrix of reconstructed components (nrec, N, nrec*M)

    A usage sketch follows the function body below.
    '''

    #Xr = standardize(data)

    N = len(data[:, 0])
    nrec = len(data[0, :])

    Y = np.zeros((N - M + 1, nrec * M))
    for irec in np.arange(nrec):
        for m in np.arange(0, M):
            Y[:, m + irec * M] = data[m:N - M + 1 + m, irec]
    C = np.dot(np.nan_to_num(np.transpose(Y)), np.nan_to_num(Y)) / (N - M + 1)

    eig_val, eig_vec = eigh(C)

    sort_tmp = np.sort(eig_val)
    deval = sort_tmp[::-1]
    sortarg = np.argsort(-eig_val)

    eig_vec = eig_vec[:, sortarg]

    # test the significance using Monte-Carlo
    Ym = np.zeros((N - M + 1, nrec * M))
    noise = np.zeros((nrec, N, MC))
    for irec in np.arange(nrec):
        noise[irec, 0, :] = data[0, irec]
    Lamda_R = np.zeros((nrec * M, MC))
    # estimate coefficients of AR(1) processes, then generate AR(1) time series (noise)
    for irec in np.arange(nrec):
        Xr = data[:, irec]
        coefs_est, var_est = alg.AR_est_YW(Xr[~np.isnan(Xr)], 1)
        sigma_est = np.sqrt(var_est)

        for jt in range(1, N):
            noise[irec, jt, :] = (coefs_est * noise[irec, jt - 1, :]
                                  + sigma_est * np.random.randn(1, MC))

    for m in range(MC):
        for irec in np.arange(nrec):
            noise[irec, :, m] = ((noise[irec, :, m] - np.mean(noise[irec, :, m]))
                                 / np.std(noise[irec, :, m], ddof=1))
            for im in np.arange(0, M):
                Ym[:, im + irec * M] = noise[irec, im:N - M + 1 + im, m]
        Cn = np.dot(np.nan_to_num(np.transpose(Ym)),
                    np.nan_to_num(Ym)) / (N - M + 1)
        #Lamda_R[:,m] = np.diag(np.dot(np.dot(eig_vec,Cn),np.transpose(eig_vec)))
        Lamda_R[:, m] = np.diag(
            np.dot(np.dot(np.transpose(eig_vec), Cn), eig_vec))

    q95 = np.percentile(Lamda_R, 95, axis=1)
    q05 = np.percentile(Lamda_R, 5, axis=1)

    #modes = np.arange(nmode)

    # determine principal component time series
    PC = np.zeros((N - M + 1, nrec * M))
    PC[:, :] = np.nan
    for k in np.arange(nrec * M):
        for i in np.arange(0, N - M + 1):
            #   modify for nan
            prod = Y[i, :] * eig_vec[:, k]
            ngood = sum(~np.isnan(prod))
            #   must have at least M*f good points
            if ngood >= M * f:
                # the columns of this matrix are Ak(t), k=1 to M (T-PCs)
                PC[i, k] = sum(prod[~np.isnan(prod)])

    # compute reconstructed timeseries
    Np = N - M + 1

    RC = np.zeros((nrec, N, nrec * M))

    for k in np.arange(nrec):
        for im in np.arange(M):
            x2 = np.dot(
                np.expand_dims(PC[:, im], axis=1),
                np.expand_dims(eig_vec[0 + k * M:M + k * M, im], axis=0))
            x2 = np.flipud(x2)

            for n in np.arange(N):
                RC[k, n, im] = np.diagonal(x2, offset=-(Np - 1 - n)).mean()

    return deval, eig_vec, q95, q05, PC, RC
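A minimal usage sketch for this tuple-returning variant of mssa(); numpy, nitime's algorithms module (alg) and scipy.linalg's eigh are assumed to be imported at module level, and the synthetic input and parameter values are illustrative:

import numpy as np

rng = np.random.default_rng(42)
data = rng.standard_normal((400, 3))    # 400 time steps, 3 channels
deval, eig_vec, q95, q05, PC, RC = mssa(data, M=40, MC=50)
signif = np.where(deval > q95)[0]       # modes above the 95% noise percentile
print('number of significant modes:', signif.size)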
Example #13
def ssa_all(data, M, MC=1000, f=0.3):
    '''SSA analysis of a single time series
    (applicable to data containing missing values);
    significance is tested with a Monte-Carlo method.

    Input:
    data: time series
    M: window size
    MC: number of iterations of the Monte-Carlo process
    f: fraction (0 < f <= 1) of good data points required for identifying
    significant PCs [default: 0.3]

    Output:
    deval: eigenvalue spectrum
    eig_vec: matrix of eigenvectors
    q05: the 5th percentile of the Monte-Carlo eigenvalues
    q95: the 95th percentile of the Monte-Carlo eigenvalues
    PC: matrix of principal components
    RC: matrix of reconstructed components (N, M)

    A usage sketch follows the function body below.
    '''

    from nitime import utils
    from nitime import algorithms as alg

    Xr = standardize(data)
    N = len(data)

    c = np.zeros(M)

    for j in range(M):
        prod = Xr[0:N - j] * Xr[j:N]
        c[j] = sum(prod[~np.isnan(prod)]) / (sum(~np.isnan(prod)) - 1)

    C = toeplitz(c[0:M])

    eig_val, eig_vec = eigh(C)

    sort_tmp = np.sort(eig_val)
    deval = sort_tmp[::-1]
    sortarg = np.argsort(-eig_val)

    eig_vec = eig_vec[:, sortarg]

    coefs_est, var_est = alg.AR_est_YW(Xr[~np.isnan(Xr)], 1)
    sigma_est = np.sqrt(var_est)

    noise = np.zeros((N, MC))
    noise[0, :] = Xr[0]
    Lamda_R = np.zeros((M, MC))

    for jt in range(1, N):
        noise[jt, :] = (coefs_est * noise[jt - 1, :]
                        + sigma_est * np.random.randn(1, MC))

    for m in range(MC):
        noise[:, m] = ((noise[:, m] - np.mean(noise[:, m]))
                       / np.std(noise[:, m], ddof=1))
        Gn = np.correlate(noise[:, m], noise[:, m], "full")
        lgs = np.arange(-N + 1, N)
        Gn = Gn / (N - abs(lgs))
        Cn = toeplitz(Gn[N - 1:N - 1 + M])
        #Lamda_R[:,m] = np.diag(np.dot(np.dot(eig_vec,Cn),np.transpose(eig_vec)))
        Lamda_R[:, m] = np.diag(
            np.dot(np.dot(np.transpose(eig_vec), Cn), eig_vec))

    q95 = np.percentile(Lamda_R, 95, axis=1)
    q05 = np.percentile(Lamda_R, 5, axis=1)
    #modes = np.arange(nmode)

    # determine principal component time series
    PC = np.zeros((N - M + 1, M))
    PC[:, :] = np.nan
    for k in np.arange(M):
        for i in np.arange(0, N - M + 1):
            #   modify for nan
            prod = Xr[i:i + M] * eig_vec[:, k]
            ngood = sum(~np.isnan(prod))
            #   must have at least M*f good points
            if ngood >= M * f:
                # the columns of this matrix are Ak(t), k=1 to M (T-PCs);
                # the sum is scaled by M/ngood to compensate for missing points
                PC[i, k] = sum(prod[~np.isnan(prod)]) * M / ngood

    # compute reconstructed timeseries
    Np = N - M + 1

    RC = np.zeros((N, M))

    for im in np.arange(M):
        x2 = np.dot(np.expand_dims(PC[:, im], axis=1),
                    np.expand_dims(eig_vec[0:M, im], axis=0))
        x2 = np.flipud(x2)

        for n in np.arange(N):
            RC[n, im] = np.diagonal(x2, offset=-(Np - 1 - n)).mean()

    return deval, eig_vec, q05, q95, PC, RC
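A minimal usage sketch for ssa_all() above; standardize(), eigh() and toeplitz() (e.g. from scipy.linalg) are assumed to be available at module level, as in the original source, and the synthetic series and parameter values are illustrative:

import numpy as np

rng = np.random.default_rng(1)
t = np.arange(300, dtype=float)
series = np.sin(2 * np.pi * t / 40) + rng.standard_normal(t.size)

deval, eig_vec, q05, q95, PC, RC = ssa_all(series, M=30, MC=100)
print('leading eigenvalues:', deval[:4])
print('RC shape (N, M):', RC.shape)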