def test_ywcoef():
    assert_array_almost_equal(
        mlywar.arcoef100[1:],
        -regression.yule_walker(x100, 10, method="mle")[0],
        8,
    )
    assert_array_almost_equal(
        mlywar.arcoef1000[1:],
        -regression.yule_walker(x1000, 20, method="mle")[0],
        8,
    )
def apply(self, chunk):
    if chunk.shape[0] <= self.max_chunk_size:
        x = self.buffer.update_buffer(chunk)
        y = sg.filtfilt(*self.ba_bandpass, x)
        pred = None  # only computed when AR extrapolation is needed below
        if self.delay < self.n_taps_edge_left:
            # fit an AR model to the band-passed signal and extrapolate past the edge
            ar, s = yule_walker(y.real[:-self.n_taps_edge_left], self.ar_order, 'mle')
            pred = y.real[:-self.n_taps_edge_left].tolist()
            for _ in range(self.n_taps_edge_left + self.n_taps_edge_right):
                pred.append(ar[::-1].dot(pred[-self.ar_order:]))
            an_signal = sg.hilbert(pred)
            env = an_signal[-self.n_taps_edge_right - self.delay - len(chunk) + 1:
                            -self.n_taps_edge_right - self.delay + 1] * np.ones(len(chunk))

            # plt.plot(x, alpha=0.1)
            # plt.plot(pred, alpha=0.9)
            # plt.plot(np.abs(an_signal))
            # plt.plot(y[:-self.n_taps_edge_left], 'k')
            # plt.plot(y, 'k--')
            # plt.show()
        else:
            env = sg.hilbert(y)[-self.delay - len(chunk) + 1:-self.delay + 1] * np.ones(len(chunk))
        return env, y, pred
    else:
        return rt_emulate(self, chunk, self.max_chunk_size)
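# The apply() method above leans on a standard trick: fit an AR model with
# Yule-Walker and extrapolate the band-passed signal past the window edge, so
# the Hilbert transform's edge distortion falls on predicted samples. Below is
# a minimal standalone sketch of that step, assuming statsmodels' yule_walker
# and scipy.signal.hilbert; the toy signal and parameter values are
# illustrative only, not taken from the class above.
import numpy as np
import scipy.signal as sg
from statsmodels.regression.linear_model import yule_walker

fs, ar_order, n_pred = 250, 25, 30                      # illustrative values
t = np.arange(2 * fs) / fs
y = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)  # toy 10 Hz signal

ar, _ = yule_walker(y, ar_order, method='mle')          # AR coefficients rho[0..p-1]
pred = y.tolist()
for _ in range(n_pred):                                 # forward-predict past the edge
    pred.append(float(np.dot(ar[::-1], pred[-ar_order:])))

env = np.abs(sg.hilbert(pred))                          # envelope of the extended signal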
if __name__ == '__main__':
    nobs = 50
    ar = [1.0, -0.8, 0.1]
    ma = [1.0, 0.1, 0.2]
    #ma = [1]
    np.random.seed(9875789)
    y = arma_generate_sample(ar, ma, nobs, 2)
    y -= y.mean()  #I have not checked treatment of mean yet, so remove
    mod = MLEGLS(y)
    mod.nar, mod.nma = 2, 2  #needs to be added, no init method
    mod.nobs = len(y)
    res = mod.fit(start_params=[0.1, -0.8, 0.2, 0.1, 1.])
    print('DGP', ar, ma)
    print(res.params)
    from statsmodels.regression import yule_walker
    print(yule_walker(y, 2))
    #resi = mod.fit_invertible(start_params=[0.1, 0, 0.2, 0, 0.5])
    #print(resi.params)
    arpoly, mapoly = getpoly(mod, res.params[:-1])

    data = sm.datasets.sunspots.load(as_pandas=False)
    #ys = data.endog[-100:]
##    ys = data.endog[12:] - data.endog[:-12]
##    ys -= ys.mean()
##    mods = MLEGLS(ys)
##    mods.nar, mods.nma = 13, 1  #needs to be added, no init method
##    mods.nobs = len(ys)
##    ress = mods.fit(start_params=np.r_[0.4, np.zeros(12), [0.2, 5.]], maxiter=200)
##    print(ress.params)

##    import matplotlib.pyplot as plt
def arCoeff(array):
    # yule_walker returns the AR coefficients first, then the residual std
    rho, sigma = regression.yule_walker(array, order=4)
    return rho, sigma
def genelwwLOCwithoutdelay(one_signal, orderLPC, Fs_Hz, az_deg, el_deg,
                           velocity_mps, Sigma_aec, xsensors_m):
    """
    # Generate loss of coherence (LOC) with noise
    # Synopsis:
    #     genelwwLOCwithoutdelay(one_signal, orderLPC,
    #         Fs_Hz, az_deg, el_deg, velocity_mps, Sigma_aec, xsensors_m)
    # Inputs
    #     one_signal: one SOI
    #     orderLPC: order of the AR/LPC analysis
    #     Fs_Hz: sampling frequency in Hz
    #     az_deg, el_deg, velocity_mps:
    #         deterministic part of the DOA
    #
    #     Sigma_aec: 3-array of the standard deviations of the random part of the DOA
    #         Sigma_aec[0] = std of the azimuth in rad
    #         Sigma_aec[1] = std of the elevation in rad
    #         Sigma_aec[2] = std of the velocity in m/s
    #
    #     xsensors_m: M x 3
    #         3D locations of the M sensors
    #
    #=======
    # Outputs:
    #     signal_withLOC:
    #         M-ary signal (with LOC, without delays)
    #
    #     theta0_spm:
    #         wavenumber (in s/m) of the deterministic part of the DOA
    #
    #     Gamma2_epsilon:
    #         positive matrix in s^2/m^2 (LOC)
    #
    #     cf: M x M x N
    #         coherence functions between sensor pairs
    #=====================================================================
    """
    M = size(xsensors_m, 0)
    az_rd = az_deg * pi / 180
    el_rd = el_deg * pi / 180
    cosa = cos(az_rd)
    sina = sin(az_rd)
    cose = cos(el_rd)
    sine = sin(el_rd)
    ##===== deterministic part (wavenumber in s/m)
    theta0_spm = array([-sina * cose, -cosa * cose, sine]) / velocity_mps
    ##===== random part: Jacobian of theta w.r.t. (azimuth, elevation, velocity)
    Jacobian = zeros([3, 3])
    Jacobian[:, 0] = array([-cosa * cose, sina * sine,
                            sina * cose / velocity_mps]) / velocity_mps
    Jacobian[:, 1] = array([sina * cose, cosa * sine,
                            cosa * cose / velocity_mps]) / velocity_mps
    Jacobian[:, 2] = array([0, cose,
                            -sine / velocity_mps]) / velocity_mps
    Gamma2_epsilon = dot(transpose(Jacobian),
                         dot(diag(Sigma_aec * Sigma_aec), Jacobian))
    ## AR analysis
    N = len(one_signal)
    theta_lp, sigma_AR = yule_walker(one_signal, orderLPC)
    theta_AR = append(1, theta_lp)
    residue = lfilter(theta_AR, [1.0], one_signal) / sigma_AR
    F_Hz = Fs_Hz * array(range(N)) / N
    F2_Hz2 = F_Hz ** 2
    pi2 = pi * pi
    #==== extract M random sequences from the residue by permutation
    wwhite = zeros([N, M])
    wwhite[:, 0] = residue[:, 0]
    for im in range(1, M):
        wwhite[:, im] = residue[random.permutation(N), 0]
    #=== innovation generation
    cf = ones([M, M, N])
    cp = 0
    for im1 in range(M - 1):
        for im2 in range(im1 + 1, M):
            cp = cp + 1
            sigma2_s2 = dot(
                dot((xsensors_m[im2, :] - xsensors_m[im1, :]), Gamma2_epsilon),
                (xsensors_m[im2, :] - xsensors_m[im1, :]))
            # LOC WITHOUT delays
            cf[im1, im2, :] = exp(-2 * pi2 * F2_Hz2 * sigma2_s2)
            cf[im2, im1, :] = conj(cf[im1, im2, :])
    Wf = fft(wwhite)
    Xf = zeros([N, M]) + complex(0, 1) * zeros([N, M])
    #===
    GG = zeros([M, M, N])
    for indf in range(N // 2):  # 1:N/2+1,
        GGaux = cf[:, :, indf]
        UG, GGauxsvd, VG = svd(GGaux)
        sqrtmGGaux = dot(dot(UG, diag(sqrt(GGauxsvd))), VG)
        Xf[indf, :] = dot(Wf[indf, :], sqrtmGGaux)
    Xf[range(N // 2 + 1, N), :] = conj(Xf[range(N - N // 2 - 1, 0, -1), :])
    Xf[N // 2, :] = real(Xf[N // 2, :]) / 2.0
    Xf[0, :] = real(Xf[0, :]) / 2.0
    xt = ifft(Xf, axis=0)
    signal_out_aux = real(xt)
    signal_out_aux = signal_out_aux - ones([N, 1]) * mean(signal_out_aux)
    #=== synthesis of signal
    signal_out_with = signal_out_aux
    signal_withLOC = signal_out_aux
    for im in range(M):
        xe = signal_out_aux[:, im]
        signal_out_with[:, im] = lfilter([1.0], theta_AR, xe)
        signal_withLOC[:, im] = signal_out_with[:, im] \
            / std(signal_out_with[:, im])
    return signal_withLOC, theta0_spm, Gamma2_epsilon, cf
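# Hypothetical usage sketch of genelwwLOCwithoutdelay defined above; the
# sensor geometry, DOA, and white-noise SOI are made-up illustrative values,
# and the function is assumed to be in scope with its numpy/scipy/statsmodels
# dependencies imported.
import numpy as np

Fs_Hz = 20.0                                   # sampling frequency in Hz
one_signal = np.random.randn(4096, 1)          # one SOI as an N x 1 column, as expected above
xsensors_m = np.array([[0.0,   0.0, 0.0],      # M = 3 sensor positions in metres
                       [100.0, 0.0, 0.0],
                       [0.0, 100.0, 0.0]])
Sigma_aec = np.array([np.deg2rad(2.0),         # std of azimuth (rad)
                      np.deg2rad(1.0),         # std of elevation (rad)
                      5.0])                    # std of velocity (m/s)

signal_withLOC, theta0_spm, Gamma2_epsilon, cf = genelwwLOCwithoutdelay(
    one_signal, orderLPC=6, Fs_Hz=Fs_Hz, az_deg=50.0, el_deg=10.0,
    velocity_mps=340.0, Sigma_aec=Sigma_aec, xsensors_m=xsensors_m)

print(signal_withLOC.shape)   # (N, M): one LOC-affected copy of the SOI per sensor
print(theta0_spm)             # deterministic wavenumber in s/m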
def test_yule_walker_inter():
    # see 1869
    x = np.array([1, -1, 2, 2, 0, -2, 1, 0, -3, 0, 0])
    # it works
    regression.yule_walker(x, 3)
print("\nExample 1")
ar = [1., -0.8]
ma = [1., 0.5]
y1 = arma.generate_sample(ar, ma, 1000, 0.1)
arma = ARMA(x=y1, p=1, q=1)
rhohat1, cov_x1, infodict, mesg, ier = arma.fit(y1, 1, 1)
print('estimate')
print(rhohat1)
print('covariance')
print(cov_x1)
err1 = arma.errfn(x=y1)
print('error')
print(np.var(err1))
print(regression.yule_walker(y1, order=2, inv=True))

print("\nExample 2")
arma2 = ARMA()
nsample = 1000
ar = [1.0, -0.6, -0.1]
ma = [1.0, 0.3, 0.2]
y2 = arma2.generate_sample(ar, ma, nsample, 0.1)
rhohat2, cov_x2, infodict, mesg, ier = arma2.fit(y2, 1, 2)
print(rhohat2)
print(cov_x2)
err2 = arma2.errfn(x=y2)
print('error = %s' % np.var(err2))
print("estimated b, a")
print(arma2.b)
print(arma2.a)
res = arma.fit((4, 0, 0))
print(res[0])

acf1 = acf(arrvs[0])
acovf1b = acovf(arrvs[0], unbiased=False)
acf2 = autocorr(arrvs[0])
acf2m = autocorr(arrvs[0] - arrvs[0].mean())
print(acf1[:10])
print(acovf1b[:10])
print(acf2[:10])
print(acf2m[:10])

x = arma_generate_sample([1.0, -0.8], [1.0], 500)
print(acf(x)[:20])
print(regression.yule_walker(x, 10))

#ax = plt.axes()
plt.plot(x)
#plt.show()

plt.figure()
pltxcorr(plt, x, x)
plt.figure()
pltxcorr(plt, x, x, usevlines=False)
plt.figure()
#FIXME: plotacf was moved to graphics/tsaplots.py, and interface changed
plot_acf(plt, acf1[:20], np.arange(len(acf1[:20])), usevlines=True)
plt.figure()
ax = plt.subplot(211)
plot_acf(ax, acf1[:20], usevlines=True)