def mcarma22(niter=10, nsample=1000, ar=None, ma=None, sig=0.5):
    '''Monte Carlo study of an ARMA(2,2) data generating process.

    Draws `niter` series of length `nsample` from the ARMA DGP, fits an
    ARMA(2,2) with `Arma.fit` each time, and collects the parameter
    estimates and their standard errors.

    Parameters
    ----------
    niter : int
        number of Monte Carlo replications
    nsample : int
        length of each simulated series (a 1000-observation burn-in is
        generated and discarded)
    ar, ma : list of float or None
        lag polynomials of the DGP including the leading 1.0; defaults
        are ar=[1.0, -0.55, -0.1] and ma=[1.0, 0.3, 0.2]
    sig : float
        scale of the innovations

    Returns
    -------
    true_params : ndarray
        ar and ma coefficients of the DGP with the leading 1.0 dropped
    results : ndarray
        estimated parameters, one row per replication
    results_bse : ndarray
        standard errors per replication; a NaN row when the optimizer
        returned no covariance estimate
    '''
    if ar is None:
        ar = [1.0, -0.55, -0.1]
    if ma is None:
        ma = [1.0, 0.3, 0.2]
    estimates = []
    std_errors = []
    for _ in range(niter):
        # simulate with burn-in, keep the last `nsample` points, demean
        sample = arma_generate_sample(ar, ma, nsample + 1000, sig)[-nsample:]
        sample = sample - sample.mean()
        model = Arma(sample)
        rhohat, cov_x, infodict, mesg, ier = model.fit((2, 2))
        estimates.append(rhohat)
        resid = model.geterrors(rhohat)
        sige = np.sqrt(np.dot(resid, resid) / nsample)
        if cov_x is None:
            # no covariance from the optimizer -- record NaNs
            std_errors.append(np.nan + np.zeros_like(rhohat))
        else:
            std_errors.append(sige * np.sqrt(np.diag(cov_x)))
    return np.r_[ar[1:], ma[1:]], np.array(estimates), np.array(std_errors)
def mcarma22(niter=10, nsample=1000, ar=None, ma=None, sig=0.5):
    '''run Monte Carlo for ARMA(2,2) DGP

    Simulates `niter` ARMA(2,2) samples of length `nsample`, fits each
    with `Arma.fit` and collects parameter estimates and standard errors.

    Parameters
    ----------
    niter : int
        number of Monte Carlo replications
    nsample : int
        length of each simulated series (a 1000-observation burn-in is
        generated and dropped)
    ar, ma : list of float or None
        lag polynomials of the DGP including the leading 1.0; defaults
        are ar=[1.0, -0.55, -0.1] and ma=[1.0, 0.3, 0.2]
    sig : float
        scale of the innovations

    Returns
    -------
    true_params : ndarray
        DGP ar and ma coefficients without the leading 1.0
    results : ndarray
        parameter estimates, one row per replication
    results_bse : ndarray
        standard errors per replication; NaN row when the optimizer
        returned no covariance
    '''
    if ar is None:
        ar = [1.0, -0.55, -0.1]
    if ma is None:
        ma = [1.0, 0.3, 0.2]
    results = []
    results_bse = []
    for _ in range(niter):
        y2 = arma_generate_sample(ar, ma, nsample + 1000, sig)[-nsample:]
        y2 -= y2.mean()  # no constant/mean correction in estimation so far
        arest2 = Arma(y2)
        rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2, 2))
        results.append(rhohat2a)
        err2a = arest2.geterrors(rhohat2a)
        sige2a = np.sqrt(np.dot(err2a, err2a) / nsample)
        # fix: `cov_x2a is not None` replaces `not cov_x2a is None` (PEP 8, E714)
        if cov_x2a is not None:
            results_bse.append(sige2a * np.sqrt(np.diag(cov_x2a)))
        else:
            # optimizer produced no covariance estimate -- record NaNs
            results_bse.append(np.nan + np.zeros_like(rhohat2a))
    return np.r_[ar[1:], ma[1:]], np.array(results), np.array(results_bse)
def setup_class(cls):
    """Simulate an ARMA(2,1) series with t(3) noise, fit by CLS and MLE,
    and store hard-coded expected values used as regression targets."""
    nobs = 500
    ar_coefs = [1, -0.5, 0.1]
    ma_coefs = [1, 0.7]

    # heavy-tailed innovations: Student t with 3 degrees of freedom
    def t3_noise(n):
        return np.random.standard_t(3, size=n)

    np.random.seed(8659567)
    x = arma_generate_sample(ar_coefs, ma_coefs, nobs, sigma=1,
                             distrvs=t3_noise, burnin=500)
    mod = Arma(x)
    order = (2, 1)
    cls.res_ls = mod.fit(order=order)
    cls.res = mod.fit_mle(order=order,
                          start_params=np.r_[cls.res_ls[0], 1],
                          method='nm', disp=False)

    # expected values for the regression tests (row meaning not documented
    # here -- presumably params/bse/tvalues/... blocks; verify in the tests)
    cls.res1_table = np.array(
        [[0.4339072, -0.08402653, 0.73292344, 1.61661128],
         [0.05854268, 0.05562941, 0.04034178, 0.0511207],
         [7.4118102, -1.51046975, 18.16785075, 31.62341666],
         [0.0, 0.1309236, 0.0, 0.0],
         [0.06713617, 0.05469138, 0.03785006, 0.1071093],
         [0.05504093, 0.0574849, 0.04350945, 0.02510928]])
    cls.res1_conf_int = np.array([[0.31916567, 0.54864874],
                                  [-0.19305817, 0.0250051],
                                  [0.65385501, 0.81199188],
                                  [1.51641655, 1.71680602]])
    cls.ls_params = np.array([0.43393123, -0.08402678, 0.73293058])
    cls.ls_bse = np.array([0.0377741, 0.03567847, 0.02744488])
def test_compare_arma():
    """Compare the Kalman-filter ARMA fit against the sandbox Arma
    conditional-LS and conditional-MLE fits on one simulated ARMA(1,1).

    Preliminary test: the fit results are incomplete, no random seed is
    set, and agreement is only checked to roughly 5% (decimal=1 on the
    ratio of parameter estimates).
    """
    #np.random.seed(9876565)
    series = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(
        size=200, burnin=1000)

    # reference: Kalman-filter based ARMA estimate
    modkf = ARMA(series)
    reskf = modkf.fit((1,1), trend='nc', disp=-1)
    dres = reskf

    # sandbox Arma: conditional least squares and conditional MLE
    modc = Arma(series)
    resls = modc.fit(order=(1,1))
    rescm = modc.fit_mle(order=(1,1), start_params=[0.4,0.4, 1.], disp=0)

    # decimal=1 on the ratio corresponds to a ~5% difference threshold
    assert_almost_equal(resls[0] / dres.params,
                        np.ones(dres.params.shape), decimal=1)
    # rescm.params carries the variance estimate as its last element
    assert_almost_equal(rescm.params[:-1] / dres.params,
                        np.ones(dres.params.shape), decimal=1)
def setup_class(cls):
    """Fit ARMA(2,1) by CLS and by MLE on one fixed simulated series with
    Student-t(3) innovations; store expected regression values."""
    nobs = 500
    ar_poly = [1, -0.5, 0.1]
    ma_poly = [1, 0.7]

    # innovations drawn from a t distribution with 3 degrees of freedom
    def draw_t3(n):
        return np.random.standard_t(3, size=n)

    np.random.seed(8659567)
    x = arma_generate_sample(ar_poly, ma_poly, nobs, sigma=1,
                             distrvs=draw_t3, burnin=500)
    mod = Arma(x)
    order = (2, 1)
    cls.res_ls = mod.fit(order=order)
    cls.res = mod.fit_mle(order=order,
                          start_params=np.r_[cls.res_ls[0], 1],
                          method='nm', disp=False)

    # hard-coded expected values used as regression targets below
    cls.res1_table = np.array(
        [[0.4339072, -0.08402653, 0.73292344, 1.61661128],
         [0.05854268, 0.05562941, 0.04034178, 0.0511207],
         [7.4118102, -1.51046975, 18.16785075, 31.62341666],
         [0.0, 0.1309236, 0.0, 0.0],
         [0.06713617, 0.05469138, 0.03785006, 0.1071093],
         [0.05504093, 0.0574849, 0.04350945, 0.02510928]])
    cls.res1_conf_int = np.array([[0.31916567, 0.54864874],
                                  [-0.19305817, 0.0250051],
                                  [0.65385501, 0.81199188],
                                  [1.51641655, 1.71680602]])
    cls.ls_params = np.array([0.43393123, -0.08402678, 0.73293058])
    cls.ls_bse = np.array([0.0377741, 0.03567847, 0.02744488])
def setup_class(cls):
    """Simulate ARMA(2,1) with t(3) innovations and fit by CLS and MLE.

    Stores hard-coded expected values used as regression targets.
    """
    nobs = 500
    ar_poly = [1, -0.5, 0.1]
    ma_poly = [1, 0.7]
    # t(3) innovations via a pre-bound sampler
    draw_t3 = partial(np.random.standard_t, 3)
    np.random.seed(8659567)
    x = arma_generate_sample(ar_poly, ma_poly, nobs, scale=1,
                             distrvs=draw_t3, burnin=500)
    # instantiating Arma emits a FutureWarning (deprecated class)
    with pytest.warns(FutureWarning):
        mod = Arma(x)
    order = (2, 1)
    cls.res_ls = mod.fit(order=order)
    cls.res = mod.fit_mle(
        order=order,
        start_params=np.r_[cls.res_ls[0], 1],
        method="nm",
        disp=False,
    )
    # expected regression values
    cls.res1_table = np.array([
        [0.43390720, -0.08402653, 0.73292344, 1.61661128],
        [0.05854268, 0.055629410, 0.04034178, 0.05112070],
        [7.41181020, -1.51046975, 18.16785075, 31.62341666],
        [0.00000000, 0.130923600, 0.00000000, 0.00000000],
        [0.06713617, 0.054691380, 0.03785006, 0.10710930],
        [0.05504093, 0.057484900, 0.04350945, 0.02510928],
    ])
    cls.res1_conf_int = np.array([
        [0.31916567, 0.54864874],
        [-0.19305817, 0.02500510],
        [0.65385501, 0.81199188],
        [1.51641655, 1.71680602],
    ])
    cls.ls_params = np.array([0.43393123, -0.08402678, 0.73293058])
    cls.ls_bse = np.array([0.0377741, 0.03567847, 0.02744488])
def test_compare_arma():
    """Preliminary comparison of arma_kf, arma_cond_ls and arma_cond_mle.

    The results returned by the fit methods are incomplete; for now no
    random seed is set. Agreement is checked only to ~5% (decimal=1 on
    the ratio of the parameter estimates).
    """
    #np.random.seed(9876565)
    series = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(
        size=200, burnin=1000)

    # Kalman-filter based reference fit
    modkf = ARMA(series)
    reskf = modkf.fit((1, 1), trend='nc', disp=-1)
    dres = reskf

    # conditional least squares and conditional MLE
    modc = Arma(series)
    resls = modc.fit(order=(1, 1))
    rescm = modc.fit_mle(order=(1, 1), start_params=[0.4, 0.4, 1.], disp=0)

    # decimal=1 on the ratio ~ a 5% difference threshold
    assert_almost_equal(resls[0] / dres.params,
                        np.ones(dres.params.shape), decimal=1)
    # last element of rescm.params is the variance estimate -- drop it
    assert_almost_equal(rescm.params[:-1] / dres.params,
                        np.ones(dres.params.shape), decimal=1)
# Example script: simulate an ARMA(1,1) series and compare estimates from
# Arma.fit_mle (fmin), Arma.fit (bfgs) and tsa.arima.ARIMA on the same data.
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arma_generate_sample(ar, ma, 1000, 0.1)
y1 -= y1.mean()  #no mean correction/constant in estimation so far
arma1 = Arma(y1)
# lag orders set on the instance; fit_mle below also receives order=(1, 1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit_mle(order=(1, 1), method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
arma2.nar = 1
arma2.nma = 1
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
# numerical Hessian of the loglikelihood for comparison with the model one
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
# same series through the ARIMA implementation
arest = tsa.arima.ARIMA(y1)
resls = arest.fit((1, 0, 1))
print(resls[0])
print(resls[1])
# summary printout comparing the methods against the DGP parameters
print('\nparameter estimate - comparing methods')
print('---------------------------------------')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')
print(arma1res.params)
print('mle with bfgs')
# fix: converted Python 2 `print` statements to `print()` calls so the chunk
# is valid Python 3 and consistent with the rest of the file.
#need to iterate, ar1 too large ma terms too small
#fix large parameters, if hannan_rissannen start values are too large:
#clip everything except the error sigma to the interval [-0.75, 0.75]
start_params_mle[:-1] = (np.sign(start_params_mle[:-1])
                         * np.minimum(np.abs(start_params_mle[:-1]), 0.75))
print('conditional least-squares')
#print rhohat2
print('with mle')
# fit_mle takes no order argument; nar/nma must be set on the instance
arest2.nar = 2
arest2.nma = 2
# res = arest2.fit_mle(start_params=start_params_mle, method='nm') #no order in fit
# NOTE(review): the fit_mle call above is commented out, so `res` below must
# already exist from earlier code -- otherwise this raises NameError; verify.
print(res.params)
rhohat2, cov_x2a, infodict, mesg, ier = arest2.fit((2, 2))
print('\nARIMA_old')
# refit the same series with the old ARIMA implementation for comparison
arest = ARIMA_old(y22)
rhohat1, cov_x1, infodict, mesg, ier = arest.fit((2, 0, 2))
print(rhohat1)
print(np.sqrt(np.diag(cov_x1)))
err1 = arest.errfn(x=y22)
print(np.var(err1))
print('bse ls, formula not checked')
print(np.sqrt(np.diag(cov_x1)) * err1.std())
print('bsejac for mle')
#print arest2.bsejac
#TODO:check bsejac raises singular matrix linalg error
#in model.py line620: return np.linalg.inv(np.dot(jacv.T, jacv))
print('\nyule-walker')
#need to iterate, ar1 too large ma terms too small #fix large parameters, if hannan_rissannen are too large start_params_mle[:-1] = (np.sign(start_params_mle[:-1]) * np.minimum(np.abs(start_params_mle[:-1]),0.75)) print('conditional least-squares') #print rhohat2 print('with mle') arest2.nar = 2 arest2.nma = 2 # res = arest2.fit_mle(start_params=start_params_mle, method='nm') #no order in fit print(res.params) rhohat2, cov_x2a, infodict, mesg, ier = arest2.fit((2,2)) print('\nARIMA_old') arest = ARIMA_old(y22) rhohat1, cov_x1, infodict, mesg, ier = arest.fit((2,0,2)) print(rhohat1) print(np.sqrt(np.diag(cov_x1))) err1 = arest.errfn(x=y22) print(np.var(err1)) print('bse ls, formula not checked') print(np.sqrt(np.diag(cov_x1))*err1.std()) print('bsejac for mle') #print arest2.bsejac #TODO:check bsejac raises singular matrix linalg error #in model.py line620: return np.linalg.inv(np.dot(jacv.T, jacv)) print('\nyule-walker')
# Timing/comparison driver for the different Arma fitting methods.
# `t1`, `t2`, `arma1` and `results` are defined earlier in the script.
print('time used:', t2-t1)
print("Arma.fit_mle results")
# have to set nar and nma manually
arma1.nar = 2
arma1.nma = 2
t2=time()
ret = arma1.fit_mle()
t3=time()
print("params, first 4, sigma, last 1 ", ret.params)
results += ["Arma.fit_mle ", ret.params[:4], ret.params[-1], ret.llf]
print('time used:', t3-t2)
print("Arma.fit method = \"ls\"")
t3=time()
ret2 = arma1.fit(order=(2,0,2), method="ls")
t4=time()
print(ret2[0])
results += ["Arma.fit ls", ret2[0]]
print('time used:', t4-t3)
print("Arma.fit method = \"CLS\"")
t4=time()
# NOTE(review): method="None" is the string, not None -- presumably selects
# the fallback branch in Arma.fit; verify against the Arma implementation.
ret3 = arma1.fit(order=(2,0,2), method="None")
t5=time()
print(ret3)
results += ["Arma.fit other", ret3[0]]
print('time used:', t5-t4)
# dump the accumulated comparison results
for i in results:
    print(i)
# Demo script: compare TsaDescriptive's ARMA(1,1) fit with the sandbox Arma
# conditional-LS and conditional-MLE fits on one simulated series, then plot.
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.descriptivestats import TsaDescriptive
from statsmodels.tsa.arma_mle import Arma

# simulate ARMA(1,1): ar poly [1, -0.5], ma poly [1, 0.4], long burn-in
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)
d = TsaDescriptive(x)
d.plot4()
#d.fit(order=(1,1))
d.fit((1,1), trend='nc')
print(d.res.params)

# same series through the sandbox Arma class
modc = Arma(x)
resls = modc.fit(order=(1,1))
print(resls[0])
rescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])
print(rescm.params)

#decimal 1 corresponds to threshold of 5% difference
assert_almost_equal(resls[0] / d.res.params, 1, decimal=1)
assert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)
#copied to tsa.tests

# plot the data against the within-sample predictions, then the errors
plt.figure()
plt.plot(x, 'b-o')
plt.plot(modc.predicted(), 'r-')
plt.figure()
plt.plot(modc.error_estimate)
#plt.show()
# Timing/comparison driver for the different Arma fitting methods.
# `t1`, `t2`, `arma1` and `results` are defined earlier in the script.
print('time used:', t2 - t1)
print("Arma.fit_mle results")
# have to set nar and nma manually
arma1.nar = 2
arma1.nma = 2
t2 = time()
ret = arma1.fit_mle()
t3 = time()
print("params, first 4, sigma, last 1 ", ret.params)
results += ["Arma.fit_mle ", ret.params[:4], ret.params[-1], ret.llf]
print('time used:', t3 - t2)
print("Arma.fit method = \"ls\"")
t3 = time()
ret2 = arma1.fit(order=(2, 0, 2), method="ls")
t4 = time()
print(ret2[0])
results += ["Arma.fit ls", ret2[0]]
print('time used:', t4 - t3)
print("Arma.fit method = \"CLS\"")
t4 = time()
# NOTE(review): method="None" is the string, not None -- presumably selects
# the fallback branch in Arma.fit; verify against the Arma implementation.
ret3 = arma1.fit(order=(2, 0, 2), method="None")
t5 = time()
print(ret3)
results += ["Arma.fit other", ret3[0]]
print('time used:', t5 - t4)
# dump the accumulated comparison results
for i in results:
    print(i)
# fix: converted Python 2 `print` statements to `print()` calls so the chunk
# is valid Python 3 and consistent with the rest of the file.
# Example script: simulate an ARMA(1,1) series and compare estimates from
# Arma.fit_mle (fmin), Arma.fit (bfgs) and tsa.arima.ARIMA on the same data.
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arma_generate_sample(ar, ma, 1000, 0.1)
y1 -= y1.mean()  #no mean correction/constant in estimation so far
arma1 = Arma(y1)
# lag orders set on the instance; fit_mle below also receives order=(1, 1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit_mle(order=(1, 1), method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
arma2.nar = 1
arma2.nma = 1
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
# numerical Hessian of the loglikelihood for comparison with the model one
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
# same series through the ARIMA implementation
arest = tsa.arima.ARIMA(y1)
resls = arest.fit((1, 0, 1))
print(resls[0])
print(resls[1])
# summary printout comparing the methods against the DGP parameters
print('\nparameter estimate - comparing methods')
print('---------------------------------------')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')