Example #1
# imports assumed by this snippet (not shown in the original fragment):
import numpy as np
from statsmodels.tsa.arima_process import arma_generate_sample
# `Arma` is the ARMA estimation class from the statsmodels sandbox

def mcarma22(niter=10, nsample=1000, ar=None, ma=None, sig=0.5):
    '''run a Monte Carlo experiment for ARMA(2,2)

    The DGP parameters and the sample size `nsample` default to hard-coded
    values but can be overridden through the keyword arguments.

    Originally this was not a self-contained function and relied on
    instances from the outer scope; that has been corrected.
    '''
    if ar is None:
        ar = [1.0, -0.55, -0.1]
    if ma is None:
        ma = [1.0, 0.3, 0.2]
    results = []
    results_bse = []
    for _ in range(niter):
        # generate nsample + 1000 observations and keep the last nsample,
        # so the first 1000 serve as burn-in
        y2 = arma_generate_sample(ar, ma, nsample + 1000, sig)[-nsample:]
        y2 -= y2.mean()
        arest2 = Arma(y2)
        rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2, 2))
        results.append(rhohat2a)
        err2a = arest2.geterrors(rhohat2a)
        sige2a = np.sqrt(np.dot(err2a, err2a) / nsample)
        if cov_x2a is not None:
            results_bse.append(sige2a * np.sqrt(np.diag(cov_x2a)))
        else:
            results_bse.append(np.nan + np.zeros_like(rhohat2a))
    return np.r_[ar[1:], ma[1:]], np.array(results), np.array(results_bse)
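
A minimal usage sketch for mcarma22 above; the niter and nsample values are arbitrary, the sandbox Arma class is assumed importable, and how the estimates line up with the DGP lag-polynomial coefficients depends on the sign convention Arma uses:

true_params, estimates, bse = mcarma22(niter=50, nsample=500)
print('DGP coefficients (ar[1:], ma[1:]):', true_params)
print('mean parameter estimates:         ', estimates.mean(axis=0))
print('mean reported standard errors:    ', np.nanmean(bse, axis=0))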
Example #2
    # fragment of a larger script: resls, arma1res, res2, arest, arma1, y1,
    # the Arma class, np (numpy) and ndt (numdifftools) are defined earlier
    # in the original file
    print(resls[0])
    print(resls[1])

    print('\nparameter estimate - comparing methods')
    print('---------------------------------------')
    print('parameter of DGP ar(1), ma(1), sigma_error')
    print([-0.8, 0.5, 0.1])
    print('mle with fmin')
    print(arma1res.params)
    print('mle with bfgs')
    print(res2.params)
    print('cond. least squares uses optim.leastsq ?')
    errls = arest.error_estimate
    print(resls[0], np.sqrt(np.dot(errls, errls) / errls.shape[0]))

    err = arma1.geterrors(res2.params)
    print('cond least squares parameter cov')
    print(np.dot(errls, errls) / errls.shape[0] * resls[1])
    print('bfgs hessian')
    print(res2.model.optimresults['Hopt'][:2, :2])
    print('numdifftools inverse hessian')
    print(-np.linalg.inv(
        ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2, :2])

    print('\nFitting Arma(1,1) to squared data')
    arma3 = Arma(y1**2)
    res3 = arma3.fit(method='bfgs')
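
The Hessian comparison above can be reproduced with the current statsmodels API. The sketch below is an adaptation, not the original sandbox code: it uses statsmodels.tsa.arima.model.ARIMA in place of the sandbox Arma class, an arbitrary ARMA(1,1) DGP, and a small fixed step for numdifftools.

import numpy as np
import numdifftools as ndt
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.arima.model import ARIMA

np.random.seed(12345)
# arbitrary ARMA(1,1) DGP in lag-polynomial notation, noise std 0.1
y = arma_generate_sample([1.0, -0.8], [1.0, 0.5], 1000, scale=0.1)

mod = ARIMA(y, order=(1, 0, 1), trend='n')
res = mod.fit()
params = np.asarray(res.params)   # ar.L1, ma.L1, sigma2

# covariance reported by the fit vs. the inverse of the negative numerical
# Hessian of the log-likelihood at the estimates
hess = ndt.Hessian(mod.loglike, step=1e-4)(params)
print(res.cov_params())
print(np.linalg.inv(-hess))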
Example #3


    # fragment of the same script as Example #2: resls, arma1res, res2, arest,
    # arma1, y1, the Arma class, np and ndt (numdifftools) are defined earlier
    # in the original file
    print('\nparameter estimate - comparing methods')
    print('---------------------------------------')
    print('parameter of DGP ar(1), ma(1), sigma_error')
    print([-0.8, 0.5, 0.1])
    print('mle with fmin')
    print(arma1res.params)
    print('mle with bfgs')
    print(res2.params)
    print('cond. least squares uses optim.leastsq ?')
    errls = arest.error_estimate
    print(resls[0], np.sqrt(np.dot(errls, errls) / errls.shape[0]))

    err = arma1.geterrors(res2.params)
    print('cond least squares parameter cov')
    print(np.dot(errls, errls) / errls.shape[0] * resls[1])
    print('bfgs hessian')
    print(res2.model.optimresults['Hopt'][:2, :2])
    print('numdifftools inverse hessian')
    print(-np.linalg.inv(
        ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2, :2])

    print('\nFitting Arma(1,1) to squared data')
    arma3 = Arma(y1**2)
    res3 = arma3.fit(method='bfgs')
    print(res3.params)
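
Fitting an ARMA model to the squared series, as the last lines above do, is an informal check for ARCH-type effects: if the squares show ARMA structure, the conditional variance is predictable from the past. A minimal self-contained sketch with the current API; the simulated y1 here is only a stand-in for the series from the original script:

import numpy as np
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.arima.model import ARIMA

np.random.seed(0)
y1 = arma_generate_sample([1.0, -0.8], [1.0, 0.5], 1000, scale=0.1)

# trend='c' because the squared series has a nonzero mean; the optimizer may
# warn if the squares carry little ARMA structure, as for this homoskedastic DGP
res_sq = ARIMA(y1**2, order=(1, 0, 1), trend='c').fit()
print(res_sq.params)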
Example #4
# fragment of a larger script: y22 (a simulated ARMA(2,2) series), np (numpy),
# sm (statsmodels.api) and the Arma class are defined earlier in the original file

### bug in the current version, fixed in Skipper's branch:
### arr[1:q,:] = params[p+k:p+k+q]  # p to p+q short params are MA coeffs
### ValueError: array dimensions are not compatible for copy
##arma22 = ARMA_kf(y22, constant=False, order=(2, 2))
##res = arma22.fit(start_params=start_params)
##print(res.params)

print('\nARIMA new')
arest2 = Arma(y22)

naryw = 4  #= 30
resyw = sm.regression.yule_walker(y22, order=naryw, inv=True)
arest2.nar = naryw
arest2.nma = 0
e = arest2.geterrors(np.r_[1, -resyw[0]])
x = sm.tsa.tsatools.lagmat2ds(np.column_stack((y22, e)), 3, dropex=1,
                              trim='both')
yt = x[:, 0]
xt = x[:, 1:]
res_ols = sm.OLS(yt, xt).fit()
print('hannan_rissanen')
print(res_ols.params)
start_params = res_ols.params
start_params_mle = np.r_[-res_ols.params[:2],
                         res_ols.params[2:],
                         np.sqrt(res_ols.scale)]
# need to iterate: the AR(1) term comes out too large, the MA terms too small
# fix up large parameters if the Hannan-Rissanen estimates are too large
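
For comparison, recent statsmodels releases ship a Hannan-Rissanen estimator that packages the same long-AR-plus-OLS idea; the import path and keyword names below are assumptions to check against your installed version, and y22 is the series used above:

# assumed to exist in recent statsmodels versions; verify against your install
from statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen

# returns a parameters object and a bunch of intermediate results
hr_params, _ = hannan_rissanen(y22, ar_order=2, ma_order=2, demean=True)
print('AR start values:', hr_params.ar_params)
print('MA start values:', hr_params.ma_params)
print('sigma2 start value:', hr_params.sigma2)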