sys.path.append("C:/Users/wigr11ab/Dropbox/KU/K3/FE/Python/") import timeSeriesModule as tsm # Import ARCH simulation import plotsModule as pltf # Custom plotting np.set_printoptions(suppress=True) # Disable scientific notation periods = 1000 x = np.arange(0, periods) arch = tsm.archFct(1., 0.5, periods) aarch = tsm.aArchFct(1, 0.6, 0.4, periods) x1 = arch[0][:periods - 1] y1 = arch[1][1:] x2 = aarch[0][:periods - 1] y2 = aarch[1][1:] pltf.scatterDuo(x1, x2, y1, y2, 'ARCH', 'A-ARCH', title='News Curve') # TAR model tar = tsm.tarFct(0., 0., 0., 1., 0.9, 2.5, periods) xTar = tar[:periods - 1] yTar = tar[1:] pltf.scatterUno(xTar, yTar, xLab='Lagged returns', title='TAR model') pltf.plotUno(x, tar, title='TAR Process')
pStar = pStarFct(mat, states, a_r, b_r)
pStarT = pStarTFct(mat, states, a_r, a_s, b_r, p)

# Compute the log-likelihood to maximise
logLik = logLikFct(var, p, pStar, pStarT)

# Save parameters for later plotting (redundant wrt optimisation)
vs[:, m] = var
ps[:, m] = p
llh[m] = logLik

# ============================================= #
# ===== Plotting ============================== #
# ============================================= #

if states == 2:
    pltm.plotDuo(range(sims), vs[0, :], vs[1, :], 'Var_1', 'Var_2', 'Trials', 'Variance')
    pltm.plotDuo(range(sims), ps[0, :], ps[3, :], 'p11', 'p22', 'Trials', 'Probability')
elif states == 3:
    pltm.plotTri(range(sims), vs[0, :], vs[1, :], vs[2, :], 'Trials', 'Var_1', 'Var_2', 'Var_3', 'Variance')
    pltm.plotTri(range(sims), ps[0, :], ps[4, :], ps[8, :], 'Trials', 'p11', 'p22', 'p33', 'Probability')
    pltm.plotUno(d, pStar[0, :], xLab='Time', yLab='p1', title='Smoothed State Probabilities')
    pltm.plotUno(d, pStar[1, :], xLab='Time', yLab='p2', title='Smoothed State Probabilities')
    pltm.plotUno(d, pStar[2, :], xLab='Time', yLab='p3', title='Smoothed State Probabilities')
elif states == 4:
    pltm.plotQuad(range(sims), vs[0, :], vs[1, :], vs[2, :], vs[3, :], 'Trials', 'Var_1', 'Var_2', 'Var_3', 'Var_4', 'Variance')
    pltm.plotQuad(range(sims), ps[0, :], ps[5, :], ps[10, :], ps[15, :], 'Trials', 'p11', 'p22', 'p33', 'p44', 'Probability')

pltm.plotUno(range(sims), llh, yLab='log-likelihood value')
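# The hard-coded indices above (ps[0], ps[3] / ps[0], ps[4], ps[8] / ...) pick
# out the diagonal of the flattened states x states transition matrix. An
# equivalent, state-count-agnostic alternative (a sketch, assuming p is stored
# row-major in ps):
diagIdx = [i * states + i for i in range(states)]
persistence = ps[diagIdx, :]  # one row per p_ii, tracked across all trials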
import numpy as np
import pandas as pd
import timeSeriesModule as tsm  # t-ARCH simulation
import plotsModule as pltm      # Custom plotting
# 'score' is assumed to be an imported exercise module providing tArch3

sig2 = 1.0
alpha = 0.8
periods = 100
n = 10000

# Simulate n t-ARCH paths and collect the score statistic from each
scores = np.zeros(n)
for i in np.arange(n):
    tArch = tsm.tArchFct(sig2, alpha, 3, periods)
    scores[i] = score.tArch3(tArch)

print(np.mean(scores))  # sample mean of the simulated scores
print(np.var(scores))   # sample variance of the simulated scores

x = np.arange(n)
pltm.plotUno(x, scores, 'Score value', 'Simulation trial', 'Distribution of scores', 'upper right')

# Manual normal QQ-plot: sorted scores against sorted standard normal draws
x = np.sort(np.random.normal(size=n))
# sm.qqplot(scores)  # statsmodels alternative (requires import statsmodels.api as sm)
pltm.scatterUno(x, np.sort(scores), yLab='Empirical quantiles: Score',
                xLab='Theoretical quantiles', title='Normal QQ-Plot')
pltm.hist(scores, title='Histogram of scores')

# Next exercise 1.7
sp500 = pd.DataFrame(pd.read_excel('C:/Users/wigr11ab/Dropbox/KU/K3/FE/Exercises/SP500.xlsx'))
date = np.array(sp500[['Date']][15096:])
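# The EM recursion in the next block assumes y (returns), mat (sample size),
# rep (number of EM iterations) and the containers par, llh already exist.
# A minimal, self-contained setup under assumed shapes (data simulated from a
# two-regime mixture here as a stand-in for the SP500 returns loaded above,
# since the return-construction step is not shown):
mat = 1000
rep = 50
rng = np.random.default_rng(0)
regime = rng.random(mat) < 0.3  # latent high-variance regime indicator
y = np.where(regime, rng.normal(0.0, 1.0, mat), rng.normal(0.0, 0.5, mat))
par = np.zeros((3, rep))  # rows: s1, s2, p per iteration
llh = np.zeros(rep)       # log-likelihood per iteration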
s1, s2, p = 1.0, 0.5, 0.3

# Compute initial pStar given initial parameters
f1 = 1 / np.sqrt(2 * np.pi * s1) * np.exp(-0.5 * y**2 / s1)
f2 = 1 / np.sqrt(2 * np.pi * s2) * np.exp(-0.5 * y**2 / s2)
pStar = p * f1 / (p * f1 + (1 - p) * f2)

for m in range(rep):
    # Re-evaluate parameters given pStar (M-step)
    s1 = sum(pStar * y**2) / sum(pStar)
    s2 = sum((1 - pStar) * y**2) / sum(1 - pStar)
    p = sum(pStar) / mat

    # Update pStar given the new parameters (E-step)
    f1 = 1 / np.sqrt(2 * np.pi * s1) * np.exp(-0.5 * y**2 / s1)
    f2 = 1 / np.sqrt(2 * np.pi * s2) * np.exp(-0.5 * y**2 / s2)
    pStar = p * f1 / (p * f1 + (1 - p) * f2)  # Compute new pStar

    # Compute the log-likelihood to maximise
    logLik = pStar * np.log(f1 * p) + (1 - pStar) * np.log(f2 * (1 - p))
    sVol = pStar * s1 + (1 - pStar) * s2

    # Save parameters for later plotting (redundant wrt optimisation)
    par[0, m], par[1, m], par[2, m] = s1, s2, p
    llh[m] = sum(logLik)

pltm.plotDuo(range(rep), par[0, :], par[1, :], 'Sigma_h', 'Sigma_l', 'Time', 'Volatility')
pltm.plotUno(range(rep), par[2, :], yLab='p')
pltm.plotUno(range(rep), llh, yLab='log-likelihood value')
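# A hedged aside: logLik above is the expected complete-data objective. The
# observed-data mixture log-likelihood, which EM is guaranteed not to decrease
# from one iteration to the next, can be tracked alongside it as a convergence
# diagnostic (f1, f2 and p hold their final-iteration values here):
obsLik = np.sum(np.log(p * f1 + (1 - p) * f2))
print(obsLik)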
# Analysing the residuals
theta = np.exp(aPar)
n = len(y)
x = np.squeeze(y)
xLag2 = x[:n - 1] ** 2
idxP = (x > 0)[:n - 1]
idxN = (x < 0)[:n - 1]

# GJR conditional variance; standardised residuals divide by the conditional
# standard deviation, not the variance
s2 = theta[0] + theta[1] * idxP * xLag2 + theta[2] * idxN * xLag2
z = x[1:] / np.sqrt(s2)

pltm.qqPlot(z)
pltm.hist(z)
pltm.plotUno(np.arange(n - 1), z, yLab='Residuals')

# Use the delta method to find se for GJR through A-ARCH parameters (done manually)
a = np.array([np.exp(aPar[0]), np.exp(aPar[1]), np.exp(aPar[2]) - np.exp(aPar[1])])
A = np.array([[a[0], 0, 0],
              [0, a[1], 0],
              [0, -a[1], a[2] + a[1]]])

se = np.sqrt(np.diag(A.dot(sandwich).dot(np.transpose(A))))
tVal = a / se

gjrResults = pd.DataFrame([a, se, tVal, mlVal],  # mlVal assumed to hold one entry per column
                          columns=['sigma2', 'alpha', 'gamma'],
                          index=['estimate', 'se', 't-val', 'ml val'])
print(gjrResults)
print(mlResults)
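# The hand-coded Jacobian A above can be cross-checked numerically with
# central finite differences on the transform theta -> a. A sketch, assuming
# aPar is a flat length-3 array; gjrTransform is a hypothetical helper
# mirroring the construction of `a`, and the step size h is arbitrary:
def gjrTransform(aP):
    return np.array([np.exp(aP[0]), np.exp(aP[1]), np.exp(aP[2]) - np.exp(aP[1])])

h = 1e-6
Anum = np.zeros((3, 3))
for j in range(3):
    e = np.zeros(3)
    e[j] = h
    Anum[:, j] = (gjrTransform(aPar + e) - gjrTransform(aPar - e)) / (2 * h)

print(np.max(np.abs(Anum - A)))  # should be near zero if A is correct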