def small_data():
    """Simulate 1000 observations from a zero-mean GARCH(1,1) with Normal errors.

    Uses a fixed seed sequence so the simulated series is reproducible.
    """
    seed_sequence = [2389280, 238901, 382908031]
    rng = np.random.RandomState(seed_sequence)
    model = ZeroMean(
        None, volatility=GARCH(), distribution=Normal(random_state=rng)
    )
    # Parameters: [omega, alpha, beta]
    simulated = model.simulate([1e-4, 0.05, 0.90], nobs=1000)
    return simulated.data
def test_blank(small_data, std_data):
    """Fitted omega should scale by 1e3 between the small and standard series."""
    small_start = np.array([1e-3, 0.05, 0.90])
    res_small = ZeroMean(small_data, volatility=GARCH(), rescale=False).fit(
        starting_values=small_start, disp="off"
    )
    std_start = np.array([1, 0.05, 0.90])
    res_std = ZeroMean(std_data, volatility=GARCH(), rescale=False).fit(
        starting_values=std_start, disp="off"
    )
    # omega scales with the variance of the data, hence the 1e3 factor.
    assert_allclose(1e3 * res_small.params[0], res_std.params[0], rtol=5e-3)
def test_rescale_fit(small_data, std_data):
    """Automatic rescaling must match an explicit fit on 10x-scaled data."""
    auto_res = ZeroMean(small_data, volatility=GARCH(), rescale=True).fit(disp="off")
    manual_res = ZeroMean(10 * small_data, volatility=GARCH()).fit(disp="off")
    # Same likelihood at the optimum...
    assert_allclose(auto_res.loglikelihood, manual_res.loglikelihood)
    # ...and identical in-sample variance forecasts.
    assert_allclose(
        auto_res.forecast(start=0).variance,
        manual_res.forecast(start=0).variance,
    )
ConstantVariance, EWMAVariance, MIDASHyperbolic, RiskMetrics2006, ZeroMean,
    arch_model, )
from arch.univariate.mean import _ar_forecast, _ar_to_impulse

# S&P 500 percent returns used as the shared data series for all model specs.
SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna()
# Candidate conditional-mean specifications.
MEAN_MODELS = [
    HARX(SP500, lags=[1, 5]),
    ARX(SP500, lags=2),
    ConstantMean(SP500),
    ZeroMean(SP500),
]
# Candidate conditional-volatility processes.
VOLATILITIES = [
    ConstantVariance(),
    GARCH(),
    FIGARCH(),
    EWMAVariance(lam=0.94),
    MIDASHyperbolic(),
    HARCH(lags=[1, 5, 22]),
    RiskMetrics2006(),
    APARCH(),
    EGARCH(),
]
# Cartesian product: every mean model paired with every volatility process.
MODEL_SPECS = list(product(MEAN_MODELS, VOLATILITIES))
parse_dates=True, index_col='Date', squeeze=True)
# Log returns, multiplied by `scale` (defined earlier — presumably 100 for
# percent returns; TODO confirm against the top of the file).
returns = spClose.apply(np.log) - spClose.shift(1).apply(np.log)
returns *= scale
returns.dropna(inplace=True)
# NGARCH(1,1) starting parameters; omega is rescaled to match the return scale.
omega = 0.000005 * scale**2
alpha = 0.07
beta = 0.85
theta = 0.5
# using NGARCH11
tsm = ZeroMean(returns)
ngarch = NGARCH11(np.array([omega, alpha, beta, theta]))
tsm.volatility = ngarch
tsm.distribution = StudentsT()
# The extra 10.0 is the starting value for the Student-t degrees of freedom.
rst = tsm.fit(starting_values=np.array([omega, alpha, beta, theta, 10.0]))
print(rst)
rst.plot(annualize='D')
# Compare standardized residuals against a fitted t distribution.
sns.distplot(rst.std_resid, fit=stats.t)
# Check the estimated parameters satisfy the NGARCH validity constraints.
print(
    ngarch.is_valid(rst.params['alpha'], rst.params['beta'],
                    rst.params['theta']))
sm.graphics.qqplot(rst.std_resid, line='45')
def test_blank(small_data, std_data):
    """Omega fitted on the small-scale series should be 1e3x smaller."""
    fit_opts = {"disp": "off"}
    res_small = ZeroMean(small_data, volatility=GARCH(), rescale=False).fit(**fit_opts)
    res_std = ZeroMean(std_data, volatility=GARCH(), rescale=False).fit(**fit_opts)
    assert_allclose(1e3 * res_small.params[0], res_std.params[0], rtol=5e-3)
(returns['tn'] <= score_tn), ]
        cor_num = stats.pearsonr(cut['sp'], cut['tn'])
        cor0.loc[p, 'cor'] = cor_num[0]
    else:
        # Upper tail: keep observations where both series exceed their
        # threshold value, then record the Pearson correlation of the subset.
        cut = returns.loc[(returns['sp'] > score_sp) &
                          (returns['tn'] > score_tn), ]
        cor_num = stats.pearsonr(cut['sp'], cut['tn'])
        cor0.loc[p, 'cor'] = cor_num[0]
# Plot the threshold-correlation profile across probability levels.
cor0.plot()
# Fit zero-mean GARCH models with Student-t errors to each series and keep
# the standardized residuals.
tsm_sp = ZeroMean(returns['sp'])
garch = GARCH()
tsm_sp.volatility = garch
tsm_sp.distribution = StudentsT()
rst_sp = tsm_sp.fit()
filtered_sp = rst_sp.std_resid
tsm_tn = ZeroMean(returns['tn'])
garch = GARCH()
tsm_tn.volatility = garch
tsm_tn.distribution = StudentsT()
rst_tn = tsm_tn.fit()
filtered_tn = rst_tn.std_resid
scale = 100 # Exercise 1 & 3 data = pd.read_csv('data/Chapter8_Data.csv', parse_dates=True, index_col='date') returns = data.apply(np.log) - data.apply(np.log).shift() returns.dropna(inplace=True) returns *= scale returns.plot() # FHS fhs = ZeroMean(returns['Close']) garch = GARCH(p=1, q=1) fhs.distribution = StudentsT() fhs.volatility = garch rst = fhs.fit() print(rst) rst.plot(annualize='D') sns.distplot(rst.std_resid, fit=stats.t) forecast_variance_1day = rst.forecast(horizon=1).variance.iloc[-1, 0] rs = np.random.RandomState(1234)
# Fit a zero-mean normal to the portfolio returns; [1] is the scale (std dev).
std_p = stats.norm.fit(returns['portfolio'], floc=0)[1]
# 1% normal VaR for each series (sign flipped so VaR is a positive loss).
VaRsp = -stats.norm.ppf(0.01, 0, scale=std_sp)
VaRtn = -stats.norm.ppf(q=0.01, scale=std_tn)
VaRp = -stats.norm.ppf(q=0.01, scale=std_p)
# Diversification check: is portfolio VaR below the average stand-alone VaR?
print(VaRp < (VaRsp + VaRtn) / 2)
# Exercise 4
# NGARCH(1,1) starting parameters; omega rescaled to the return scale.
omega = 1.5E-6 * scale**2
alpha = 0.05
beta = 0.8
theta = 1.25
tsm = ZeroMean(returns['sp500'])
ngarch = NGARCH11(np.array([omega, alpha, beta, theta]))
tsm.volatility = ngarch
tsm.distribution = StudentsT()
sp500_rst = tsm.fit()
print(sp500_rst)
sp500_rst.plot(annualize='D')
# Standardized residuals vs a fitted t distribution.
sns.distplot(sp500_rst.std_resid, fit=stats.t)
# Check the estimated parameters satisfy the NGARCH validity constraints.
print(
    ngarch.is_valid(sp500_rst.params['alpha'], sp500_rst.params['beta'],
                    sp500_rst.params['theta']))
sm.graphics.qqplot(sp500_rst.std_resid, line='45')
def simulate_2(PARS, sample_size):
    """Simulate `sample_size` observations from a zero-mean GARCH(1,1).

    PARS holds the volatility parameters passed straight to `simulate`;
    only the simulated data column is returned.
    """
    model = ZeroMean()
    model.volatility = GARCH(p=1, q=1)
    return model.simulate(PARS, sample_size)['data']
def two_pass_est(self, print_results=True, plot_results=False):
    """Two-pass estimation of an AR(1) mean / ARCH(1) volatility model.

    See Chapter 8.4.1 of the lecture notes. Pass 1 estimates the mean
    equation by OLS; pass 2 fits an ARCH(1) model by MLE to the pass-1
    residuals.

    Parameters
    ----------
    print_results : bool
        If True, print both estimation summaries.
    plot_results : bool
        If True, plot the squared residuals and the fitted volatility model.

    Returns
    -------
    tuple
        (volatility, results_vol, resid_squ): the conditional volatility
        series, the fitted ARCH results object, and the squared OLS
        residuals.
    """
    # --- Pass 1: AR(1) mean equation estimated by OLS -------------------
    # A parsimonious choice; the order could instead be picked via AIC/BIC.
    n = len(self.returns)
    y = list(self.returns[1:n])                   # dependent: r_t
    x = sm.add_constant(self.returns[0:n - 1])    # regressor: r_{t-1} + const
    results_mean = sm.OLS(y, x).fit()
    if print_results:
        print(results_mean.summary())

    # --- Pass 2: ARCH(1) fitted to the mean-equation residuals ----------
    # ARCH(1): sigma^2_t = alpha_0 + alpha_1 * eps^2_{t-1},
    # with alpha_0 > 0 and alpha_1 >= 0.
    resid = results_mean.resid
    resid_squ = resid**2                          # eps^2_t (ARCH effects)
    if plot_results:
        plt.plot(resid_squ)
    # Note: the 'arch' package expects the *unsquared* residuals as input,
    # and the mean is already removed, hence the ZeroMean specification.
    model_vol = ZeroMean(resid)
    model_vol.volatility = ARCH(p=1)
    results_vol = model_vol.fit()
    volatility = results_vol.conditional_volatility
    if print_results:
        print(results_vol.summary())
    if plot_results:
        results_vol.plot()
    return volatility, results_vol, resid_squ