Example #1
0
# initialize the temporary databases that collect the fitted models:
# one entry per instrument index in each dict
db_invariants, db_nextstep = {}, {}    # invariant series / next-step model label
db_garch_sig2, db_param = {}, {}       # GARCH conditional variances / fitted parameters
param_names = ['a', 'b', 'c', 'mu']    # order of the GARCH(1,1) parameters
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step02-implementation-step01): GARCH(1,1) fit on stocks log-values

for i in range(n_stocks):
    # risk-driver increments for the i-th stock (log-value differences)
    increments = np.diff(x[:, i])
    # GARCH(1,1) fit; any extra return values are discarded
    garch_par, garch_sig2, garch_epsi, *_ = fit_garch_fp(increments)
    # record invariants, parameters, model label and conditional variances
    db_invariants[i] = np.array(garch_epsi)
    db_param[i] = garch_par
    db_nextstep[i] = 'GARCH(1,1)'
    db_garch_sig2[i] = garch_sig2

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step02-implementation-step02): GARCH(1,1) fit on S&P index log-values

# time series of risk driver increments for the S&P index (column n_stocks)
dx = np.diff(x[:, n_stocks])
# fit GARCH(1,1) parameters; extra return values are discarded
par, sig2, epsi, *_ = fit_garch_fp(dx)
# store invariants and fitted parameters under the index key
# NOTE(review): unlike the stocks loop above, db_nextstep and db_garch_sig2
# are not set for the index here — presumably done just below, outside this
# excerpt; confirm
db_invariants[n_stocks] = np.array(epsi)
db_param[n_stocks] = par
Example #2
0
# stock values and their compounded returns
v_stock = np.array(df_stocks.iloc[:, :n_])
log_v = np.log(v_stock)
dx = np.diff(log_v, axis=0)  # compounded returns of the n_ stocks
t_ = len(dx)                 # number of return observations

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step02): Set flexible probabilities

# exponential-decay flexible probabilities over the t_ scenarios,
# with half-life tau_hl
p = exp_decay_fp(t_, tau_hl)  # flexible probabilities

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step03): Fit a GARCH(1,1) on each time series of compounded returns

# fit a GARCH(1,1) to each stock's compounded-return series
param = np.zeros((4, n_))     # fitted parameters, one column per stock
sigma2 = np.zeros((t_, n_))   # conditional variances
xi = np.zeros((t_, n_))       # realized invariants (residuals)
for k in range(n_):
    fit_out = fit_garch_fp(dx[:, k], p, rescale=True)
    param[:, k], sigma2[:, k], xi[:, k] = fit_out

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step04): Estimate marginal distributions by fitting a Student t distribution via MLFP

# estimate each marginal's location and dispersion via MLFP (Student t, dof nu)
mu_marg = np.zeros(n_)       # marginal locations
sigma2_marg = np.zeros(n_)   # marginal dispersions
for k in range(n_):
    mu_marg[k], sigma2_marg[k] = fit_locdisp_mlfp(xi[:, k], p=p, nu=nu)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step05): Map each marginal time series into standard normal realizations

xi_tilde = np.zeros((t_, n_))
for n in range(n_):
    u = t.cdf(xi[:, n],
              df=10**6,
              loc=mu_marg[n],
Example #3
0
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step01): Risk drivers identification

x = np.log(v)    # risk drivers: log-values of the stocks
d_ = x.shape[1]  # number of risk drivers

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step02): Quest for invariance

# +
i_ = d_  # one invariant series per risk driver
epsi = np.zeros((t_, i_))
p_garch = exp_decay_fp(t_, tau_hl_garch)  # exp-decay flexible probabilities

for j in range(i_):
    # progress report, then GARCH(1,1) fit on the j-th log-value increments
    pct_done = int((j + 1) / i_ * 100)
    print(f'Fitting {j + 1}-th GARCH; {pct_done}% done.')
    _, _, epsi[:, j] = fit_garch_fp(np.diff(x[:, j], axis=0), p_garch)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step03): Historical estimation

# +
# time and state conditioning on smoothed and scored VIX returns

# state indicator: VIX compounded return realizations
# state indicator: VIX compounded returns
c_vix = np.diff(np.log(vix))
# smooth, then score the indicator
z_vix = scoring(smoothing(c_vix, tau_hl_smooth), tau_hl_score)
# conditioning target: most recent value of the scored indicator
z_vix_star = z_vix[-1]
Example #4
0
# invariants extracted from the log-implied volatility (VAR(1) residuals)
db_calloption_epsi_var1 = pd.read_csv(
    path + 'db_calloption_epsi_var1.csv', index_col=0, parse_dates=True)
epsi_var1 = db_calloption_epsi_var1.values
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_calloption-implementation-step01): Extract invariants for the S&P 500 index and create the realized information panel

# +
# compute risk driver for the S&P 500 index as the log-value
# risk driver of the S&P 500 index: log-value on the selected dates
in_dates = db_sp500.index.isin(dates)
log_underlying = np.log(np.array(db_sp500.loc[in_dates, 'SPX_close']))

# model log_underlying as GARCH(1,1); residuals are the realized invariants
par, sig2, epsi_garch = fit_garch_fp(np.diff(log_underlying))

# realized information panel: index invariants alongside the VAR(1) invariants
epsi = np.c_[epsi_garch, epsi_var1]
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_calloption-implementation-step02): Set the flexible probabilities

# +
t_, i_ = epsi.shape  # number of scenarios and of invariants
# state indicator: VIX compounded returns on the selected dates
vix_close = np.array(db_vix.loc[dates].VIX_close)
c_vix = np.diff(np.log(vix_close))
# smooth, then score the indicator
z_smooth = smoothing(c_vix, tau_hl_smooth)
z = scoring(z_smooth, tau_hl_score)
Example #5
0
# convert the stock database index to datetimes and keep the dates
db_stocks.index = pd.to_datetime(db_stocks.index)
dates = db_stocks.index  # already a DatetimeIndex after the conversion above

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_garchres_stock-implementation-step01): Compute risk drivers for GE and compounded return

# +
# risk driver: log-value of the GE stock
ge_close = np.array(db_stocks.loc[dates, 'GE'])
log_underlying = np.log(ge_close)

# compounded returns are the increments of the log-value
comp_return = np.diff(log_underlying)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_garchres_stock-implementation-step02): Fit GARCH(1,1), compute residuals and their absolute values

# +
# GARCH(1,1) residuals of the compounded returns
_, _, epsi = fit_garch_fp(comp_return)
# absolute residuals, the series tested for invariance below
abs_epsi = np.absolute(epsi)
# -

# ## Plots

# ellipsoid invariance test on the absolute GARCH residuals
fig = plt.figure()
plot_title = ('Absolute residuals of a GARCH(1, 1) model fitted on '
              'stock compounded return')
acf_x, conf_int_x = invariance_test_ellipsoid(
    abs_epsi, l_, conf_lev=conf_lev, bl=-0.75,
    title=plot_title, plot_test=True)
fig = plt.gcf()
add_logo(fig, set_fig_size=False, size_frac_x=1/8)
Example #6
0
# GE and JPM closing values
v = db_stocks[['GE', 'JPM']].values

# VIX closing values (used for time-state conditioning)
vix_path = '../../../databases/global-databases/derivatives/db_vix/data.csv'
db_vix = pd.read_csv(vix_path, usecols=['date', 'VIX_close'], index_col=0)
db_vix.index = pd.to_datetime(db_vix.index)
# monthly dates: every 20th observation of the stock index
dates = pd.to_datetime(db_stocks.index[::20])
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step01): Fit GARCH process and extract realized invariants

# select monthly values
# keep monthly values (every 20th observation)
v = v[::20, :]
# monthly compounded returns
c = np.diff(np.log(v), axis=0)
# GARCH(1,1) invariants, one series per stock
_, _, epsi_garch_ge = fit_garch_fp(c[:, 0])
_, _, epsi_garch_jpm = fit_garch_fp(c[:, 1])
epsi = np.column_stack((epsi_garch_ge, epsi_garch_jpm))
t_ = c.shape[0]  # number of return observations (= len(v) - 1)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_estimation_copmarg_ratings-implementation-step02): Set the flexible probabilities

# state indicator: VIX compounded returns on the monthly dates
vix_close = np.array(db_vix.loc[dates, :].VIX_close)
c_vix = np.diff(np.log(vix_close))
# smooth, then score the indicator
z_smooth = smoothing(c_vix, tau_hl_smooth)
z = scoring(z_smooth, tau_hl_score)
# conditioning target: most recent value of the scored indicator
z_star = z[-1]
# prior probabilities
                     skiprows=[0],
                     index_col=0,
                     parse_dates=True,
                     usecols=['name', 'CSCO', 'GE'],
                     skip_blank_lines=True)
stocks = stocks.dropna(how='any')  # drop dates with any missing stock value

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step01): Compute the log-values of the stocks

# log-values of the two stocks (risk drivers)
x_csco = np.log(np.asarray(stocks['CSCO']))
x_ge = np.log(np.asarray(stocks['GE']))

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step02): Compute the invariants using a GARCH(1,1) fit

# +
# invariants: GARCH(1,1) residuals of the log-value increments
_, _, epsi_csco = fit_garch_fp(np.diff(x_csco))
_, _, epsi_ge = fit_garch_fp(np.diff(x_ge))

# stack the two series as a (t_, 2) panel
epsi = np.column_stack((epsi_csco, epsi_ge))
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step03): Set the exp. decay probabilities for MLFP estimation and compute the effective number of scenarios

# exp-decay flexible probabilities and their effective number of scenarios
t_obs = len(epsi_csco)  # number of invariant scenarios
p = exp_decay_fp(t_obs, tau_hl)
ens = effective_num_scenarios(p)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step04): Perform the MLFP estimation

mu_mlfp, sig2_mlfp = fit_locdisp_mlfp(epsi,
                                      p=p,
                                      nu=nu,