Example #1

db_invariants_nextstep = pd.read_csv(path + 'db_invariants_nextstep.csv')
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step03-implementation-step01): Set the flexible probabilities

# +
# time and state conditioning on smoothed and scored VIX returns

# state indicator: VIX compounded return realizations
db_vix['c_vix'] = np.log(db_vix).diff()
# extract data for analysis dates
c_vix = db_vix.c_vix[dates].values
# smoothing
z_smooth = smoothing(c_vix, tau_hl_smooth)
# scoring
z = scoring(z_smooth, tau_hl_score)
# target value
z_star = z[-1]
# prior probabilities
p_prior = exp_decay_fp(t_, tau_hl_prior)
# posterior probabilities
p = conditional_fp(z, z_star, alpha, p_prior)
# effective number of scenarios
ens = effective_num_scenarios(p)

print('Effective number of scenarios is', int(round(ens)))
# -
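
# For reference, the exponential-decay prior and the effective number of
# scenarios have simple closed forms. The cell below is a minimal NumPy sketch
# of those formulas, assuming they match what exp_decay_fp and
# effective_num_scenarios compute; the helper names here are hypothetical.

# +
import numpy as np

def exp_decay_prior_sketch(t_, tau_hl):
    # exponential-decay probabilities with half-life tau_hl;
    # the most recent of the t_ observations gets the largest weight
    p = np.exp(-np.log(2) / tau_hl * np.arange(t_ - 1, -1, -1))
    return p / np.sum(p)

def ens_sketch(p):
    # effective number of scenarios: exponential of the entropy of p
    return np.exp(-p @ np.log(p))

p_sketch = exp_decay_prior_sketch(1000, 250)   # hypothetical t_ and half-life
print(ens_sketch(p_sketch))                    # well below 1000: decay concentrates mass
# -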

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step03-implementation-step02): Estimate the marginal distributions for stocks, S&P 500 and implied volatility

Example #2

# +
# fit a GARCH model to each stock's compounded returns and extract the residuals (invariants)
for i in range(i_):
    print('Fitting ' + str(i+1) + '-th GARCH; ' +
          str(int((i+1)/i_*100)) + '% done.')
    _, _, epsi[:, i] = fit_garch_fp(np.diff(x[:, i], axis=0), p_garch)
# -
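
# fit_garch_fp returns the GARCH residuals that play the role of invariants.
# As a rough stand-alone illustration only (using the third-party arch package
# on synthetic returns, not the ARPM flexible-probabilities fit; all names
# below are hypothetical), extracting such residuals for one series could look
# like this.

# +
import numpy as np
from arch import arch_model

rng = np.random.default_rng(0)
dx_sketch = rng.standard_normal(1000)                # synthetic compounded returns
garch_res = arch_model(dx_sketch, mean='Constant',
                       vol='GARCH', p=1, q=1).fit(disp='off')
epsi_sketch = garch_res.std_resid                    # standardized residuals ~ epsi[:, i]
# -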

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step03): Historical estimation

# +
# time and state conditioning on smoothed and scored VIX returns

# state indicator: VIX compounded return realizations
c_vix = np.diff(np.log(vix))
# smoothing
z_vix = smoothing(c_vix, tau_hl_smooth)
# scoring
z_vix = scoring(z_vix, tau_hl_score)
# target value
z_vix_star = z_vix[-1]
# flexible probabilities
p_pri = exp_decay_fp(len(dates), tau_hl_pri)
p = conditional_fp(z_vix, z_vix_star, alpha_leeway, p_pri)

# HFP mean and covariance of the invariants
mu_hat, sig2_hat = meancov_sp(epsi, p)
# -
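
# meancov_sp computes the scenario-probability (HFP) mean and covariance of
# the invariants. A minimal NumPy sketch of those weighted moments, assuming
# this is what meancov_sp(epsi, p) returns (the helper name is hypothetical):

# +
import numpy as np

def meancov_sp_sketch(epsi, p):
    # epsi: (t_, i_) scenarios, p: (t_,) probabilities summing to one
    mu = p @ epsi                          # probability-weighted mean
    dev = epsi - mu                        # deviations from the weighted mean
    sig2 = dev.T @ (dev * p[:, None])      # probability-weighted covariance
    return mu, sig2
# -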

# ## Save database

# +
out = pd.DataFrame({stocks_names[i]: epsi[:, i]
                    for i in range(i_)}, index=dates)
out = out[list(stocks_names[:i_])]