Example #1
# S&P 500 index value
spx_path = '../../../databases/global-databases/equities/db_stocks_SP500/SPX.csv'
spx_all = pd.read_csv(spx_path, parse_dates=['date'])
spx = spx_all.loc[(spx_all['date'] >= pd.to_datetime('2004-01-02')) &
                  (spx_all['date'] < pd.to_datetime('2017-09-01'))]

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step01): Compute the S&P 500 compounded return

# invariants (S&P500 log-return)
epsi = np.diff(np.log(spx.SPX_close))  # S&P 500 index compounded return

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step02): Compute the time exponential decay probabilities

t_ = len(epsi)
t_star = t_
p_exp = exp_decay_fp(t_, tau_hl, t_star)
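
# For reference, a minimal sketch of the probabilities exp_decay_fp is expected
# to return, assuming the standard half-life parametrization
# p_t ∝ exp(-(log(2)/tau_hl)*|t_star - t|), normalized to sum to one.
# This helper is illustrative, not the ARPM implementation.
import numpy as np

def exp_decay_fp_sketch(t_, tau_hl, t_star=None):
    if t_star is None:
        t_star = t_  # pivot the decay on the most recent observation
    t = np.arange(1, t_ + 1)
    p = np.exp(-(np.log(2) / tau_hl) * np.abs(t_star - t))
    return p / np.sum(p)  # normalize to a probability vector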

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step03): Compute the effective number of scenarios

ens = effective_num_scenarios(p_exp)
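
# The effective number of scenarios is, by default, the exponential of the
# entropy of the probability vector, ens = exp(-sum_t p_t log p_t); a sketch
# under that assumption (uniform probabilities recover the sample size t_):
import numpy as np

def effective_num_scenarios_sketch(p):
    p = p[p > 0]  # drop zero-probability scenarios to avoid log(0)
    return np.exp(-p @ np.log(p))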

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step04): Compute flexible probabilities histogram

f_eps, x_eps = histogram_sp(epsi, p=p_exp, k_=int(10*np.log(t_)))

# ## Plots

# +
# figure settings
plt.style.use('arpm')
grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
Example #2
df_stocks = df_stocks.loc[(df_stocks.index >= t_first)
                          & (df_stocks.index <= t_last)]

# remove the stocks with missing values
df_stocks = df_stocks.dropna(axis=1, how='any')
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step01): Compute log-returns

v_stock = np.array(df_stocks.iloc[:, :n_])
dx = np.diff(np.log(v_stock), axis=0)  # stocks' compounded returns
t_ = dx.shape[0]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step02): Set flexible probabilities

p = exp_decay_fp(t_, tau_hl)  # flexible probabilities

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step03): Fit a GARCH(1,1) on each time series of compounded returns

param = np.zeros((4, n_))
sigma2 = np.zeros((t_, n_))
xi = np.zeros((t_, n_))
for n in range(n_):
    param[:, n], sigma2[:, n], xi[:, n] = \
        fit_garch_fp(dx[:, n], p, rescale=True)
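
# For intuition, fit_garch_fp presumably filters the GARCH(1,1) recursion
# sigma2_t = c + a*(dx_{t-1} - mu)^2 + b*sigma2_{t-1} and returns the
# standardized residuals as invariants. A sketch of the filtering step for
# given (illustrative) parameters mu, c, a, b:
import numpy as np

def garch_filter_sketch(dx, mu, c, a, b):
    t_obs = len(dx)
    sigma2 = np.zeros(t_obs)
    sigma2[0] = np.var(dx)  # initialize at the sample variance
    for t in range(1, t_obs):
        sigma2[t] = c + a * (dx[t-1] - mu)**2 + b * sigma2[t-1]
    xi = (dx - mu) / np.sqrt(sigma2)  # standardized residuals (invariants)
    return sigma2, xi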

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step04): Estimate marginal distributions by fitting a Student t distribution via MLFP

mu_marg = np.zeros(n_)
sigma2_marg = np.zeros(n_)
for n in range(n_):
Example #3
# values
v = db_stocks_sp.values
vix = db_vix.values[:, 0]
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step01): Risk drivers identification

x = np.log(v)  # log-values
d_ = x.shape[1]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step02): Quest for invariance

# +
i_ = d_
epsi = np.zeros((t_, i_))
p_garch = exp_decay_fp(t_, tau_hl_garch)

for i in range(i_):
    print('Fitting GARCH {}/{} ({:.0f}% done)'.format(i+1, i_,
                                                      (i+1)/i_*100))
    _, _, epsi[:, i] = fit_garch_fp(np.diff(x[:, i], axis=0), p_garch)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step03): Historical estimation

# +
# time and state conditioning on smoothed and scored VIX returns

# state indicator: VIX compounded return realizations
c_vix = np.diff(np.log(vix))
# smoothing
Example #4
# +
# time and state conditioning on smoothed and scored VIX returns

# state indicator: VIX compounded return realizations
db_vix['c_vix'] = np.log(db_vix).diff()
# extract data for analysis dates
c_vix = db_vix.c_vix[dates].values
# smoothing
z_smooth = smoothing(c_vix, tau_hl_smooth)
# scoring
z = scoring(z_smooth, tau_hl_score)
# target value
z_star = z[-1]
# prior probabilities
p_prior = exp_decay_fp(t_, tau_hl_prior)
# posterior probabilities
p = conditional_fp(z, z_star, alpha, p_prior)
# effective number of scenarios
ens = effective_num_scenarios(p)

print('Effective number of scenarios is', int(round(ens)))
# -
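
# As a rough illustration of the state conditioning above: a crisp variant of
# conditional_fp keeps the leeway fraction alpha of scenarios whose conditioner
# z is closest to the target z_star, reweights the prior on that set, and
# renormalizes. The actual ARPM routine is more refined (e.g. via entropy
# pooling); this sketch only conveys the idea.
import numpy as np

def conditional_fp_crisp_sketch(z, z_star, alpha, p_prior):
    t_ = len(z)
    keep = np.argsort(np.abs(z - z_star))[:max(1, int(np.ceil(alpha * t_)))]
    p = np.zeros(t_)
    p[keep] = p_prior[keep]  # zero out scenarios far from the target state
    return p / np.sum(p)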

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step03-implementation-step02): Estimate the marginal distributions for stocks, S&P 500 and implied volatility

# +
# invariants to be modeled parametrically
ind_parametric = np.arange(n_stocks + 1 + d_implvol,
                           n_stocks + 1 + d_implvol + i_bonds)
# invariants to be modeled nonparametrically
Example #5
w = np.array([1 / 3, 1 / 3, 1 / 3])  # market portfolio weights (here equally weighted)

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step00): Load data

path = '../../../databases/global-databases/equities/db_stocks_SP500/'
data = pd.read_csv(path + 'db_stocks_sp.csv', index_col=0, header=[0, 1])

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step01): Compute time series of returns

n_ = len(w)  # market dimension
r_t = data.pct_change().iloc[1:, :n_].values  # returns

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step02): Compute the sample mean and the exponential decay sample covariance

t_ = len(r_t)
p_t_tau_hl = exp_decay_fp(t_, tau_hl)  # exponential decay probabilities
mu_hat_r, sig2_hat_r = meancov_sp(r_t, p_t_tau_hl)  # sample mean and cov.
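
# meancov_sp computes the scenario-probability (HFP) mean and covariance; the
# same quantities in a compact sketch, with r of shape (t_, n_) and p of
# shape (t_,):
import numpy as np

def meancov_sp_sketch(r, p):
    mu = p @ r                # probability-weighted mean
    d = r - mu
    sigma2 = (d.T * p) @ d    # probability-weighted covariance
    return mu, sigma2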

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step03): Compute Black-Litterman prior parameters

# +
# expectation in terms of market equilibrium
mu_r_equil = 2 * lam * sig2_hat_r @ w

tau = t_  # uncertainty level in the reference model
mu_m_pri = mu_r_equil
cv_pri_pred = (1 + 1 / tau) * sig2_hat_r
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step04): Compute vectors quantifying the views
Example #6
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step01): Compute linear returns of both benchmark and securities

# +
# stock linear returns
r_stock = np.diff(v_stock, axis=0) / v_stock[:-1, :]

# S&P500 index return
r_sandp = np.diff(v_sandp, axis=0) / v_sandp[:-1]
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step02): Cov. matrix of the joint vector of stocks and bench. returns

# +
# exponential decay probabilities
p = exp_decay_fp(t_ - 1, tau_hl)

# HFP covariance
_, s2_r_stock_r_sandp = meancov_sp(
    np.concatenate((r_stock, r_sandp.reshape(-1, 1)), axis=1), p)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step03): Objective function

g = lambda s: obj_tracking_err(s2_r_stock_r_sandp, s)[1]
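
# For context, the tracking error of weights w on a selection s can be read
# off the joint covariance s2 of (stock, benchmark) returns as
# te^2 = w' s2_ss w - 2 w' s_sb + s_bb. The helper below is an illustrative
# sketch, not the ARPM obj_tracking_err routine (which also optimizes w):
import numpy as np

def tracking_err_sketch(s2, w, s):
    s = np.asarray(s)
    s2_ss = s2[np.ix_(s, s)]   # covariance among the selected stocks
    s_sb = s2[s, -1]           # covariance of the selection with the benchmark
    s_bb = s2[-1, -1]          # benchmark variance
    te2 = w @ s2_ss @ w - 2 * (w @ s_sb) + s_bb
    return np.sqrt(max(te2, 0.0))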

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step04): Portfolio selection via naive routine

s_star_naive = naive_selection(g, n_, k_)
g_te_naive = np.zeros(k_)
for k in np.arange(0, k_):
Example #7
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-parameters)

tau_hl = 180  # half life
k_ = 25  # number of hidden factors

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step00): Load data

path = '../../../databases/temporary-databases/'
xi = np.array(pd.read_csv(path + 'db_GARCH_residuals.csv', index_col=0))
t_, n_ = xi.shape

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step01): Compute the HFP correlation

p = exp_decay_fp(t_, tau_hl)
_, sigma2 = meancov_sp(xi, p)
c2, _ = cov_2_corr(sigma2)
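
# cov_2_corr simply rescales the covariance by the standard deviations,
# c2 = sigma2 / (sigma sigma'); a sketch:
import numpy as np

def cov_2_corr_sketch(sigma2):
    sigma = np.sqrt(np.diag(sigma2))  # vector of standard deviations
    return sigma2 / np.outer(sigma, sigma), sigma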

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step02): Compute the loadings and residual variances via PAF factor analysis

beta, delta2 = factor_analysis_paf(c2, k_)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step03): Compute the factor analysis correlation and the Frobenius norm

c2_paf = beta @ beta.T + np.diag(delta2)
d_fro = np.linalg.norm(c2 - c2_paf, ord='fro') / \
        np.linalg.norm(c2, ord='fro') * 100.

# ## Plots
Example #8
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-implementation-step02): Historical estimation

# +
# time and state conditioning on smoothed and scored VIX returns
# state indicator: VIX compounded return realizations
c_vix = np.diff(np.log(vix))
# smoothing
z_vix = smoothing(c_vix, tau_hl_smooth)
# scoring
z_vix = scoring(z_vix, tau_hl_score)
# target value
z_vix_star = z_vix[-1]
# flexible probabilities
p_base = exp_decay_fp(len(dates), tau_hl_pri)
p = conditional_fp(z_vix, z_vix_star, alpha_leeway, p_base)

# HFP location and dispersion
mu_hat, sig2_hat = meancov_sp(x, p)
_, sig2_z_hat = meancov_sp(z, p)

# OLS loadings
_, beta_ols, sig2_u_ols, _ = fit_lfm_ols(x, z, p, fit_intercept=False)
r2_ols = multi_r2(sig2_u_ols, sig2_hat)
# -
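
# With fit_intercept=False, the flexible-probabilities OLS loadings reduce to a
# weighted cross-moment regression, beta = E_p[x z'] (E_p[z z'])^{-1}. A sketch
# (illustrative, not the ARPM fit_lfm_ols), with x of shape (t_, n_) and z of
# shape (t_, k_):
import numpy as np

def ols_loadings_sketch(x, z, p):
    e_xz = (x.T * p) @ z                       # E_p[x z']
    e_zz = (z.T * p) @ z                       # E_p[z z']
    beta = e_xz @ np.linalg.inv(e_zz)
    u = x - z @ beta.T                         # residuals
    mu_u = p @ u
    sig2_u = ((u - mu_u).T * p) @ (u - mu_u)   # weighted residual covariance
    return beta, sig2_u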

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-implementation-step03): Maximum likelihood - GLM with normal assumption

# +
alpha_mlfp_norm, beta_mlfp_norm, sig2_u_mlfp_norm, _ = \
Example #9
# select the last t_ observations
df_stocks = df_stocks.tail(t_)

# select stock
df_stocks = df_stocks['AMZN']  # stock value
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step01): Compute the risk driver

x = np.log(np.array(df_stocks))  # log-value

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step02): Compute HFP mean and covariance

epsi = np.diff(x)  # invariant past realizations
p = exp_decay_fp(t_ - 1, tau_hl)  # exponential decay probabilities
mu_hat, sig2_hat = meancov_sp(epsi, p)  # HFP mean and covariance

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step03): Compute Monte Carlo paths of risk drivers

x_tnow_thor = simulate_bm(x[-1].reshape(1), delta_t_m, mu_hat.reshape(1),
                          sig2_hat.reshape((1, 1)), j_).squeeze()
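
# simulate_bm presumably projects the log-value as an arithmetic Brownian
# motion, x_{t+dt} = x_t + mu*dt + sqrt(sig2*dt)*Z with Z ~ N(0,1). A
# one-dimensional Monte Carlo sketch (illustrative, not the ARPM routine):
import numpy as np

def simulate_bm_sketch(x_tnow, delta_t_m, mu, sig2, j_, seed=0):
    rng = np.random.default_rng(seed)
    z = rng.standard_normal((j_, len(delta_t_m)))
    steps = mu * delta_t_m + np.sqrt(sig2 * delta_t_m) * z  # BM increments
    paths = x_tnow + np.cumsum(steps, axis=1)
    return np.hstack((np.full((j_, 1), x_tnow), paths))  # prepend t_now value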

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step04): Compute projected expectations and standard deviations

mu_thor = x[-1] + mu_hat * np.cumsum(delta_t_m)  # projected expectations
sig_thor = np.sqrt(sig2_hat) * np.sqrt(
    np.cumsum(delta_t_m))  # projected standard deviations

# ## Plots
Example #10
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_lasso_vs_ridge-implementation-step01): Select stocks and SPX from database

v_stocks = np.array(spx_stocks.iloc[:, 1 + np.arange(k_)])  # select stocks
v_spx = np.array(spx_stocks.iloc[:, 0])

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_lasso_vs_ridge-implementation-step02): Compute linear returns of both SPX and stocks

x = np.diff(v_spx) / v_spx[:-1]  # benchmark
z = np.diff(v_stocks, axis=0) / v_stocks[:-1, :]  # factors
t_ = len(x)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_lasso_vs_ridge-implementation-step03): Set the flexible probabilities

p = exp_decay_fp(t_, tau_hl)  # exponential decay

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_lasso_vs_ridge-implementation-step04): Perform ridge regression

lambdagrid_ridge = np.linspace(0, lambda_ridge_max, l_)  # grid of penalties
beta_r = np.zeros((k_, l_))
for l in range(l_):
    # ridge regression
    _, beta_r[:, l], _, _ = fit_lfm_ridge(x, z, p, lambdagrid_ridge[l])
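
# Weighted ridge regression has the closed form
# beta_lambda = (z' diag(p) z + lambda*I)^{-1} z' diag(p) x; a sketch for one
# grid point, ignoring the intercept for brevity (illustrative, not the ARPM
# fit_lfm_ridge):
import numpy as np

def ridge_loadings_sketch(x, z, p, lam):
    k = z.shape[1]
    zpz = (z.T * p) @ z  # z' diag(p) z
    zpx = (z.T * p) @ x  # z' diag(p) x
    return np.linalg.solve(zpz + lam * np.eye(k), zpx)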

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_lasso_vs_ridge-implementation-step05): Perform lasso regression

lambdagrid_lasso = np.linspace(0, lambda_lasso_max, l_)  # grid of penalties
beta_l = np.zeros((k_, l_))
for l in range(l_):
    # lasso regression
Example #11
# select data
df_stocks = df_stocks[stock].tail(t_)

# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_projection_stock_bootstrap-implementation-step01): Compute risk driver

# +
x = np.log(np.array(df_stocks))  # log-value
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_projection_stock_bootstrap-implementation-step02): HFP distribution of the invariant

# +
epsi = np.diff(x)  # historical scenarios
p = exp_decay_fp(t_ - 1, tau_hl)  # probabilities
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_projection_stock_bootstrap-implementation-step03): Generate scenarios of log-value via bootstrapping

# +
x_tnow_thor = simulate_rw_hfp(x[-1].reshape(1), epsi, p, j_, m_).squeeze()
# -
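
# simulate_rw_hfp presumably bootstraps the random walk: it resamples the
# historical invariants with the flexible probabilities p and cumulates them
# into paths. A sketch under that assumption:
import numpy as np

def simulate_rw_hfp_sketch(x_tnow, epsi, p, j_, m_, seed=0):
    rng = np.random.default_rng(seed)
    eps_paths = rng.choice(epsi, size=(j_, m_), p=p)  # resampled invariants
    x_paths = x_tnow + np.cumsum(eps_paths, axis=1)
    return np.hstack((np.full((j_, 1), x_tnow), x_paths))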

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_projection_stock_bootstrap-implementation-step04): Evolution of expectation and standard deviation

# +
mu_thor = np.zeros(m_ + 1)
sig_thor = np.zeros(m_ + 1)
for m in range(0, m_ + 1):
    mu_thor[m], sig2_thor = meancov_sp(x_tnow_thor[:, m].reshape(-1, 1))
Example #12
# +
y = np.array(df_y[tau.astype('str')])  # yields to maturity
if y.shape[0] > t_:
    y = y[-t_:, :]
else:
    t_ = y.shape[0]

# increments
dy = np.diff(y, 1, axis=0)  # t_ - 1 increments
n_ = dy.shape[1]
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_kalman_filter_yield_curve-implementation-step02): Set flexible probabilities and compute effective number of scenarios

p = exp_decay_fp(dy.shape[0], tau_p)
p = p / np.sum(p)  # flexible probabilities
ens = effective_num_scenarios(p)  # effective number of scenarios

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_kalman_filter_yield_curve-implementation-step03): Estimate the evolution of first two Nelson-Siegel parameters

# Nelson-Siegel fit
theta = np.zeros((t_-1, 4))
theta[0, :] = fit_nelson_siegel_yield(tau, y[0, :], par_start)
for t in range(1, t_-1):
    theta[t, :] = fit_nelson_siegel_yield(tau, y[t, :], theta[t-1, :])
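
# For reference, one common Nelson-Siegel parametrization of the yield curve
# (the exact parametrization inside fit_nelson_siegel_yield may differ) is
# y(tau) = theta1 + theta2*slope(tau) + theta3*curv(tau) with decay theta4:
import numpy as np

def nelson_siegel_sketch(tau, theta):
    theta1, theta2, theta3, theta4 = theta
    lam = tau / theta4
    slope = (1 - np.exp(-lam)) / lam  # slope loading
    curv = slope - np.exp(-lam)       # curvature loading
    return theta1 + theta2 * slope + theta3 * curv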

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_kalman_filter_yield_curve-implementation-step04): Estimate evolution of first two hidden factors of Kalman Filter

z_KF, alpha, beta, sig2_U, alpha_z, beta_z, sig2_z = fit_state_space(dy, k_, p)
x_rec = alpha + beta@z_KF[-1, :]  # last recovered increment
Example #13
# +
times_to_maturity = np.round(np.array([1, 2, 3, 5, 7, 8, 10]), 2)
path = '../../../databases/global-databases/fixed-income/db_yields/data.csv'
y_db = pd.read_csv(path, parse_dates=['dates'], skip_blank_lines=True)

y = y_db[times_to_maturity.astype(float).astype(str)].values
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_different_length_series-implementation-step01): Compute the swap rates daily changes

# daily changes
epsi = np.diff(y, 1, axis=0)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_different_length_series-implementation-step02): Flexible probabilities

p = exp_decay_fp(len(epsi), tau_hl)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_different_length_series-implementation-step03): Maximum likelihood with flexible probabilities - complete series

mu, s2 = fit_locdisp_mlfp(epsi, p=p, nu=nu, threshold=tol)
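
# fit_locdisp_mlfp computes maximum-likelihood location/dispersion under a
# Student t with flexible probabilities. The standard EM-style fixed point
# reweights each scenario by (nu + i_)/(nu + Mahalanobis^2); a sketch
# (illustrative, without the refinements of the ARPM routine):
import numpy as np

def fit_locdisp_mlfp_sketch(epsi, p, nu, threshold=1e-6, max_iter=1000):
    t_obs, i_ = epsi.shape
    mu = p @ epsi
    s2 = ((epsi - mu).T * p) @ (epsi - mu)
    for _ in range(max_iter):
        d = epsi - mu
        mah2 = np.einsum('ti,ij,tj->t', d, np.linalg.inv(s2), d)
        q = p * (nu + i_) / (nu + mah2)  # Student t scenario weights
        mu_new = q @ epsi / np.sum(q)
        d = epsi - mu_new
        s2_new = (d.T * q) @ d
        converged = np.max(np.abs(mu_new - mu)) < threshold
        mu, s2 = mu_new, s2_new
        if converged:
            break
    return mu, s2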

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_different_length_series-implementation-step04): Drop the first portion of the observations from the 2yr and 5yr series

r = int(np.floor(len(epsi) * trunc))
epsi_dl = epsi.copy()
epsi_dl[:r, [1, 3]] = np.nan  # drop observations

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_different_length_series-implementation-step05): Maximum likelihood with flexible probabilities - different length

mu_dl, s2_dl = fit_locdisp_mlfp_difflength(epsi_dl, p=p, nu=nu, threshold=tol)
Example #14
x_csco = np.log(np.array(stocks.CSCO))
x_ge = np.log(np.array(stocks.GE))

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step02): Compute the invariants using a GARCH(1,1) fit

# +
_, _, epsi_csco = fit_garch_fp(np.diff(x_csco))
_, _, epsi_ge = fit_garch_fp(np.diff(x_ge))

epsi = np.array([epsi_csco, epsi_ge]).T
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step03): Set the exp. decay probabilities for MLFP estimation and compute the effective number of scenarios

p = exp_decay_fp(len(epsi_csco), tau_hl)  # exp. decay flexible probabilities
ens = effective_num_scenarios(p)  # effective number of scenarios

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_mlfp_ellipsoid_convergence-implementation-step04): Perform the MLFP estimation

mu_mlfp, sig2_mlfp = fit_locdisp_mlfp(epsi,
                                      p=p,
                                      nu=nu,
                                      threshold=gamma,
                                      print_iter=True)

# ## Plots

# +
plt.style.use('arpm')
Example #15
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-parameters)

gamma = 10  # parameter for the generalized exponential of entropy
t_ = 500  # number of scenarios
tau_hl_max = np.floor(1.2 * t_)  # maximum half-life parameter
k_ = 50  # number of half-life parameters considered

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-implementation-step01): Create a grid of half-life values for plotting

tau_hl_grid = np.linspace(1, tau_hl_max, num=k_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-implementation-step02): Compute exponential decay probabilities

p = np.zeros((k_, t_))
for k in range(k_):
    p[k] = exp_decay_fp(t_, tau_hl_grid[k])

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-implementation-step03): Compute effective number of scenarios

ens = np.zeros(len(tau_hl_grid))
ens_gamma = np.zeros(k_)
for k in range(k_):
    ens[k] = effective_num_scenarios(p[k])
    ens_gamma[k] = effective_num_scenarios(p[k],
                                           type_ent='gen_exp',
                                           gamma=gamma)
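
# The two entropy notions above presumably differ as follows: the default ens
# is exp(-sum_t p_t log p_t), while the generalized exponential of entropy with
# parameter gamma is (sum_t p_t^gamma)^(-1/(gamma-1)); both equal t_ for
# uniform probabilities. A sketch:
import numpy as np

def ens_gen_exp_sketch(p, gamma):
    return np.sum(p ** gamma) ** (-1 / (gamma - 1))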

# ## Plots

# +
plt.style.use('arpm')