Example #1
def intra_cluster_variance(sigma):
    # split the S&P return scenarios according to the volatility threshold sigma
    sigma_atm_thor_id = sigma_atm_thor > sigma
    r_sandp_given_0 = r_sandp[~sigma_atm_thor_id]
    r_sandp_given_1 = r_sandp[sigma_atm_thor_id]
    # covariance within each cluster
    _, cv_r_sandp_given_0 = meancov_sp(r_sandp_given_0)
    _, cv_r_sandp_given_1 = meancov_sp(r_sandp_given_1)
    # probability-weighted within-cluster variance
    p = r_sandp_given_1.shape[0] / j_
    return (1 - p) * cv_r_sandp_given_0 + p * cv_r_sandp_given_1
p = exp_decay_fp(t_ - 1, tau_hl)  # probabilities
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_projection_stock_bootstrap-implementation-step03): Generate scenarios of log-value via bootstrapping

# +
x_tnow_thor = simulate_rw_hfp(x[-1].reshape(1), epsi, p, j_, m_).squeeze()
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_projection_stock_bootstrap-implementation-step04): Evolution of expectation and standard deviation

# +
mu_thor = np.zeros(m_ + 1)
sig_thor = np.zeros(m_ + 1)
for m in range(0, m_ + 1):
    mu_thor[m], sig2_thor = meancov_sp(x_tnow_thor[:, m].reshape(-1, 1))
    sig_thor[m] = np.sqrt(sig2_thor)
# -

# ## Plots

# +
# preliminary settings
plt.style.use('arpm')
mydpi = 72.0
lgrey = [0.8, 0.8, 0.8]  # light grey
dgrey = [0.2, 0.2, 0.2]  # dark grey
t_m = np.arange(0, m_ + 1)
j_plot = 40  # number of paths to be plotted
h, b = histogram_sp(x_tnow_thor[:, -1], k_=10 * np.log(j_))
fig, ax = plt.subplots()
# number of grid points
i_ = 40
# grid points
r_sandp_min, r_sandp_max = r_sandp.min(), r_sandp.max()
z_grid = np.linspace(r_sandp_min, r_sandp_max, i_)
# kernel bandwidth
h = (r_sandp_max - r_sandp_min) / 50
# smoothing parameter
gamma = 2
# compute mean regression predictor on the grid
e_r_call_tnow_thor_given_z_grid = np.ones(i_) * np.nan
for i, z_i in enumerate(z_grid):
    # smooth kernel conditional probabilities
    p_given_z_i = smooth_kernel_fp(r_sandp, z_i, h, gamma)
    # conditional expectation
    e_r_call_tnow_thor_given_z_grid[i], _ = meancov_sp(r_call, p_given_z_i)


# mean regression predictor as piecewise linear interpolation
def chi(z):
    return np.interp(z, z_grid, e_r_call_tnow_thor_given_z_grid)


# -
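# For intuition (not part of the original snippet): the smooth-kernel conditional probabilities
# used above can be sketched by hand as damped, normalized weights centered at the conditioning
# value z_star; the actual arpym smooth_kernel_fp may differ in details such as the damping form.
def smooth_kernel_fp_sketch(z, z_star, h, gamma=2):
    w = np.exp(-(np.abs(z - z_star) / h) ** gamma)  # kernel weights with bandwidth h, exponent gamma
    return w / np.sum(w)  # normalize to flexible probabilities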

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_call_mean_regression-implementation-step03): Evaluate prediction and residuals

r_bar_call = chi(r_sandp)  # prediction
u = r_call - r_bar_call  # residuals

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_call_mean_regression-implementation-step04): Compute payoff at t_end and current value of the call option as a function of the underlying S&P500
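# The code for this step is not included in the snippet. A minimal hedged sketch with a
# hypothetical strike and grid of underlying S&P 500 values (illustrative only):
k_strike_sketch = 1300                                                    # hypothetical strike
s_grid = np.linspace(0.5 * k_strike_sketch, 1.5 * k_strike_sketch, 200)   # grid of underlying values
v_call_payoff_tend = np.maximum(s_grid - k_strike_sketch, 0)              # call payoff at t_end
# the current value as a function of the underlying would then follow from a Black-Scholes-Merton
# pricer evaluated on s_grid at the residual time to maturity (not reproduced here)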
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_executive_summary-implementation-step01): Risk drivers identification

# Compute the time series of the log values
x_t = np.log(v_n_t)
x_tnow = x_t[-1, :]  # current value of the risk drivers

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_executive_summary-implementation-step02): Quest for Invariance

# extract the realized time series of the invariants (log-returns)
eps_t = np.diff(x_t, axis=0)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_executive_summary-implementation-step03): Estimation

# estimate sample mean and sample covariance
mu, sigma2 = meancov_sp(eps_t)
rho_1_2 = sigma2[0, 1] / np.sqrt(sigma2[0, 0] * sigma2[1, 1])

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_executive_summary-implementation-step04): Projection

# compute location and dispersion parameters
mu_x_thor = x_tnow + deltat * mu
sigma2_x_thor = deltat * sigma2

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_executive_summary-implementation-step05): Pricing

# compute parameters of bivariate normal distribution of the ex-ante P&L
v_stocks_tnow = v_n_t[-1, :2]
x_stocks_tnow = x_tnow[:2]
mu_stocks_x_thor = mu_x_thor[:2].copy()
sigma2_stocks_x_thor = sigma2_x_thor[:2, :2]
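# The snippet cuts off here. A minimal hedged sketch of the bivariate normal parameters of the
# stocks' ex-ante P&L, assuming the linearization pi_n ~ v_n_tnow * (x_n_thor - x_n_tnow):
mu_pl_stocks = v_stocks_tnow * (mu_stocks_x_thor - x_stocks_tnow)
sigma2_pl_stocks = np.diag(v_stocks_tnow) @ sigma2_stocks_x_thor @ np.diag(v_stocks_tnow)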
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_hedging-implementation-step02): Compute the Black-Scholes-Merton delta

tau = np.busday_count(t_m[0], t_end) / 252
r_rf = 1 / np.exp(-y_rf * m_) - 1  # risk-free rate
sigma = np.exp(log_sigma)
d1 = (np.log(v_stock_u[0, 0] / k_strike) + (r_rf + sigma**2 / 2) * tau) / \
     (sigma * np.sqrt(tau))
delta_bms = norm.cdf(d1)  # BMS delta
beta_bms = delta_bms * v_stock_u[0, 0] / vcall_u[0, 0]  # BMS beta

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_hedging-implementation-step03): Compute the factors on demand delta at the selected horizon

# stock linear return
r_stock = (v_stock_u[:, 1:] - v_stock_u[:, [0]]) / v_stock_u[:, [0]]
rr = np.squeeze(np.r_['-1', r_call[:, [-index]], r_stock[:, [-index]]])
expectation, cov = meancov_sp(rr)
beta_fod = cov[0, 1] / cov[1, 1]  # FOD beta
delta_fod = beta_fod * vcall_u[0, 0] / v_stock_u[0, 0]  # FOD delta

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_hedging-implementation-step04): Compute the return of the hedged portfolio

# BMS return of hedged portfolio
r_bms = r_call[:, -1] - beta_bms * r_stock[:, -index]
# FOD return of hedged portfolio
r_fod = r_call[:, -1] - beta_fod * r_stock[:, -index]

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_hedging-implementation-step05): Compute the Black-Scholes-Merton curve and payoff

# +
s = np.arange(600, 1801)  # range of values for underlying
l_ = len(s)
Example #6
# +
# time and state conditioning on smoothed and scored VIX returns

# state indicator: VIX compounded return realizations
c_vix = np.diff(np.log(vix))
# smoothing
z_vix = smoothing(c_vix, tau_hl_smooth)
# scoring
z_vix = scoring(z_vix, tau_hl_score)
# target value
z_vix_star = z_vix[-1]
# flexible probabilities
p_pri = exp_decay_fp(len(dates), tau_hl_pri)
p = conditional_fp(z_vix, z_vix_star, alpha_leeway, p_pri)

mu_hat, sig2_hat = meancov_sp(epsi, p)
# -

# ## Save database

# +
out = pd.DataFrame({stocks_names[i]: epsi[:, i]
                    for i in range(i_)}, index=dates)
out = out[list(stocks_names[:i_])]
out.index.name = 'dates'
out.to_csv('../../../databases/temporary-databases/db_fit_garch_stocks_epsi.csv')

out = pd.DataFrame({'mu_hat': pd.Series(mu_hat.reshape(-1)),
                    'sig2_hat': pd.Series(sig2_hat.reshape(-1))})
out.to_csv(
    '../../../databases/temporary-databases/db_fit_garch_stocks_locdisp.csv')
Example #7
from arpym.statistics import meancov_sp
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_scen_prob-parameters)

k_ = 2  # number of risk factors
rh_z = np.array([[-0.01057143, -0.0041252, -0.01986819],
               [-0.02405714, -0.00980853, 0.01450357],
               [0.00657143, -0.00406089, 0.01188747],
               [0.01925714, 0.02680999, 0.00541017]])   # scenario realizations
p = np.array([0.3, 0.1, 0.4, 0.2])  # probabilities
j_ = p.shape[0]

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_scen_prob-implementation-step01): Top-down exposures

m_rh_z, s_rh_z = meancov_sp(rh_z, p)  # scenario-probability mean and covariance
# top-down exposures
beta = s_rh_z[0, 1:]@(np.linalg.solve(s_rh_z[1:, 1:], np.eye(k_)))

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_scen_prob-implementation-step02): Shift term

alpha = m_rh_z[0] - beta@m_rh_z[1:]

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_scen_prob-implementation-step03): Scenarios for the residuals

u = rh_z[:, 0] - alpha - beta@rh_z[:, 1:].T

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_attribution_scen_prob-implementation-step04): Joint scenarios for U,Z

uz = np.r_['-1', u[np.newaxis, ...].T, rh_z[:, 1:3]]
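# Sanity check (not in the original snippet): by construction the residuals have zero mean and
# zero covariance with the factors Z under the scenario probabilities p
m_uz, s2_uz = meancov_sp(uz, p)
# m_uz[0] and s2_uz[0, 1:] are approximately zero up to numerical precision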
Example #8
# extract values of the call option and the underlying at t_now and t_hor
v_call_tnow_kstrk_tend = v_call_kstrk_tend[0, 0]
v_call_thor_kstrk_tend = v_call_kstrk_tend[:, delta_t-1] # scenarios at the horizon of the call option
v_sandp_tnow = np.exp(log_v_sandp[0, 0])
v_sandp_thor = np.exp(log_v_sandp[:, delta_t-1])  # scenarios at the horizon of the S&P 500 index

# compute returns of the call option and the underlying between t_now and t_hor
r_call = (v_call_thor_kstrk_tend/v_call_tnow_kstrk_tend - 1)
r_sandp = (v_sandp_thor/v_sandp_tnow - 1)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_call_mean_lin_regression-implementation-step02): Find the best linear predictor

# +
# expectation and covariance of the joint returns
e_r_call_r_sandp, cov_r_call_r_sandp = meancov_sp(np.c_[r_call, r_sandp])
# parameters of the linear mean regression predictor
beta = cov_r_call_r_sandp[0, 1]/cov_r_call_r_sandp[1, 1]
alpha = e_r_call_r_sandp[0]-beta*e_r_call_r_sandp[1]

# linear mean regression predictor
def chi_alpha_beta(z):
    return alpha + beta*np.array(z)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_call_mean_lin_regression-implementation-step03): Evaluate prediction and residuals

r_bar_call = chi_alpha_beta(r_sandp)  # prediction
u = r_call-r_bar_call  # residuals

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_call_mean_lin_regression-implementation-step04): Compute payoff at t_end and current value of the call option as a function of the underlying S&P500
Example #9
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-parameters)

tau_hl = 180  # half life
k_ = 25  # number of hidden factors

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step00): Load data

path = '../../../databases/temporary-databases/'
xi = np.array(pd.read_csv(path + 'db_GARCH_residuals.csv', index_col=0))
t_, n_ = xi.shape

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step01): Compute the HFP correlation

p = exp_decay_fp(t_, tau_hl)
_, sigma2 = meancov_sp(xi, p)
c2, _ = cov_2_corr(sigma2)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step02): Compute the loadings and residual variances via PAF factor analysis

beta, delta2 = factor_analysis_paf(c2, k_)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step03): Compute the factor analysis correlation and the Frobenius norm

c2_paf = beta @ beta.T + np.diag(delta2)
d_fro = np.linalg.norm(c2 - c2_paf, ord='fro') / \
        np.linalg.norm(c2, ord='fro') * 100.

# ## Plots

# +
Example #10
v_tnow = v_thor[:, 0].reshape(-1, 1)
pl_thor = v_thor - v_tnow + np.c_[np.zeros(j_), cf_thor]

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_couponbond-implementation-step06): Scenario-probability expectations and standard deviations

# +
mu_v_thor = np.zeros(m_ + 1)
sig_v_thor = np.zeros(m_ + 1)
mu_cf_thor = np.zeros(m_)
sig_cf_thor = np.zeros(m_)
mu_pl_thor = np.zeros(m_ + 1)
sig_pl_thor = np.zeros(m_ + 1)

# probabilities
for m in range(len(t_m)):
    mu_v_thor[m], sig1 = meancov_sp(v_thor[:, m].reshape(-1, 1))
    sig_v_thor[m] = np.sqrt(sig1)

    mu_pl_thor[m], sig1 = meancov_sp(pl_thor[:, m].reshape(-1, 1))
    sig_pl_thor[m] = np.sqrt(sig1)

for m in range(len(t_m) - 1):
    mu_cf_thor[m], sig1 = meancov_sp(cf_thor[:, m].reshape(-1, 1))
    sig_cf_thor[m] = np.sqrt(sig1)

# -

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_couponbond-implementation-step07): Average yield scenarios at the horizon

if yields:
    y_bar = np.mean(shadowrates_ytm(x_tnow_thor), axis=2)
Example #11
mu_ineq = mu_view_ineq(-mu_x_base, sig_x_base, -sk_x_base)

z_eq = v(x)[1:]
mu_view_eq = mu_view_eq(-mu_x_base, sig_x_base)

p_upd = min_rel_entropy_sp(p_base,
                           z_ineq,
                           mu_ineq,
                           z_eq,
                           mu_view_eq,
                           normalize=False)
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_entropy_view-implementation-step04): Compute expectations, standard deviations and skewness of updated distribution

mu_upd, sig2_upd = meancov_sp(x, p_upd)
sig_upd = np.sqrt(sig2_upd)
sk_upd = ((x - mu_upd)**3) @ p_upd / sig_upd**3

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_entropy_view-implementation-step05): Compute confidence-weighted probabilities

p_c_add = c * p_upd + (1 - c) * p_base
p_c_mul = p_upd ** c * p_base ** (1 - c) /\
    np.sum(p_upd ** c * p_base ** (1 - c))

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_entropy_view-implementation-step06): Compute expectations, standard deviations and skewness of confidence-weighted distributions

# +
mu_c_add, sig2_c_add = meancov_sp(x, p_c_add)
sig_c_add = np.sqrt(sig2_c_add)
sk_add = ((x - mu_c_add)**3) @ p_c_add / sig_c_add**3
Example #12
def sigf(z1, z2):
    return np.sqrt(np.minimum(z1**2, 1 / (10 * np.pi)))


z = np.random.multivariate_normal(mu_z, sigma2_z, n_samples)
psi_z = np.c_[z[:, 0], z[:, 0] * z[:, 1]]
x = muf(z[:, 0], z[:, 1]) +\
       sigf(z[:, 0], z[:, 1]) * np.random.randn(n_samples)

x = np.heaviside(x, 1)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_linclass_fda-implementation-step02): Fisher discriminant analysis

# +
e_z0, cv_z0 = meancov_sp(psi_z[x == 0])
e_z1, cv_z1 = meancov_sp(psi_z[x == 1])
p = len(x[x == 1]) / n_samples
e_cv_zx = (1 - p) * cv_z0 + p * cv_z1

gamma = np.linalg.norm(np.linalg.solve(e_cv_zx, e_z1 - e_z0))
beta = (1 / gamma) * np.linalg.solve(e_cv_zx, e_z1 - e_z0)
alpha = (1 / gamma) * logit(p) - beta @ (e_z1 + e_z0) / 2


def chi_alphabeta(z):
    return np.heaviside(alpha + z @ beta.T, 1)


# -
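# A minimal usage sketch (not in the original snippet): in-sample misclassification rate of the
# Fisher discriminant classifier defined above
x_hat = chi_alphabeta(psi_z)
error_rate = np.mean(x_hat != x)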
Example #13
# select data within the date range
df_stocks = df_stocks.loc[df_stocks.index].tail(t_)

# select stock
df_stocks = df_stocks['AMZN']  # stock value
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step01): Compute the risk driver

x = np.log(np.array(df_stocks))  # log-value

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step02): Compute HFP mean and covariance

epsi = np.diff(x)  # invariant past realizations
p = exp_decay_fp(t_ - 1, tau_hl)  # exponential decay probabilities
mu_hat, sig2_hat = meancov_sp(epsi, p)  # HFP mean and covariance

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step03): Compute Monte Carlo paths of risk drivers

x_tnow_thor = simulate_bm(x[-1].reshape(1), delta_t_m, mu_hat.reshape(1),
                          sig2_hat.reshape((1, 1)), j_).squeeze()

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_projection_brownian_motion-implementation-step04): Compute projected expectations and standard deviations

mu_thor = x[-1] + mu_hat * np.cumsum(delta_t_m)  # projected expectations
sig_thor = np.sqrt(sig2_hat) * np.sqrt(
    np.cumsum(delta_t_m))  # projected standard deviations

# ## Plots

# +
Example #14
from mpl_toolkits import mplot3d

from arpym.estimation import cov_2_corr
from arpym.statistics import meancov_sp, pdf_sp
from arpym.tools import add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-parameters)

h2 = 0.01  # bandwidth
x = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]).T  # joint scenarios
p = np.array([0.33, 0.10, 0.20, 0.37])  # probabilities

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-implementation-step01): Compute expectation and covariance

m, s2 = meancov_sp(x, p)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-implementation-step02): Compute correlation matrix

c2, _ = cov_2_corr(s2)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-implementation-step03): Compute the scenario-probability pdf

# +
# grid points for pdf evaluation
x1_grid = np.arange(np.min(x[:, 0]) - 0.5, np.max(x[:, 0]) + 0.5, 0.025)
x2_grid = np.arange(np.min(x[:, 1]) - 0.5, np.max(x[:, 1]) + 0.5, 0.025)
x_grid = np.array([[x1, x2] for x1 in x1_grid for x2 in x2_grid])

# scenario-probability pdf
f = pdf_sp(h2, x_grid, x, p)
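# Quick check (not in the original snippet): the scenario-probability pdf should integrate to
# approximately one over the evaluation grid (grid spacing is 0.025 in each dimension)
print(np.sum(f) * 0.025 ** 2)  # expected to be close to 1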
Example #15
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_characteristic_port_rev-implementation-step06): realized factor premium

lambda_hat = np.zeros(t_)
for t in range(0, t_):
    lambda_hat[t], _ = ewm_meancov(z_char[:t + 1], tau_hl_lam)

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_characteristic_port_rev-implementation-step07): return vs expected returns, symm. regression line

# +
exp_ret = lambda_hat[-1] * beta[-1, :] / v[-2, :]
real_ret = pl_real[-1, :] / v[-2, :]

# symmetric regression
exp_real = np.concatenate((exp_ret.reshape(-1, 1), real_ret.reshape(-1, 1)),
                          axis=1)
mu_exp_real, sig2_exp_real = meancov_sp(exp_real)
e, l = pca_cov(sig2_exp_real)
mu_real = mu_exp_real[1]
mu_exp = mu_exp_real[0]
beta_sym = -e[1, 1] / e[0, 1]
alpha_sym = mu_exp - beta_sym * mu_real
x = 2 * np.arange(-10, 11) / 10
y = beta_sym * x + alpha_sym
# -

# ## Save characteristics portfolios

output = {
    'w_shift': pd.Series(w_shift),
    'h_char': pd.Series(h_char.reshape(t_ * n_)),
    'n_': pd.Series(n_)
}
Example #16
traj = np.zeros((n_, l_, k_))

# size of parent order
delta_h_parent = (h_qend - h_qnow).astype('int')
# beta grid
beta = np.linspace(alpha / (1 + alpha), 1, l_ + 1, endpoint=True)
beta = beta[1:]
# q grid
q_grid = np.linspace(q_now, q_end, k_)

for n in range(n_):
    if delta_h_parent[n] == 0:
        # no change in holdings
        traj[n, :, :] = np.tile(h_qend[n], (l_, k_))
    else:
        _, sigma2[n] = meancov_sp(pi_oneday[:, n], p)
        for l in range(l_):
            # expected P&L
            xi[l] = beta[l]**(alpha + 1) / (beta[l] + beta[l] * alpha - alpha)
            mean_pihat[n, l] = gamma/2*(h_qend[n]**2 - h_qnow[n]**2) - \
                eta*xi[l]*np.abs(delta_h_parent[n])**(1+alpha) * \
                (q_end-q_now)**(-alpha)
            # P&L variance
            variance_pihat[n, l] = sigma2[n] * (q_end-q_now) * \
                (h_qnow[n]**2 + 2*h_qnow[n]*delta_h_parent[n]/(beta[l]+1) +
                (delta_h_parent[n]**2)/(2*beta[l]+1))
            # trajectory
            traj[n, l, :] = h_qnow[n] + \
                ((q_grid-q_now)/(q_end-q_now))**beta[l]*delta_h_parent[n]
# -
Example #17
    y_h = db_exante_perf.values.squeeze()

    # Ex-ante evaluation
    db_quantile_and_satis = pd.read_csv(path+'db_quantile_and_satis_historical.csv')
    c_es = db_quantile_and_satis['c_es'][0]
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step09-implementation-step01): Solving the first step of the mean-variance approach

# +
# define set of parameters for mean-variance frontier
lambda_grid = np.arange(lambda_inf, lambda_sup, lambda_step)
l_ = lambda_grid.shape[0]

# compute expectation and covariance of the P&L
exp_pi, cov_pi = meancov_sp(pi_tnow_thor, p)

# set constraints

# equality constraints
# budget constraint: h'*v_tnow = v_h_tnow
a_budget = v_tnow.reshape(1, -1)
b_budget = np.array(v_h_tnow)
# constraint: do not invest in the S&P
a_sp = np.zeros((1, n_))
a_sp[0, n_stocks] = 1
b_sp = np.array(0)
# combine equality constraints
a = cvxopt.matrix(np.r_[a_budget, a_sp])
b = cvxopt.matrix(np.r_[b_budget, b_sp])
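# The snippet cuts off before the optimization. A hedged sketch of one point on the mean-variance
# frontier via a cvxopt quadratic program, for a given risk aversion lam in lambda_grid
# (an illustration, not necessarily the original script's exact formulation):
lam = lambda_grid[0]
p_qp = cvxopt.matrix(2 * lam * cov_pi)      # quadratic term: minimize lam * h' cov(P&L) h - exp_pi' h
q_qp = cvxopt.matrix(-exp_pi)
sol = cvxopt.solvers.qp(p_qp, q_qp, A=a, b=b)
h_lam = np.array(sol['x']).flatten()        # candidate holdings for this risk aversion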
Example #18
# +
# time and state conditioning on smoothed and scored VIX returns
# state indicator: VIX compounded return realizations
c_vix = np.diff(np.log(vix))
# smoothing
z_vix = smoothing(c_vix, tau_hl_smooth)
# scoring
z_vix = scoring(z_vix, tau_hl_score)
# target value
z_vix_star = z_vix[-1]
# flexible probabilities
p_base = exp_decay_fp(len(dates), tau_hl_pri)
p = conditional_fp(z_vix, z_vix_star, alpha_leeway, p_base)

# HFP location and dispersion
mu_hat, sig2_hat = meancov_sp(x, p)
_, sig2_z_hat = meancov_sp(z, p)

# OLS loadings
_, beta_ols, sig2_u_ols, _ = fit_lfm_ols(x, z, p, fit_intercept=False)
r2_ols = multi_r2(sig2_u_ols, sig2_hat)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-implementation-step03): Maximum likelihood - GLM with normal assumption

# +
alpha_mlfp_norm, beta_mlfp_norm, sig2_u_mlfp_norm, _ = \
    fit_lfm_mlfp(x, z, p, 10**9)

# compute r-squared
u_mlfp_norm = x - alpha_mlfp_norm - z @ beta_mlfp_norm.T
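# The snippet cuts off here; a plausible continuation mirroring the OLS block above
# (an assumption, not necessarily the original code):
r2_mlfp_norm = multi_r2(sig2_u_mlfp_norm, sig2_hat)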
Example #19
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_couponbond_taylor-implementation-step03): Scenarios for bond normalized P&L

y_t_now = y_t_now_t_hor[[0], 0, :]
v_t_now = bond_value(t_m[0], coupon, r, 'y', y_t_now, tau_x=tau)
v_t_now = v_thor[:, 0].reshape(-1, 1)
r_t_hor = (v_thor - v_t_now) / v_t_now

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_couponbond_taylor-implementation-step04): Scenario-probability expectations and standard deviations

# +
mu_r_t_hor = np.zeros(m_ + 1)
sig_r_t_hor = np.zeros(m_ + 1)

for m in range(len(t_m)):
    mu_r_t_hor[m], sig1 = meancov_sp(r_t_hor[:, m].reshape(-1, 1))
    sig_r_t_hor[m] = np.sqrt(sig1)
# -

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_couponbond_taylor-implementation-step05): Numerical greeks: yield, eff. key rate durations and convexities

# +
# numerical yield
v_t_now = v_t_now[0]
dt = 1 / 252  # numerical differentiation step (yield)
y_hat_tnow = (bond_value(
    np.busday_offset(t_m[0], dt * 252), coupon, r, 'y', y_t_now, tau_x=tau) -
              v_t_now) / (dt * v_t_now)

# effective key rate durations and convexities
dy_vec = dy * np.eye(d_)
Example #20
kend_x = 2 / np.pi * np.arcsin(rho_12)
kend_x1v_put_h = -1
kend_x1v_call_h = 1
kend_x2v_put_h = -2 / np.pi * np.arcsin(rho_12)
kend_x2v_call_h = 2 / np.pi * np.arcsin(rho_12)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step05): Compute Spearman rho

# compute grades scenarios
u_x, _, _ = cop_marg_sep(x)
u_v_p, _, _ = cop_marg_sep(v_put_h)
u_v_c, _, _ = cop_marg_sep(v_call_h)

# Spearman rho
_, cov_u = meancov_sp(np.c_[u_x[:, 0], u_v_p, u_v_c, u_x[:, 1]])
spear, _ = cov_2_corr(cov_u)

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step06): Compute correlation

cov = np.cov(np.c_[x[:, 0], v_put_h, v_call_h, x[:, 1]].T)
corr, _ = cov_2_corr(cov)

# ## Plots

# +
plt.style.use('arpm')
violet = [170/255, 85/255, 187/255]
teal = [60/255, 149/255, 145/255]

f = plt.figure()
Example #21
        return (1 - p) * cv_r_sandp_given_0 + p * cv_r_sandp_given_1

    sigma_ = minimize_scalar(intra_cluster_variance,
                             bounds=(sigma_atm_thor.min(),
                                     sigma_atm_thor.max()),
                             method='bounded').x

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_sp_anova-implementation-step03): Find the best predictor

# +
# scenarios of the conditional
sigma_atm_thor_id = np.abs(sigma_atm_thor) > sigma_
r_sandp_given_0 = r_sandp[~sigma_atm_thor_id]
r_sandp_given_1 = r_sandp[sigma_atm_thor_id]
# conditional expectation
m_x_0, _ = meancov_sp(r_sandp_given_0)
m_x_1, _ = meancov_sp(r_sandp_given_1)


# ANOVA predictor
def chi(z):
    return m_x_0 * (z <= sigma_) + m_x_1 * (z > sigma_)


# -

# ## Plots:

# +
plt.style.use('arpm')
Example #22
                              x_proj[j, m, 1:], m_moneyness, tau_implvol,
                              k_strk, t_end, t_m[m])
        # compute log-implied volatility at the moneyness
        log_sigma_atm[j, m] = \
            interpolate.LinearNDInterpolator(points,
                                             x_proj[j, m, 1:])(*np.r_[tau, 0])
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-implementation-step02): Scenario-probability expectations and standard deviations

# +
mu_v = np.zeros(m_+1)
sig_v = np.zeros(m_+1)

for m in range(len(t_m)):
    mu_v[m], sig1 = meancov_sp(v_call_thor[:, m].reshape(-1, 1))
    sig_v[m] = np.sqrt(sig1)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_pricing_calloption-implementation-step03): Save databases

# +
output = {'j_': pd.Series(j_),
          'k_strike': pd.Series(k_strk),
          't_end': pd.Series(t_end),
          'm_': pd.Series(m_),
          'y_rf': pd.Series(y),
          't_m': pd.Series(t_m),
          'log_s': pd.Series(x_proj[:, :, 0].reshape((j_*(m_+1),))),
          'v_call_thor': pd.Series(v_call_thor.reshape((j_*(m_+1),))),
          'log_sigma_atm': pd.Series(log_sigma_atm.reshape((j_*(m_+1),)))}
Example #23
df = pd.read_csv(path + 'db_pric_options.csv', index_col=0)

pi_call = np.array(df['pi_call'])  # call option P&L scenarios
pi_put = np.array(df['pi_put'])  # put option P&L scenarios
p = np.array(df['p'])  # probabilities
dates = np.array(df.index.values)  # dates
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_aggregation_options_hfp-implementation-step01): Compute the portfolio P&L scenarios and distribution

# +
pi_h = h.T @ np.r_[[pi_call], [pi_put]]  # portfolio P&L scenarios
ens = effective_num_scenarios(p)  # effective number of scenarios

# mean and standard deviation of the portfolio P&L distribution
[mu_pi_h, sigma2_pi_h] = meancov_sp(pi_h, p)
sigma_pi_h = np.sqrt(sigma2_pi_h)

# mean and standard deviation of the call option P&L distribution
[mu_pi_call, sigma2_pi_call] = meancov_sp(pi_call, p)
sigma_pi_call = np.sqrt(sigma2_pi_call)

# mean and standard deviation of the put option P&L distribution
[mu_pi_put, sigma2_pi_put] = meancov_sp(pi_put, p)
sigma_pi_put = np.sqrt(sigma2_pi_put)
# -

# ## Plots

# +
plt.style.use('arpm')
Example #24
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step00): Load data

path = '../../../databases/global-databases/equities/db_stocks_SP500/'
data = pd.read_csv(path + 'db_stocks_sp.csv', index_col=0, header=[0, 1])

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step01): Compute time series of returns

n_ = len(w)  # market dimension
r_t = data.pct_change().iloc[1:, :n_].values  # returns

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step02): Compute the sample mean and the exponential decay sample covariance

t_ = len(r_t)
p_t_tau_hl = exp_decay_fp(t_, tau_hl)  # exponential decay probabilities
mu_hat_r, sig2_hat_r = meancov_sp(r_t, p_t_tau_hl)  # sample mean and cov.

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step03): Compute Black-Litterman prior parameters

# +
# expectation in terms of market equilibrium
mu_r_equil = 2 * lam * sig2_hat_r @ w

tau = t_  # uncertainty level in the reference model
mu_m_pri = mu_r_equil
cv_pri_pred = (1 + 1 / tau) * sig2_hat_r
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step04): Compute vectors quantifying the views

i = v @ mu_r_equil + eta * np.sqrt(np.diag(v @ sig2_hat_r @ v.T))
Example #25
path = '../../../databases/global-databases/equities/db_stocks_SP500/'
data = pd.read_csv(path + 'db_stocks_sp.csv',
                   index_col=0,
                   header=[0, 1],
                   parse_dates=True)
n_ = len(data.columns) - 1
v = data.iloc[:, 1:n_ + 1].values

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step01): Compute linear returns of stocks

x = v[1:, :] / v[:-1, :] - 1  # linear returns
t_ = x.shape[0]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step02): Estimate expectation and covariance of X and define sigma matrix

m_x_hat, s2_x_hat = meancov_sp(x)  # HFP moments
sigma2 = np.diag(np.diag(s2_x_hat))  # scale matrix
sigma = np.sqrt(sigma2)
sigma_inv = np.diag(1 / np.diag(sigma))

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step03): Compute principal component decomposition

e_hat, lambda2_hat = pca_cov(sigma_inv @ s2_x_hat @ sigma_inv)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step04): Estimate the loadings, the factor extraction matrix and shift

alpha_hat_pc = m_x_hat  # shift
beta_hat_pc = sigma @ e_hat[:, :k_]  # loadings
gamma_hat_pc = e_hat[:, :k_].T @ sigma_inv  # construction matrix

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step05): Compute the factor realizations and their expectation and covariance
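# The code for this step is not included in the snippet. A minimal hedged sketch: extract the
# factor realizations via the construction matrix and estimate their moments
z_hat_pc = (x - alpha_hat_pc) @ gamma_hat_pc.T    # factor realizations
m_z_hat, s2_z_hat = meancov_sp(z_hat_pc)          # factor expectation and covariance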
Example #26
# +
# stocks return
r_stock = np.diff(v_stock, axis=0) / v_stock[:-1, :]

# S&P500 index return
r_sandp = np.diff(v_sandp, axis=0) / v_sandp[:-1]
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step02): Cov. matrix of the joint vector of stocks and bench. returns

# +
# exponential decay probabilities
p = exp_decay_fp(t_ - 1, tau_hl)

# HFP covariance
_, s2_r_stock_r_sandp = meancov_sp(
    np.concatenate((r_stock, r_sandp.reshape(-1, 1)), axis=1), p)
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step03): Objective function

g = lambda s: obj_tracking_err(s2_r_stock_r_sandp, s)[1]

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step04): Portfolio selection via naive routine

s_star_naive = naive_selection(g, n_, k_)
g_te_naive = np.zeros(k_)
for k in np.arange(0, k_):
    g_te_naive[k] = g(s_star_naive[k])

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_stock_selection-implementation-step05): Portfolio selection via forward stepwise routine
# -
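# The code for this step is not included in the snippet. Assuming arpym provides a
# forward_selection routine with the same interface as naive_selection (an assumption),
# a sketch might read:
# s_star_fwd = forward_selection(g, n_, k_)
# g_te_fwd = np.array([g(s_star_fwd[k]) for k in range(k_)])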

Example #27
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_lfm_executive_summary-implementation-step01): Compute portfolio returns, S&P index returns and loadings of regression LFM

# returns of the 2 stocks
r_n_t = v_stocks[1:, :] / v_stocks[:-1, :] - 1
# current portfolio value
v_h_tnow = v_stocks[-1, :].dot(h)
# portfolio weights
w_tnow = v_stocks[-1, :2] * h / v_h_tnow
# portfolio returns
x_t = np.sum(w_tnow * r_n_t, axis=1)
# S&P 500 returns
z_t = v_sandp[1:] / v_sandp[:-1] - 1
# LFM parameters
m_xz, s2_xz = meancov_sp(np.array([x_t, z_t]).T)
beta_reg = s2_xz[0, 1] / s2_xz[1, 1]
alpha_reg = m_xz[0] - beta_reg * m_xz[1]
x_pred = alpha_reg + beta_reg * z_pstat

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_lfm_executive_summary-implementation-step02): Fit logistic model and yield prediction for last observation

x_t_plus_1_binary = (x_t[1:] > 0).astype(int)
p = np.count_nonzero(x_t_plus_1_binary) / len(x_t_plus_1_binary)
logistic = LogisticRegression(penalty='l2',
                              C=np.inf,
                              class_weight='balanced',
                              solver='lbfgs',
                              random_state=1,
                              fit_intercept=1)
poly = PolynomialFeatures(degree=3, include_bias=False)
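# The snippet cuts off before the fit. A hedged sketch (an assumption about the original
# script's intent): fit the logistic model on polynomial features of the S&P return z_t at
# time t to predict the sign of the next-period portfolio return, then predict for the
# last observation
z_feat = poly.fit_transform(z_t[:-1].reshape(-1, 1))
logistic.fit(z_feat, x_t_plus_1_binary)
p_up_last = logistic.predict_proba(poly.transform(z_t[[-1]].reshape(-1, 1)))[0, 1]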
Example #28
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step01): Compute linear returns of X and Z

v_stock = data.values
x = (v_stock[1:, :] - v_stock[:-1, :]) / v_stock[:-1, :]
v_sector = idx_sector.values
z = (v_sector[1:, :] - v_sector[:-1, :]) / v_sector[:-1, :]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step02): Compute OLSFP estimates and residuals

alpha, beta, s2, u = fit_lfm_ols(x, z)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step03): Compute the joint covariance and correlation

# +
# compute covariance
[mu_uz, sig2_uz] = meancov_sp(np.hstack((u, z)))
sig2_u = sig2_uz[:n_, :n_]
sig2_z = sig2_uz[n_:, n_:]

# compute correlation
c2_uz, _ = cov_2_corr(sig2_uz)
c_uz = c2_uz[:n_, n_:]
c2_u = np.tril(c2_uz[:n_, :n_], -1)
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step04): Compute standard deviations of two portfolios

# +
w_1 = np.ones(n_) / n_  # equal weight portfolio
w_2 = np.zeros(n_)  # long/short portfolio
w_2[n_long] = 0.69158715
Example #29
def min_rel_entropy_sp(p_pri, z_ineq=None, mu_view_ineq=None, z_eq=None, mu_view_eq=None,
                       normalize=True):
    """For details, see here.

    Note
    ----
        The constraints :math:`p_j \geq 0` and :math:`\sum p_j = 1` are set
        automatically.

    Parameters
    ----------
        p_pri : array, shape(j_,)
        z_ineq : array, shape(l_, j_), optional
        mu_view_ineq : array, shape(l_,), optional
        z_eq : array, shape(m_, j_), optional
        mu_view_eq : array, shape(m_,), optional
        normalize : bool, optional

    Returns
    -------
        p_ : array, shape(j_,)
    """

    # Step 1: Concatenate the inequality and equality view constraints

    if z_ineq is None and z_eq is None:
        # if there is no constraint, then just return p_pri
        return p_pri
    elif z_ineq is None:
        # no inequality constraints
        z = z_eq
        mu_view = mu_view_eq
        l_ = 0
        m_ = len(mu_view_eq)
    elif z_eq is None:
        # no equality constraints
        z = z_ineq
        mu_view = mu_view_ineq
        l_ = len(mu_view_ineq)
        m_ = 0
    else:
        z = np.concatenate((z_ineq, z_eq), axis=0)
        mu_view = np.concatenate((mu_view_ineq, mu_view_eq), axis=0)
        l_ = len(mu_view_ineq)
        m_ = len(mu_view_eq)

    if normalize is True:
        # normalize the constraints
        m_z, s2_z = meancov_sp(z.T)
        s_z = np.sqrt(np.diag(s2_z))
        z = ((z.T - m_z) / s_z).T
        mu_view = (mu_view - m_z) / s_z

    # Step 2: Compute the Lagrange dual function, gradient and Hessian

    # pdf of a discrete exponential family
    def exp_family(theta):
        x = theta @ z + np.log(p_pri)
        phi = logsumexp(x)
        p = np.exp(x - phi)
        p[p < 1e-32] = 1e-32
        p = p / np.sum(p)
        return p

    # minus dual Lagrangian
    def lagrangian(theta):
        x = theta @ z + np.log(p_pri)
        phi = logsumexp(x)  # stable computation of log sum exp
        return phi - theta @ mu_view

    def gradient(theta):
        return z @ exp_family(theta) - mu_view

    def hessian(theta):
        p = exp_family(theta)
        z_ = z.T - z @ p
        return (z_.T * p) @ z_

    # Step 3: Compute optimal Lagrange multipliers and the posterior probabilities

    k_ = l_ + m_  # dimension of the Lagrange dual problem
    theta0 = np.zeros(k_)  # initial value

    if l_ == 0:
        # if there are no inequality constraints, perform the Newton conjugate
        # gradient trust-region algorithm
        options = {'gtol': 1e-10}
        res = minimize(lagrangian, theta0, method='trust-ncg',
                       jac=gradient, hess=hessian, options=options)
    else:
        # otherwise perform sequential least squares programming
        options = {'ftol': 1e-10, 'disp': False, 'maxiter': 1000}
        alpha = -eye(l_, k_)
        constraints = {'type': 'ineq',
                       'fun': lambda theta: alpha @ theta}
        res = minimize(lagrangian, theta0, method='SLSQP', jac=gradient,
                       constraints=constraints, options=options)

    return np.squeeze(exp_family(res['x']))
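# A minimal usage sketch with hypothetical toy scenarios (not part of the library code above):
# impose the view that the expectation of a single variable equals 0.02
p_pri_toy = np.ones(5) / 5                                  # uniform prior over 5 scenarios
z_eq_toy = np.array([[0.01, -0.02, 0.03, 0.00, 0.02]])      # view variable scenarios, shape (1, 5)
mu_eq_toy = np.array([0.02])                                # target expectation under the view
p_post = min_rel_entropy_sp(p_pri_toy, z_eq=z_eq_toy, mu_view_eq=mu_eq_toy)
print(p_post @ z_eq_toy[0])                                 # should be close to 0.02 if the solver converges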
Example #30
w[j_c] = 1 - np.sum(w)
# calculate contributions
es_contrib = beta_new * (w.T @ z_new_sort)
# print percentage contributions
pc_es_contrib = es_contrib / np.sum(es_yh)
print('Percentage contributions to negative expected shortfall')
print('-' * 55)
for k in range(1, k_ + 1):
    print('{:31}'.format(risk_factors[k - 1]) + ':',
          '{: 7.2%}'.format(pc_es_contrib[k]))
print('{:31}'.format('residual') + ':', '{: 7.2%}'.format(pc_es_contrib[0]))
print('')

# marginal contributions to the variance satisfaction measure
# find covariance
_, cov_z_new = meancov_sp(z_new, p)
# calculate contributions
var_contrib = -beta_new * (cov_z_new @ beta_new.T)
# print percentage contributions
pc_var_contrib = var_contrib / neg_var_yh
print('Percentage contributions to variance satisfaction measure')
print('-' * 57)
for k in range(1, k_ + 1):
    print('{:31}'.format(risk_factors[k - 1]) + ':',
          '{: 7.2%}'.format(pc_var_contrib[k]))
print('{:31}'.format('residual') + ':', '{: 7.2%}'.format(pc_var_contrib[0]))

# update output dictionary
output['-ES_k'] = es_contrib
output['-V_k'] = var_contrib
# -