# # Example 1
xi_tilde = np.zeros((t_, n_))
for n in range(n_):
    u = t.cdf(xi[:, n],
              df=10**6,
              loc=mu_marg[n],
              scale=np.sqrt(sigma2_marg[n]))
    u[u <= 10**(-7)] = 10**(-7)
    u[u >= 1 - 10**(-7)] = 1 - 10**(-7)
    xi_tilde[:, n] = t.ppf(u, df=10**6)
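
# An aside (not in the original script): with df=10**6 the Student t used in
# the mapping above is numerically a standard normal, so xi_tilde are
# near-Gaussian invariants. A quick standalone check with scipy:

# +
from scipy.stats import norm

u_chk = np.linspace(0.01, 0.99, 5)
print(np.max(np.abs(t.ppf(u_chk, df=10**6) - norm.ppf(u_chk))))  # negligible (~1e-6)
# -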

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step06): Estimate the unconditional correlation matrix via MLFP

# +
_, sigma2_xi_tilde = fit_locdisp_mlfp(xi_tilde, p=p, nu=10**6)
rho2_xi_tilde, _ = cov_2_corr(sigma2_xi_tilde)
rho2 = rho2_xi_tilde

beta, delta2 = factor_analysis_paf(rho2_xi_tilde, k_)
rho2 = beta @ beta.T + np.diag(delta2)
# -
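
# An aside (not in the original script): the factor-analysis step above shrinks
# the correlation matrix to a low-rank-plus-diagonal form. A minimal standalone
# sketch (hypothetical one-factor loadings) of why beta @ beta.T + np.diag(delta2)
# with unit row norms is again a valid correlation matrix:

# +
beta_chk = np.array([[0.9], [0.7], [0.5]])        # hypothetical loadings
delta2_chk = 1 - np.sum(beta_chk**2, axis=1)      # residual variances give unit diagonal
rho2_chk = beta_chk @ beta_chk.T + np.diag(delta2_chk)
print(np.diag(rho2_chk))                          # [1. 1. 1.]
print(np.all(np.linalg.eigvalsh(rho2_chk) >= 0))  # positive semidefinite
# -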

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_dcc_fit-implementation-step07): Compute the time series of true invariants via DCC fit

params, r2_t, epsi, q2_t_ = fit_dcc_t(xi_tilde, p, rho2=rho2)
c, a, b = params
q2_t_nextstep = c*rho2 +\
                b*q2_t_ +\
                a*(np.array([epsi[-1, :]]).T@np.array([epsi[-1, :]]))
r2_t_nextstep, _ = cov_2_corr(q2_t_nextstep)
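
# An aside (not in the original script): a standalone sketch of the DCC(1, 1)
# recursion applied above, assuming the standard parameterization in which
# c = 1 - a - b pulls the quasi-correlation back toward rho2 (synthetic inputs):

# +
rho2_chk = np.array([[1.0, 0.3], [0.3, 1.0]])  # unconditional correlation
a_chk, b_chk = 0.05, 0.93                      # hypothetical DCC parameters
c_chk = 1 - a_chk - b_chk
eps_chk = np.array([0.8, -1.1])                # latest devolatilized innovation
q2_chk = c_chk*rho2_chk + b_chk*rho2_chk + a_chk*np.outer(eps_chk, eps_chk)
d_chk = np.diag(1 / np.sqrt(np.diag(q2_chk)))
print(d_chk @ q2_chk @ d_chk)                  # next-step correlation, unit diagonal
# -
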
# # Example 2
sigma2_z = sigma2_xz[n_:, n_:]
mu_z = mu_xz[n_:]
mu_x = mu_xz[:n_]
beta = sigma_xz @ np.linalg.inv(sigma2_z)
alpha = mu_x - beta @ mu_z
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_regression_lfm-implementation-step02): Compute expectation and covariance of prediction

mu_xreg_bar = alpha + beta @ mu_z
sigma2_xreg_bar = beta @ sigma2_z @ beta.T

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_regression_lfm-implementation-step03): Compute the r-squared

# +
c2_xz, _ = cov_2_corr(sigma2_xz)

sigma2_x = sigma2_xz[:n_, :n_]
r2 = np.trace(
    sigma_xz @ np.linalg.inv(sigma2_z) @ sigma_xz.T) / np.trace(sigma2_x)
# -
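
# An aside (not in the original script): a standalone toy run of these steps on
# a hand-built joint covariance, to make the formulas concrete (synthetic numbers):

# +
sigma2_xz_chk = np.array([[2.0, 0.6, 0.4],
                          [0.6, 1.5, 0.3],
                          [0.4, 0.3, 1.0]])   # joint cov of (X1, X2, Z)
n_chk = 2
sigma_xz_chk = sigma2_xz_chk[:n_chk, n_chk:]
sigma2_z_chk = sigma2_xz_chk[n_chk:, n_chk:]
beta_chk = sigma_xz_chk @ np.linalg.inv(sigma2_z_chk)
r2_chk = np.trace(sigma_xz_chk @ np.linalg.inv(sigma2_z_chk) @ sigma_xz_chk.T) \
    / np.trace(sigma2_xz_chk[:n_chk, :n_chk])
print(beta_chk, r2_chk)   # the factor explains the fraction r2_chk of total variance
# -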

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_regression_lfm-implementation-step04): Compute joint distribution of residuals and factors

a = np.zeros(n_ + k_)
a[:n_] = -alpha
b = np.eye(n_ + k_)
b[:n_, n_:] = -beta
mu_uz = a + b @ mu_xz
sigma2_uz = b @ sigma2_xz @ b.T
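
# An aside (not in the original script): by construction of the regression
# loadings, the residual-factor covariance block of sigma2_uz vanishes; a quick
# in-place check, relying only on the variables defined above:

print(np.round(sigma2_uz[:n_, n_:], 12))  # ~0: residuals uncorrelated with factors
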
# # Example 3
beta_hat_pc = sigma @ e_hat[:, :k_]  # loadings
gamma_hat_pc = e_hat[:, :k_].T @ sigma_inv  # construction matrix

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step05): Compute the factor realizations and their expectation and covariance

z_hat_pc = (x - m_x_hat) @ gamma_hat_pc.T  # factors
m_z_hat, s2_z_hat = meancov_sp(z_hat_pc)

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step06): Compute the residuals and the joint sample covariance of residuals and factors

u = x - (alpha_hat_pc + z_hat_pc @ beta_hat_pc.T)  # residuals
_, s2_uz_hat = meancov_sp((np.c_[u, z_hat_pc]))

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step07): Compute correlations among residuals

c2_uz_hat, _ = cov_2_corr(s2_uz_hat)
c2_u_hat = c2_uz_hat[:n_, :n_]  # correlation among residuals

# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step08): Compute the truncated covariance of the returns

s2_u_hat = s2_uz_hat[:n_, :n_]
s_u_hat = np.sqrt(np.diag(s2_u_hat))
s2_x_trunc = beta_hat_pc@s2_z_hat@beta_hat_pc.T +\
                    np.diag(np.diag(s2_u_hat))  # truncated covariance (residual variances, not std devs, on the diagonal)

# ## [Step 9](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step09): Estimate the standard deviations of the portfolio returns using the sample covariance and the truncated covariance

# +
w1 = 1 / n_ * np.ones((n_, 1))  # equal-weights portfolio

w2 = np.zeros((n_, 1))  # long-short portfolio
# # Example 4
        epsi_tilde[:, i, l] = tstu.ppf(u[:, i], nu_vec_cop[l])

    # estimate copula parameters with maximum likelihood
    _, sig2 = \
        fit_locdisp_mlfp_difflength(epsi_tilde[:, :, l],
                                    p=p_copula,
                                    nu=nu_vec_cop[l],
                                    threshold=10 ** -3,
                                    maxiter=1000)

    # shrinkage: factor analysis
    beta, delta2 = factor_analysis_paf(sig2, k_)
    sig2_fa = beta @ beta.T + np.diag(delta2)

    # compute correlation matrix
    rho2_copula_vec[:, :, l], _ = cov_2_corr(sig2_fa)

    # compute log-likelihood at times with no missing values
    llike_nu[l] = np.sum(p_copula_bonds * np.log(
        mvt_pdf(epsi_bonds, np.zeros(i_), rho2_copula_vec[:, :, l],
                nu_vec_cop[l])))

# choose nu that gives the highest log-likelihood
l_nu = np.argmax(llike_nu)
db_estimation_copula = {
    'nu': int(nu_vec_cop[l_nu]),
    'rho2': rho2_copula_vec[:, :, l_nu]
}
# -
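
# An aside (not in the original script): a standalone sketch of how the
# estimated t copula parameters (nu, rho2) could be used to simulate grades,
# with plain numpy/scipy instead of the arpym helpers (hypothetical inputs):

# +
rng_chk = np.random.default_rng(0)
nu_chk = 5                                     # hypothetical degrees of freedom
rho2_chk = np.array([[1.0, 0.4], [0.4, 1.0]])  # hypothetical copula correlation
z_chk = rng_chk.standard_normal((10000, 2)) @ np.linalg.cholesky(rho2_chk).T
w_chk = rng_chk.chisquare(nu_chk, (10000, 1))
u_chk = tstu.cdf(z_chk / np.sqrt(w_chk / nu_chk), nu_chk)  # copula grades in (0, 1)
# -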

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step03-implementation-step06): Estimate the distribution of the credit structural invariant
# # Example 5
                      parse_dates=True)
db_epsi = db_epsi.iloc[:, :i_]

dates = db_epsi.index
t_ = len(dates)
stocks_names = db_epsi.columns
epsi = db_epsi.values

# Location-dispersion
db_locdisp = pd.read_csv(path_temp + 'db_fit_garch_stocks_locdisp.csv')
mu_hat = db_locdisp.loc[:, 'mu_hat'].values[:i_]
sig2_hat = db_locdisp.loc[:, 'sig2_hat'].values
i_tot = int(np.sqrt(len(sig2_hat)))
sig2_hat = sig2_hat.reshape(i_tot, i_tot)[:i_, :i_]

sig2_hat = cov_2_corr(sig2_hat)[0]
phi2_hat = np.linalg.solve(sig2_hat, np.eye(i_))
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-implementation-step01): Glasso shrinkage

k = int(i_ * (i_ - 1))  # shrink all covariances to 0
sig2_glasso, _, phi2_glasso, lam, conv, _ =\
    markov_network(sig2_hat, k, lambda_vec)

# ## Plots

# +
plt.style.use('arpm')

# Graph
# # Example 6
i = v @ mu_r_equil + eta * np.sqrt(np.diag(v @ sig2_hat_r @ v.T))
sig2_i_mu = ((1 - c) / (tau * c)) * (v @ sig2_hat_r @ v.T)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step05): Compute effective rank corresponding to the pick matrix

# +


def eff_rank(s2):
    lam2_n, _ = np.linalg.eig(s2)
    wn = lam2_n / np.sum(lam2_n)
    return np.exp(-wn @ np.log(wn))


eff_rank_v = eff_rank(cov_2_corr(v @ sig2_hat_r @ v.T)[0])  # avoid shadowing eff_rank()
# -
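
# An aside (not in the original script): two limiting cases make the
# effective-rank definition concrete; a standalone check (eigvalsh is used here
# since the inputs are symmetric):

# +
def _eff_rank_chk(s2):
    lam2 = np.linalg.eigvalsh(s2)
    w = lam2 / np.sum(lam2)
    return np.exp(-w @ np.log(w))


print(_eff_rank_chk(np.eye(4)))                               # 4.0: full rank
print(_eff_rank_chk(np.full((4, 4), 0.99) + 0.01*np.eye(4)))  # ~1: near rank one
# -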

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step06): Compute Black-Litterman posterior parameters

mu_m_pos, cv_pos_pred = black_litterman(mu_r_equil, sig2_hat_r, tau, v, i,
                                        sig2_i_mu)

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_bl_equilibrium_ret-implementation-step07): Compute Black-Litterman posterior parameters in the case of uninformative views

# +
# compute vector quantifying the views in covariance
sig2_unifview = ((1 - c_uninf) / c_uninf) * v @ sig2_hat_r @ v.T

mu_m_pos, cv_pos_pred = black_litterman(mu_r_equil, sig2_hat_r, tau, v, i,
                                        sig2_unifview)
# # Example 7
z = (v_sector[1:, :] - v_sector[:-1, :]) / v_sector[:-1, :]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step02): Compute OLSFP estimates and residuals

alpha, beta, s2, u = fit_lfm_ols(x, z)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step03): Compute the joint covariance and correlation

# +
# compute covariance
mu_uz, sig2_uz = meancov_sp(np.hstack((u, z)))
sig2_u = sig2_uz[:n_, :n_]
sig2_z = sig2_uz[n_:, n_:]

# compute correlation
c2_uz, _ = cov_2_corr(sig2_uz)
c_uz = c2_uz[:n_, n_:]
c2_u = np.tril(c2_uz[:n_, :n_], -1)
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_reg_truncated_lfm-implementation-step04): Compute standard deviations of two portfolios

# +
w_1 = np.ones(n_) / n_  # equal weight portfolio
w_2 = np.zeros(n_)  # long/short portfolio
w_2[n_long] = 0.69158715
w_2[n_short] = np.array([-0.67752045, -0.01406671])

_, sig2_x = meancov_sp(x)
sig2_x_trunc = beta @ sig2_z @ beta.T + np.diag(np.diag(sig2_u))
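
# The script is cut off before the comparison it sets up; a plausible
# continuation (a sketch, not the original code) compares the portfolio
# standard deviations under the sample and the truncated covariance:
s_1, s_1_trunc = np.sqrt(w_1 @ sig2_x @ w_1), np.sqrt(w_1 @ sig2_x_trunc @ w_1)
s_2, s_2_trunc = np.sqrt(w_2 @ sig2_x @ w_2), np.sqrt(w_2 @ sig2_x_trunc @ w_2)
# -
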
# # Example 8
alpha_mlfp_t, beta_mlfp_t, sig2_u_mlfp_t, _ = fit_lfm_mlfp(x, z, p, nu)

# compute r-squared
u_mlfp_t = x - alpha_mlfp_t - z @ beta_mlfp_t.T
r2_mlfp_t = multi_r2(sig2_u_mlfp_t, sig2_hat)
# -

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-implementation-step05): Bayesian loadings

# +
# Prior
beta_pri = np.zeros((n_, k_))
sig2_z_pri = sig2_z_hat
t_pri = pri_param_load * t_

sig2_pri = np.diag(cov_2_corr(sig2_hat)[1]**2)
nu_pri = pri_param_load * t_

# Posterior
t_pos = t_pri + t_
nu_pos = nu_pri + t_

beta_pos = (t_pri*beta_pri@sig2_z_pri + t_*beta_ols@sig2_z_hat) @\
    np.linalg.solve(t_pri*sig2_z_pri + t_*sig2_z_hat, np.eye(k_))

sig2_z_pos = 1 / t_pos * (t_pri * sig2_z_pri + t_ * sig2_z_hat)

sig2_pos_load = 1 / nu_pos * (t_ * sig2_hat + nu_pri * sig2_pri +
                              t_pri * beta_pri @ sig2_z_pri @ beta_pri.T +
                              t_ * beta_ols @ sig2_z_hat @ beta_ols.T -
                              t_pos * beta_pos @ sig2_z_pos @ beta_pos.T)
kend_x = 2 / np.pi * np.arcsin(rho_12)
kend_x1v_put_h = -1
kend_x1v_call_h = 1
kend_x2v_put_h = -2 / np.pi * np.arcsin(rho_12)
kend_x2v_call_h = 2 / np.pi * np.arcsin(rho_12)
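
# An aside (not in the original script): a standalone Monte Carlo check of the
# closed form tau = 2/pi * arcsin(rho) for a bivariate normal (hypothetical rho):

# +
from scipy.stats import kendalltau

rng_chk = np.random.default_rng(0)
rho_chk = 0.5
z_chk = rng_chk.multivariate_normal([0, 0], [[1, rho_chk], [rho_chk, 1]], size=20000)
print(kendalltau(z_chk[:, 0], z_chk[:, 1])[0], 2/np.pi*np.arcsin(rho_chk))  # close
# -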

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step05): Compute Spearman rho

# compute grades scenarios
u_x, _, _ = cop_marg_sep(x)
u_v_p, _, _ = cop_marg_sep(v_put_h)
u_v_c, _, _ = cop_marg_sep(v_call_h)

# Spearman rho
_, cov_u = meancov_sp(np.c_[u_x[:, 0], u_v_p, u_v_c, u_x[:, 1]])
spear, _ = cov_2_corr(cov_u)

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step06): Compute correlation

cov = np.cov(np.c_[x[:, 0], v_put_h, v_call_h, x[:, 1]].T)
corr, _ = cov_2_corr(cov)

# ## Plots

# +
plt.style.use('arpm')
violet = [170/255, 85/255, 187/255]
teal = [60/255, 149/255, 145/255]

f = plt.figure()
mydpi = 72.0
# # Example 10
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-parameters)

tau_hl = 180  # half life
k_ = 25  # number of hidden factors

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step00): Load data

path = '../../../databases/temporary-databases/'
xi = np.array(pd.read_csv(path + 'db_GARCH_residuals.csv', index_col=0))
t_, n_ = xi.shape

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step01): Compute the HFP correlation

p = exp_decay_fp(t_, tau_hl)
_, sigma2 = meancov_sp(xi, p)
c2, _ = cov_2_corr(sigma2)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step02): Compute the loadings and residual variances via PAF factor analysis

beta, delta2 = factor_analysis_paf(c2, k_)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_shrinkage_factor-implementation-step03): Compute the factor analysis correlation and the Frobenius norm

c2_paf = beta @ beta.T + np.diag(delta2)
d_fro = np.linalg.norm(c2 - c2_paf, ord='fro') / \
        np.linalg.norm(c2, ord='fro') * 100.

# ## Plots

# +
plt.style.use('arpm')
# # Example 11
                      parse_dates=True)
db_epsi = db_epsi.iloc[:, :i_]

dates = db_epsi.index
t_ = len(dates)
stocks_names = db_epsi.columns
epsi = db_epsi.values

# Location-dispersion
db_locdisp = pd.read_csv(path_temp + 'db_fit_garch_stocks_locdisp.csv')
mu_hat = db_locdisp.loc[:, 'mu_hat'].values[:i_]
sig2_hat = db_locdisp.loc[:, 'sig2_hat'].values
i_tot = int(np.sqrt(len(sig2_hat)))
sig2_hat = sig2_hat.reshape(i_tot, i_tot)[:i_, :i_]

sig2_hat = cov_2_corr(sig2_hat)[0]
phi2_hat = np.linalg.solve(sig2_hat, np.eye(i_))
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_bayesian_estimation-implementation-step01): Bayesian estimation

# +
# Prior
mu_pri = np.zeros(i_)
sig2_pri = np.diag(cov_2_corr(sig2_hat)[1]**2)
t_pri = int(pri_t_ * t_)
nu_pri = int(pri_t_ * t_)

# Posterior
t_pos = t_pri + t_
nu_pos = nu_pri + t_
# # Example 12
from arpym.tools import add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-parameters)

h2 = 0.01  # bandwidth
x = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]).T  # joint scenarios
p = np.array([0.33, 0.10, 0.20, 0.37])  # probabilities

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-implementation-step01): Compute expectation and covariance

m, s2 = meancov_sp(x, p)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-implementation-step02): Compute correlation matrix

c2, _ = cov_2_corr(s2)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_scen_prob_pdf-implementation-step03): Compute the scenario-probability pdf

# +
# grid points for pdf evaluation
x1_grid = np.arange(np.min(x[:, 0]) - 0.5, np.max(x[:, 0]) + 0.5, 0.025)
x2_grid = np.arange(np.min(x[:, 1]) - 0.5, np.max(x[:, 1]) + 0.5, 0.025)
x_grid = np.array([[x1, x2] for x1 in x1_grid for x2 in x2_grid])

# scenario-probability pdf
f = pdf_sp(h2, x_grid, x, p)
# -
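
# An aside (not in the original script): a minimal standalone sketch of what a
# scenario-probability pdf with Gaussian kernel and bandwidth h2 computes,
# assuming pdf_sp is a probability-weighted kernel estimator of this form:

# +
def _pdf_sp_chk(h2, x_eval, x_scen, p_scen):
    d2 = np.sum((x_eval[:, None, :] - x_scen[None, :, :])**2, axis=-1)
    kern = np.exp(-d2 / (2*h2)) / (2*np.pi*h2)**(x_scen.shape[1] / 2)
    return kern @ p_scen


print(_pdf_sp_chk(0.01, np.array([[0., 0.]]), x, p))  # density near scenario (0, 0)
# -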

# ## Plots
mu_x, sig2_x = meancov_sp(x)
beta_ = beta.T / np.diag(sig2_x)
gamma = np.linalg.solve(beta_ @ beta, beta_)
proj = beta @ gamma
alpha_cs = mu_x - proj @ mu_x

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_cross_section_truncated_lfm-implementation-step03): Compute cross-sectional factors and residuals

z_cs = x @ gamma.T
u_cs = x - alpha_cs - z_cs @ beta.T

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_cross_section_truncated_lfm-implementation-step04): Estimate correlations between exogenous and cross-sectional factors

_, sig2_zz = meancov_sp(np.hstack((z, z_cs)))
c2_zz, _ = cov_2_corr(sig2_zz)  # joint correlation
c_zz = np.diag(c2_zz[:k_, k_:])

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_cross_section_truncated_lfm-implementation-step05): Compute the joint covariance and correlation

# +
mu_uz, sig2_uz = meancov_sp(np.hstack((u_cs, z_cs)))
sig2_u = sig2_uz[:n_, :n_]
sig2_z = sig2_uz[n_:, n_:]

c2_uz, _ = cov_2_corr(sig2_uz)
c_uz = c2_uz[:n_, n_:]
c2_u = np.tril(c2_uz[:n_, :n_], -1)
# -

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_cross_section_truncated_lfm-implementation-step06): Compute the risk premia
# # Example 14
df = pd.read_csv(path + 'data.csv', index_col=0)
y = np.array(df.loc[:, tau.astype('str')])
y = y[1800:, ]  # remove missing data
fx_df = pd.read_csv(path + 'data.csv', usecols=['dates'],
                    parse_dates=['dates'])
fx_df = fx_df[1801:]

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step01): Compute invariants

x = np.diff(y, n=1, axis=0)
t_, n_ = x.shape

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step02): Compute HFP mean, covariance, correlation and vector of standard deviations

m_hat_HFP_x, s2_hat_HFP_x = meancov_sp(x)
c2_HFP_x, s_vec = cov_2_corr(s2_hat_HFP_x)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step03): Fit and compute the Toeplitz cross-diagonal form

c2_star, gamma_star = min_corr_toeplitz(c2_HFP_x, tau)
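
# An aside (not in the original script): assuming the fitted parametric form is
# c2[i, j] = gamma ** |tau_i - tau_j| (an assumption about min_corr_toeplitz),
# the cross-diagonal structure can be rebuilt directly; a standalone sketch:

# +
tau_chk = np.array([1., 2., 5., 10.])
gamma_chk = 0.9                                  # hypothetical decay parameter
c2_chk = gamma_chk ** np.abs(tau_chk[:, None] - tau_chk[None, :])
print(np.round(c2_chk, 3))                       # correlation decays in |tau_i - tau_j|
# -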

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_yield_change_correlation-implementation-step04): Save the data

# +
output = {
          'tau': pd.Series(tau),
          'n_': pd.Series(x.shape[1]),
          'gamma_star': pd.Series(gamma_star),
          'm_hat_HFP_x': pd.Series(m_hat_HFP_x),
          's2_hat_HFP_x': pd.Series((s2_hat_HFP_x.reshape(-1))),
          's_vec': pd.Series(s_vec),
# view quantification parameters
mu_view = np.array([1.02, -0.50])
sig2_view = np.array([[0.19, 0.09], [0.09, 0.44]])

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_partial_view-implementation-step01): Compute effective ranks corresponding to the pick matrices

# +


def eff_rank(s2):
    lam2_n, _ = np.linalg.eig(s2)
    w_n = lam2_n / np.sum(lam2_n)
    return np.exp(-w_n @ np.log(w_n))


eff_rank_v_mu = eff_rank(cov_2_corr(v_mu @ sig2_x_base @ v_mu.T)[0])
eff_rank_v_sig = eff_rank(cov_2_corr(v_sig @ sig2_x_base @ v_sig.T)[0])
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_partial_view-implementation-step02): Compute updated parameters

mu_x_upd, sig2_x_upd = min_rel_entropy_normal(mu_x_base, sig2_x_base, v_mu,
                                              mu_view, v_sig, sig2_view)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_partial_view-implementation-step03): Compute projectors

# +
k_ = len(mu_view)  # view variables dimension
n_ = len(mu_x_base)  # market dimension

v_mu_inv = sig2_x_base @ v_mu.T @ np.linalg.solve(v_mu @ sig2_x_base @ v_mu.T,
# # Example 16
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step02): Generate the MC scenarios of the compounded returns

# +
c_tnow_thor = simulate_normal(mu, sigma2, j_)  # compounded returns scenarios
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step03): Compute the scenarios of the linear returns

# +
# linear returns scenarios
r_tnow_thor_j = np.exp(c_tnow_thor) - 1
# linear returns expectation and covariance
mu_r, sigma2_r = meancov_sp(r_tnow_thor_j)
# correlation and volatility vector
c2_r, sigmavol_r = cov_2_corr(sigma2_r)
# -
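
# An aside (not in the original script): for C ~ N(mu, sigma2) the linear
# return R = exp(C) - 1 has closed-form mean exp(mu_n + sigma2_nn / 2) - 1, so
# the MC estimate mu_r above can be cross-checked; a standalone sketch with
# hypothetical inputs:

# +
mu_chk = np.array([0.05, 0.02])
sigma2_chk = np.array([[0.04, 0.01], [0.01, 0.09]])
print(np.exp(mu_chk + np.diag(sigma2_chk)/2) - 1)  # exact E[R] for these inputs
# -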

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step04): Compute the MC scenarios of P&L's

# +
# P&L scenarios
pi_tnow_thor = r_tnow_thor_j * v_tnow
# P&L expectation and covariance
mu_pi, sigma2_pi = meancov_sp(pi_tnow_thor)
# -

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step05): Compute the maximum Sharpe ratio portfolio

# +
# maximum Sharpe ratio portfolio