# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_spectrum_filt-implementation-step01): Compute the log-returns

epsi = np.diff(np.log(v), axis=0)  # log-returns
t_ = epsi.shape[0]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_spectrum_filt-implementation-step02): Standardize time series of invariants

# standardized invariants
epsi_tilde = (epsi - np.mean(epsi, axis=0)) / np.std(epsi, axis=0)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_spectrum_filt-implementation-step03): Compute the sample covariance matrix and its eigenvalues

sigma2_hat = np.cov(epsi_tilde.T)  # sample covariance
_, lambda2_hat = pca_cov(sigma2_hat)  # sample spectrum

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_spectrum_filt-implementation-step04): Perform spectrum shrinkage

sigma2_bar, lambda2_bar, k_, err, y_mp, x_mp, dist = \
    spectrum_shrink(sigma2_hat, t_)

# ## Plots

# +
plt.style.use('arpm')

c0_bl = [0.27, 0.4, 0.9]
c1_or = [1, 0.5, 0.1]

# spectrum plot
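
# Sanity check on the spectrum above (an illustrative sketch, not part of the
# original script): by the spectral theorem the eigenvalues returned by pca_cov
# are non-negative and sum to the trace of sigma2_hat. Note the diagonal of
# sigma2_hat is t_ / (t_ - 1) rather than exactly 1, since np.cov uses ddof=1
# while np.std uses ddof=0.
assert np.all(lambda2_hat >= -1e-10)  # covariance spectrum is non-negative
assert np.isclose(np.sum(lambda2_hat), np.trace(sigma2_hat))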
from arpym.statistics import simulate_normal
from arpym.tools import histogram_sp, pca_cov, plot_ellipse, add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-parameters)

mu_x = np.array([0, 0])  # expectation
rho = 0.75  # correlation
sigma2_x = np.array([[1, rho], [rho, 1]])  # covariance
r = 2  # radius
j_ = 10000  # number of scenarios

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step01): Compute eigenvalues and eigenvectors

e, lambda2 = pca_cov(sigma2_x)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step02): Compute simulations of the target and factors

x = simulate_normal(mu_x, sigma2_x, j_)
z = (x - mu_x) @ e

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step03): Perform computations for plots

x_bar = plot_ellipse(mu_x, sigma2_x, r=r, display_ellipse=False,
                     plot_axes=True, plot_tang_box=True, color='k')
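
# Illustrative check (a minimal sketch, not in the original script): since the
# columns of e diagonalize sigma2_x, the factors z should be approximately
# uncorrelated across the j_ scenarios, with sample variances close to lambda2.
sigma2_z = np.cov(z.T)
print('sample covariance of the factors:\n', np.round(sigma2_z, 2))
print('eigenvalues:', np.round(lambda2, 2))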
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_spectral_theorem-implementation-step01): Test the eigenvectors and eigenvalues

# +
is_eig_1 = np.allclose(np.matmul(s2, e_1), lambda2_1 * e_1)
is_eig_2 = np.allclose(np.matmul(s2, e_2), lambda2_2 * e_2)
print((lambda2_1, e_1), 'is an eigenvalue/eigenvector pair:', is_eig_1)
print((lambda2_2, e_2), 'is an eigenvalue/eigenvector pair:', is_eig_2)

# if the inputs aren't eigenvalue/eigenvector pairs, calculate them
if not (is_eig_1 and is_eig_2):
    # check that s2 is symmetric and positive (semi)definite (Sylvester's criterion)
    if np.allclose(s2[0][1], s2[1][0]) \
            and np.linalg.det(s2) >= 0 and s2[0][0] >= 0:
        # calculate eigenvalues and eigenvectors
        eigvecs, eigvals = pca_cov(s2)
        lambda2_1 = eigvals[0]
        e_1 = eigvecs[:, 0]
        lambda2_2 = eigvals[1]
        e_2 = eigvecs[:, 1]
        is_eig_new_1 = np.allclose(np.matmul(s2, e_1), lambda2_1 * e_1)
        is_eig_new_2 = np.allclose(np.matmul(s2, e_2), lambda2_2 * e_2)
        print((lambda2_1, e_1), 'is an eigenvalue/eigenvector pair:',
              is_eig_new_1)
        print((lambda2_2, e_2), 'is an eigenvalue/eigenvector pair:',
              is_eig_new_2)
    else:
        print('s2 must be symmetric and positive (semi)definite')
        print('Make sure s2[0][1]=s2[1][0], s2[0][0]>=0 and '
              'np.linalg.det(s2)>=0')
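
# Spectral theorem in action (a minimal sketch added for illustration): a
# symmetric matrix is recovered from its eigenvalue/eigenvector pairs as
# s2 = lambda2_1 * e_1 e_1' + lambda2_2 * e_2 e_2'.
s2_rebuilt = lambda2_1 * np.outer(e_1, e_1) + lambda2_2 * np.outer(e_2, e_2)
print('s2 recovered from its spectral decomposition:',
      np.allclose(s2, s2_rebuilt))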
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step01): Compute linear returns of stocks

x = v[1:, :] / v[:-1, :] - 1  # linear returns
t_ = x.shape[0]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step02): Estimate expectation and covariance of X and define sigma matrix

m_x_hat, s2_x_hat = meancov_sp(x)  # HFP moments
sigma2 = np.diag(np.diag(s2_x_hat))  # scale matrix
sigma = np.sqrt(sigma2)
sigma_inv = np.diag(1 / np.diag(sigma))

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step03): Compute principal component decomposition

e_hat, lambda2_hat = pca_cov(sigma_inv @ s2_x_hat @ sigma_inv)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step04): Estimate the loadings, the factor extraction matrix and shift

alpha_hat_pc = m_x_hat  # shift
beta_hat_pc = sigma @ e_hat[:, :k_]  # loadings
gamma_hat_pc = e_hat[:, :k_].T @ sigma_inv  # construction matrix

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step05): Compute the factor realizations and their expectation and covariance

z_hat_pc = (x - m_x_hat) @ gamma_hat_pc.T  # factors
m_z_hat, s2_z_hat = meancov_sp(z_hat_pc)

# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step06): Compute the residuals and the joint sample covariance of residuals and factors

u = x - (alpha_hat_pc + z_hat_pc @ beta_hat_pc.T)  # residuals
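
# Consistency check (a sketch, assuming pca_cov returns orthonormal
# eigenvectors): by construction gamma_hat_pc @ beta_hat_pc =
# e_k' sigma_inv sigma e_k = I_k, i.e. the factor-extraction matrix is a
# left inverse of the loadings.
print('gamma @ beta = I_k:',
      np.allclose(gamma_hat_pc @ beta_hat_pc, np.eye(k_)))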
lambda_hat = np.zeros(t_)
for t in range(0, t_):
    lambda_hat[t], _ = ewm_meancov(z_char[:t + 1], tau_hl_lam)

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_characteristic_port_rev-implementation-step07): return vs expected returns, symm. regression line

# +
exp_ret = lambda_hat[-1] * beta[-1, :] / v[-2, :]
real_ret = pl_real[-1, :] / v[-2, :]

# symmetric regression
exp_real = np.concatenate((exp_ret.reshape(-1, 1),
                           real_ret.reshape(-1, 1)), axis=1)
mu_exp_real, sig2_exp_real = meancov_sp(exp_real)
e, _ = pca_cov(sig2_exp_real)
mu_real = mu_exp_real[1]
mu_exp = mu_exp_real[0]
beta_sym = -e[1, 1] / e[0, 1]
alpha_sym = mu_exp - beta_sym * mu_real
x = 2 * np.arange(-10, 11) / 10
y = beta_sym * x + alpha_sym
# -

# ## Save characteristics portfolios

output = {'w_shift': pd.Series(w_shift),
          'h_char': pd.Series(h_char.reshape(t_ * n_)),
          'n_': pd.Series(n_)}
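
# ewm_meancov, used above to estimate lambda_hat, presumably applies
# exponential weights with half-life tau_hl_lam. A minimal sketch of the
# weighted mean under that assumption (weights halve every tau_hl
# observations, most recent observation weighted most):
def ewm_mean_sketch(x_series, tau_hl):
    t_x = len(x_series)
    w = np.exp(-np.log(2) / tau_hl * np.arange(t_x)[::-1])  # half-life weights
    return (w / np.sum(w)) @ x_series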
mu_x = np.array([1., 0., 3.])  # expectation of the target variable
sig2_x = np.array([[1., -0.4, 0.68],
                   [-0.4, 1., -0.58],
                   [0.68, -0.58, 1.]])  # covariance of the target variable
sig2 = np.eye(3)  # scale matrix
n_ = len(mu_x)  # target dimension
k_ = 2  # number of factors
j_ = 1000  # number of scenarios

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_principal_component_lfm-implementation-step00): Compute Riccati root of the scale matrix

sig = transpose_square_root(sig2)

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_principal_component_lfm-implementation-step01): Compute the eigenvalues and eigenvectors

sig_inv = np.linalg.solve(sig, np.eye(n_))
e, lambda2 = pca_cov(sig_inv @ sig2_x @ sig_inv)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_principal_component_lfm-implementation-step02): Compute the loadings, factor-construction matrix and r-square

# +
beta = sig @ e[:, :k_]  # principal-component loadings
gamma = e[:, :k_].T @ sig_inv  # factor-construction matrix
alpha = mu_x  # optimal coefficient a
r2_sig2 = np.sum(lambda2[:k_]) / np.sum(lambda2)  # r-squared
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_principal_component_lfm-implementation-step03): Compute mean and cov of factors and factor-recovered variables

# +
mu_z = 0
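
# Check (a sketch, not in the original script): with gamma = e_k' sig_inv, the
# factor covariance gamma @ sig2_x @ gamma' reduces to diag(lambda2[:k_]), so
# the principal factors are uncorrelated with variances equal to the top
# eigenvalues.
print(np.allclose(gamma @ sig2_x @ gamma.T, np.diag(lambda2[:k_])))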
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_spectral_representation-implementation-step02): Choose subsequent observations

t0 = int(t_ / 2)  # choose far from the initial point to have more stationarity
x_vec = x[t0 - t:t0 + t + 1, :]
mu_x_vec = mu_x * np.ones((2 * t + 1, j_))
t_vec = np.linspace(-t, t, 2 * t + 1)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_spectral_representation-implementation-step03): Compute covariance matrix of random vector x_vec

k_x = b**(np.arange(2 * t + 1)) / (1 - b**2) * sigma2_eps  # autocovariance function
cv_x = toeplitz(k_x)  # autocovariance matrix

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_spectral_representation-implementation-step04): Compute eigenvectors/eigenvalues

# +
e, lambda2 = pca_cov(cv_x)
if b < 0:
    ind_asc = np.argsort(lambda2)
    lambda2 = lambda2[ind_asc]
    e = e[:, ind_asc]

# interleave the eigenvalues so that the first one ends up in the middle
lambda2_new = []
ind_e = []
for n in range(1, 2 * t + 2):
    if n % 2 == 1:
        lambda2_new = np.append(lambda2_new, lambda2[n - 1])
        ind_e = np.append(ind_e, n - 1)
    else:
        lambda2_new = np.append(lambda2[n - 1], lambda2_new)
        ind_e = np.append(n - 1, ind_e)
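
# Illustrative check of the autocovariance function above (a sketch assuming
# the process is the stationary AR(1) x_t = b * x_{t-1} + eps_t with
# Var(eps_t) = sigma2_eps): simulate a long path and compare its sample
# autocovariances with the theoretical k_x. The seed is hypothetical.
rng = np.random.default_rng(0)
t_sim = 100000
x_sim = np.zeros(t_sim)
for s in range(1, t_sim):
    x_sim[s] = b * x_sim[s - 1] + rng.normal(scale=np.sqrt(sigma2_eps))
for h in range(3):  # first few lags
    k_hat = np.mean((x_sim[:t_sim - h] - np.mean(x_sim)) *
                    (x_sim[h:] - np.mean(x_sim)))
    print('lag', h, '  theoretical:', k_x[h], '  sample:', k_hat)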
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_ellipsoid_multiv_exp_cov-parameters)

mu = np.array([1, 1])  # expectation vector
sigma2 = np.array([[1, 0.7], [0.7, 1]])  # covariance matrix

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ellipsoid_multiv_exp_cov-implementation-step01): Generate points on the unit sphere

theta = np.linspace(0, 2*np.pi, num=200)
y = np.array([[np.cos(angle), np.sin(angle)] for angle in theta]).T

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ellipsoid_multiv_exp_cov-implementation-step02): Calculate spectral decomposition

e, lambda2_vec = pca_cov(sigma2)
e[[1, 0]] = e[[0, 1]]
diag_lambda = np.diag(np.sqrt(lambda2_vec))

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_ellipsoid_multiv_exp_cov-implementation-step03): Stretch the unit circle: multiply by the square roots of the eigenvalues

z = np.matmul(diag_lambda, y)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_ellipsoid_multiv_exp_cov-implementation-step04): Rotate the ellipsoid: multiply by the eigenvectors

u = np.matmul(e, z)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_ellipsoid_multiv_exp_cov-implementation-step05): Translate the ellipsoid: add the expectation vector

x = (u.T + mu).T
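
# Verification (a sketch for the inputs above, not part of the original
# script): every point x on the ellipsoid satisfies the Mahalanobis equation
# (x - mu)' sigma2^{-1} (x - mu) = 1.
dev = x - mu.reshape(-1, 1)
mahal = np.einsum('ij,jk,ki->i', dev.T, np.linalg.inv(sigma2), dev)
print('all points on the unit Mahalanobis ellipsoid:', np.allclose(mahal, 1))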
n_ = int(data['n_'][0])
tau = data['tau'].values[:n_]
m_hat_HFP_x = data['m_hat_HFP_x'].values[:n_]
s2_hat_HFP_x = pd.read_csv(
    '../../../databases/temporary-databases/db_pca_yield_tools.csv',
    usecols=['s2_hat_HFP_x']).values.reshape(n_, n_)
s_vec = data['s_vec'].values[:n_]
yields = pd.read_csv(
    '../../../databases/temporary-databases/db_pca_yield_data.csv')
l_ = int(yields['l_'][0])
y = pd.read_csv('../../../databases/temporary-databases/db_pca_yield_data.csv',
                usecols=['y']).values.reshape(l_, n_)

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_pca_empirical-implementation-step01): Compute eigenvectors, eigenvalues and r2

e_hat, lambda2_hat = pca_cov(s2_hat_HFP_x)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_pca_empirical-implementation-step02): Compute factor shifting

# +
y_pred = []
for k in range(k_):
    shift = r * np.sqrt(lambda2_hat[k]) * e_hat[:, k]
    y_temp = np.zeros((n_, 3))
    y_temp[:, 0] = y[0, :]
    y_temp[:, 1] = y[0, :] + shift
    y_temp[:, 2] = y[0, :] - shift
    y_pred.append(y_temp)
# -
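
# The r2 mentioned in Step 1 (a sketch of the usual definition): the cumulative
# fraction of total variance explained by the first factors. For yield curves
# the first three factors (level, slope, curvature) typically explain most of
# the variance.
r2_k = np.cumsum(lambda2_hat) / np.sum(lambda2_hat)
print('cumulative r2 of the first three factors:', np.round(r2_k[:3], 4))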
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_normal_mean_regression-implementation-step03): Expectation and covariance of (Xreg, U)

# +
a = np.array([alpha, -alpha]).reshape(-1, 1)
b = np.array([[0, beta], [1, -beta]])
mu_xreg_u = a + b @ np.reshape(mu_xz, (-1, 1))
sigma2_xreg_u = b @ sigma2_xz @ b.T
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_normal_mean_regression-implementation-step04): Expectation-covariance ellipsoid computations

# +
# covariance matrix decomposition
e, lambda2 = pca_cov(sigma2_xreg_u)
diag_lambda = np.diagflat(np.sqrt(lambda2))

# expectation-covariance ellipsoid computations
theta = np.linspace(0, 2*np.pi, 200)  # angle
y = [np.cos(theta), np.sin(theta)]  # circle parametrization
axes_points = np.array([[1, -1, 0, 0], [0, 0, 1, -1]])
ellipse = mu_xreg_u + e @ diag_lambda @ y
axes_points_transformed = mu_xreg_u + e @ diag_lambda @ axes_points
# -

# ## Plots

# +
# Compute the rectangles' vertices and edges
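
# Check (an illustrative sketch, not part of the original script): the columns
# of axes_points_transformed are the endpoints of the ellipsoid's principal
# semi-axes, so their distances from the center equal the square roots of the
# corresponding eigenvalues.
dist_from_center = np.linalg.norm(axes_points_transformed - mu_xreg_u, axis=0)
print(np.round(dist_from_center, 4), 'vs', np.round(np.sqrt(lambda2), 4))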
lambda_hat = np.zeros(t_)
for t in range(0, t_):
    lambda_hat[t], _ = ewm_meancov(z_char[:t + 1], tau_hl_lam)

# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_flexible_characteristic_port_rev-implementation-step07): return vs expected returns, symm. regression line

# +
exp_ret = lambda_hat[-1] * beta[-1, :] / v[-2, :]
real_ret = pl_real[-1, :] / v[-2, :]

# symmetric regression
exp_real = np.concatenate((exp_ret.reshape(-1, 1),
                           real_ret.reshape(-1, 1)), axis=1)
mu_exp_real, sig2_exp_real = meancov_sp(exp_real)
e, _ = pca_cov(sig2_exp_real)
mu_real = mu_exp_real[1]
mu_exp = mu_exp_real[0]
beta_sym = -e[1, 1] / e[0, 1]
alpha_sym = mu_exp - beta_sym * mu_real
x = 2 * np.arange(-10, 11) / 10
y = beta_sym * x + alpha_sym
# -

# ## Save characteristics portfolios

output = {'w_shift': pd.Series(w_shift),
          'h_char': pd.Series(h_char.reshape(t_ * n_)),
          'n_': pd.Series(n_)}
df = pd.DataFrame(output)
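
# Design note on the symmetric regression above (a sketch): the symmetric, or
# orthogonal, regression line minimizes distances perpendicular to the line and
# is recovered from the eigenvectors of the 2x2 covariance, unlike OLS, which
# minimizes vertical distances only. A quick comparison with the OLS slope of
# expected on realized returns:
beta_ols = sig2_exp_real[0, 1] / sig2_exp_real[1, 1]
print('symmetric slope:', beta_sym, '  OLS slope:', beta_ols)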