Example #1
def uncond_max_ir(k, sig2=sig2):

    # Monte Carlo scenarios for the signals
    s_j = simulate_normal(np.zeros(2), sig2[-2:, -2:], 1000).T
    cond_mu_x_j = cond_exp_x(s_j[:k, :], k, sig2)

    # Monte Carlo scenarios for the conditional maximum information ratio
    max_ir_j = cond_mu_x_j.T @ \
        np.linalg.solve(cond_cov_x(k, sig2),
                        cond_mu_x_j)

    # root of the average squared conditional max information ratio
    return np.sqrt(np.trace(max_ir_j) / 1000)
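
# A hedged usage sketch (assumed, not from the original script): unconditional
# maximum information ratio when conditioning on the first signal only
max_ir_1 = uncond_max_ir(1)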
Example #2
labels = np.array(df_stocks.columns.codes)[0, :]  # sector indices

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_corr_clusters-implementation-step01): Compute the correlation matrix from the log-returns

epsi = np.diff(np.log(df_stocks), axis=0)  # log-returns
c2 = np.corrcoef(epsi.T)  # historical correlation
t_, n_ = epsi.shape

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_corr_clusters-implementation-step02): Sort the correlation matrix by sectors

i_s = np.argsort(labels)
c2_sec = c2[np.ix_(i_s, i_s)]  # correlation matrix sorted by sectors

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_corr_clusters-implementation-step03): Compute the initial centroids as the averages within each sector

x = simulate_normal(np.zeros(n_), c2, 2 * n_)
k_ = sectors.shape[0]  # number of sectors
c0 = np.zeros((2 * n_, k_))
for k in range(k_):
    c0[:, k] = np.mean(x[:, labels == k], axis=1)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_corr_clusters-implementation-step04): Determine clusters and sort the correlation matrix accordingly

kmeans = KMeans(n_clusters=k_, init=c0.T, n_init=1).fit(x.T)  # fit
i_c = np.argsort(kmeans.labels_)
c2_clus = c2[np.ix_(i_c, i_c)]

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_corr_clusters-implementation-step05): Compute the minimum spanning tree

d = np.sqrt(2 * (1 - c2))  # distance matrix
gr = nx.from_numpy_matrix(d)
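# A hedged continuation (assumed, the excerpt truncates here): extract the
# minimum spanning tree from the distance graph via networkx
tree = nx.minimum_spanning_tree(gr)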
Example #3

p = 0.6  # unconditional probability of the true distribution
q = 0.5  # unconditional probability of the model
mu_x_0 = 3.5  # conditional expectation of the true distribution given Z = 0
mu_x_1 = 6  # conditional expectation of the true distribution given Z = 1
m_0 = 3  # conditional expectation of the model given Z = 0
m_1 = 5.5  # conditional expectation of the model given Z = 1
sig2_x_0 = 1.21  # conditional variance given Z = 0
sig2_x_1 = 0.64  # conditional variance given Z = 1
j_ = 10**5  # number of simulations

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_continuum_discrete_generative_pred-implementation-step01): Generate samples

# +
z = np.random.binomial(1, p, 100)
x = (1-z)*simulate_normal(mu_x_0, sig2_x_0, 100) +\
    z*simulate_normal(mu_x_1, sig2_x_1, 100)
z_q = np.random.binomial(1, q, j_)
x_q = (1 - z_q) * simulate_normal(m_0, 1, j_) + z_q * simulate_normal(
    m_1, 1, j_)

no_points_grid = 500
x_grid = np.linspace(min(np.percentile(x, 1), np.percentile(x_q, 1)),
                     max(np.percentile(x, 99), np.percentile(x_q, 99)),
                     no_points_grid)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_continuum_discrete_generative_pred-implementation-step02): Compute expected score of the model

# +
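# A hedged sketch (assumed, the Step 2 body is not shown): expected log-score
# of the model mixture evaluated on the sample x from the true distribution
from scipy import stats
f_model = (1 - q) * stats.norm.pdf(x, m_0, 1) + q * stats.norm.pdf(x, m_1, 1)
exp_log_score = np.mean(np.log(f_model))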
Example #4
j_ = 10**5  # number of scenarios
v_tnow = np.array([1, 1])  # current values
mu = np.array([0, 0])  # instruments P&L's expectations
h = np.array([45, 55])  # portfolio holdings
lambda_ = np.array([1 / 150, 1 / 200, 1 / 300])  # risk aversion parameters
rho = -0.5  # correlation parameter
# standard deviations appearing in the P&L's distributions
sig_11, sig_22 = 0.1, 0.3
sig2 = np.array([[(sig_11)**2, rho * sig_11 * sig_22],
                 [rho * sig_11 * sig_22, (sig_22)**2]])

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-implementation-step01): Simulate j_ scenarios for the instruments P&L's

n_ = len(h)  # number of instruments
# scenarios for the standard normal random variable Z
z = simulate_normal(np.zeros(n_), np.eye(n_), j_, 'PCA')
sigma_riccati = solve_riccati(sig2, np.eye(n_))  # Riccati root of sig2
mu = np.array([mu] * j_)  # duplicate expectation for j_ scenarios
v_tnow = np.array([v_tnow] * j_)  # duplicate initial values for j_ scenarios
pi = np.exp(mu + z @ sigma_riccati) - v_tnow  # P&L's scenarios
p = np.ones(j_) / j_  # flat scenario-probabilities

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-implementation-step02): Compute the ex-ante performance scenarios

y_h = h @ pi.T  # ex-ante performance scenarios
# number of bins for the ex-ante performance histogram
bins = int(np.round(150 * np.log(j_)))  # bin count must be an integer
# centers and heights of the bins
heights, centers = histogram_sp(y_h, p=p, k_=bins)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-implementation-step03): Compute the certainty-equivalent
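
# A hedged sketch (assumed, the excerpt truncates here): certainty-equivalent
# under exponential utility, one value per risk-aversion parameter in lambda_
cert_eq = np.array([-1 / lam * np.log(p @ np.exp(-lam * y_h))
                    for lam in lambda_])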
Example #5
import matplotlib.pyplot as plt

from arpym.tools import plot_ellipse, mahalanobis_dist, add_logo
from arpym.statistics import simulate_normal
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_min_vol_ellips-parameters)

j_ = 5000
mu = np.array([0, 0])  # expectation
rho = .6  # correlation
sigma2 = np.array([[1, rho], [rho, 1]])  # covariance

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_min_vol_ellips-implementation-step01): Generate j_ normal scenarios

x = simulate_normal(mu, sigma2, j_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_min_vol_ellips-implementation-step02): Rescale the covariance matrix

n_ = sigma2.shape[0]
sigma2_rescaled = n_ * sigma2

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_min_vol_ellips-implementation-step03): Generate location and dispersion satisfying the Mahalanobis distance constraint

# +
m = mu + np.random.rand(2)
a = np.random.rand(2, 2)
s2 = a @ a.T  # generate symmetric covariance matrix
mah_dist2 = np.zeros(j_)

for j in range(j_):
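    # assumed completion (the excerpt truncates here; the exact signature of
    # mahalanobis_dist, imported above, is assumed): squared Mahalanobis
    # distance of each scenario from the candidate location m and dispersion s2
    mah_dist2[j] = mahalanobis_dist(x[j, :], m, s2) ** 2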
Example #6
mu_x_0 = -1  # conditional expectation given h = 0
mu_x_1 = 1.5  # conditional expectation given h = 1
sigma2_x_0 = 0.36  # conditional variance given h = 0
sigma2_x_1 = 0.49  # conditional variance given h = 1
x_c = 1.5  # generic boundary point
x0 = -0.73  # generic decoder at 0
x1 = 1.92  # generic decoder at 1
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_autoencoders_kmeans-implementation-step01): Generate simulations of target variable

# +
# simulations of hidden variable
h = np.random.binomial(1, p_h, j_).reshape(-1)
# simulations of conditional target variable
x_h0 = simulate_normal(mu_x_0, sigma2_x_0, j_)
x_h1 = simulate_normal(mu_x_1, sigma2_x_1, j_)

# simulations of target variable
x = (1 - h) * x_h0 + h * x_h1
# mean and variance of target variable
e_x, cv_x = meancov_sp(x)

# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_autoencoders_kmeans-implementation-step02): Generic encoder, generic and best decoder, errors


# +
# encoding induced by generic cluster
def zeta_c2(x):
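    # assumed completion (the excerpt truncates here): assign cluster 1 to
    # points beyond the generic boundary point x_c
    return np.where(x >= x_c, 1, 0)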
Example #7
j_ = 10**4  # number of simulations


def chi_arb(var):
    return 1 / var


# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-implementation-step01): Generate samples

# +
sig2_xz = np.array([[sig_x**2, rho_xz * sig_x * sig_z],
                    [rho_xz * sig_x * sig_z, sig_z**2]])
# jointly lognormal samples
x, z = np.exp(simulate_normal(mu_xz, sig2_xz, j_).T)

no_points_grid = 500
x_grid = np.linspace(10**-6,
                     2 * max(np.percentile(x, 95), np.percentile(z, 95)),
                     no_points_grid)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-implementation-step02): Compute prediction, residuals and E{X|z}

# +


def chi(var):
    return np.exp(mu_xz[0] + rho_xz * sig_x / sig_z *
                  (np.log(var) - mu_xz[1]) + 0.5 * (1 - rho_xz**2) * sig_x**2)
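
# A hedged continuation (assumed, the excerpt truncates here): prediction and
# residuals under the optimal predictor chi
x_bar = chi(z)  # prediction E{X|z}
u = x - x_bar  # residuals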
Example #8
r2 = np.trace(
    sigma_xz @ np.linalg.inv(sigma2_z) @ sigma_xz.T) / np.trace(sigma2_x)
# -

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_regression_lfm-implementation-step04): Compute joint distribution of residuals and factors

a = np.zeros(n_ + k_)
a[:n_] = -alpha
b = np.eye(n_ + k_)
b[:n_, n_:] = -beta
mu_uz = a + b @ mu_xz
sigma2_uz = b @ sigma2_xz @ b.T

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_regression_lfm-implementation-step05): Compute simulations of target variable and factors

xz = simulate_normal(mu_xz, sigma2_xz, j_)
x_reg = alpha + xz[:, n_:] @ beta.T  # recovered target scenarios, shape (j_, n_)

# ## Plots

# +
# number of simulations to plot
d = 200

z_1_low = np.percentile(xz[:, n_], 1)
z_1_upp = np.percentile(xz[:, n_], 99)
z_1 = np.arange(z_1_low, z_1_upp, 0.5)
z_2_low = np.percentile(xz[:, n_ + 1], 1)
z_2_upp = np.percentile(xz[:, n_ + 1], 99)
z_2 = np.arange(z_2_low, z_2_upp, 0.5)
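
# A hedged continuation (assumed, the excerpt truncates here; assumes k_ = 2
# factors): evaluate the regression plane for the first target variable
z_1_grid, z_2_grid = np.meshgrid(z_1, z_2)
x_plane = alpha[0] + beta[0, 0] * z_1_grid + beta[0, 1] * z_2_grid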
Example #9
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_simulate_payoff-parameters)

n_ = 50  # number of instruments
j_ = 50  # number of scenarios
rf = 0.05  # risk-free rate
a_p, b_p = 0.7, 1  # window for non-normalized probabilities of the scenarios
a_mu, b_mu = -0.3, 0.7  # window for random shifts of the payoffs
a_sd, b_sd = 0.8, 1  # window for random rescales of the payoffs
rho = 0.7  # correlation between initial normal variables that are used to generate the payoffs

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_simulate_payoff-implementation-step01): Generate the normal vector

c2 = (1 - rho) * np.eye(n_) + rho * np.ones((n_, n_))  # correlation matrix
x = simulate_normal(np.zeros(n_), c2, j_**2)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_simulate_payoff-implementation-step02): Generate the payoff matrix at time u = t + 2

# +
mu = np.exp(0.5)  # expectation of std lognormal distribution
sd = mu * np.sqrt(np.exp(1) - 1)  # stdev of std lognormal distribution

v_t2 = np.ones((j_**2, n_))
v_t2[:, 1] = np.exp(x[:, 1]) / sd
v_t2[:, 2::2] = (np.exp(x[:, 2::2]) - mu) / sd
v_t2[:, 3::2] = (-np.exp(-x[:, 3::2]) + mu) / sd
v_t2[:, 2:] = v_t2[:, 2:] * np.random.uniform(a_sd, b_sd, n_ - 2)  # scale
v_t2[:, 2:] = v_t2[:, 2:] + np.random.uniform(a_mu, b_mu, n_ - 2)  # shift
# -
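# A hedged sketch (assumed, not shown in the excerpt): non-normalized scenario
# probabilities drawn from the window [a_p, b_p], then normalized
p = np.random.uniform(a_p, b_p, j_**2)
p = p / np.sum(p)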
Example #10
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_factor_replication_logn-parameters)

n_max = 500  # max target dimension
n_step = np.arange(25, n_max + 25, 25)  # target dimension grid
j_ = 10000  # number of scenarios
mu = np.append(1, np.zeros(n_max))
delta = np.random.rand(n_max)
sigma2 = np.diag(np.append(1, delta**2))
c = np.exp(mu + np.diag(sigma2) / 2)

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_factor_replication_logn-implementation-step01): Choose arbitrary parameters

alpha = np.zeros(n_max)
beta = simulate_normal(np.zeros(1), np.eye(1),
                       n_max).reshape(-1, 1)  # generate normal scenarios

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_factor_replication_logn-implementation-step02): Compute scenarios of the factor, residuals and target variables

# np.random.lognormal takes the standard deviation, not the variance
h = np.random.lognormal(mu[0], np.sqrt(sigma2[0, 0]), size=(j_, 1)) - c[0]
l = simulate_normal(np.zeros(n_max), np.eye(n_max), j_).reshape(-1, n_max)
u = np.exp(l * delta) - np.exp(delta**2 / 2.)
x = alpha + h @ beta.T + u

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_factor_replication_logn-implementation-step03): Compute expectation and covariance of the target variables

mu_x = alpha
sigma2_x = beta @ beta.T + np.diag(delta**2)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_factor_replication_logn-implementation-step04): Compute extraction matrix and r-squared
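
# A hedged sketch (assumed, the Step 4 body is not shown): extract the factor
# via the pseudo-inverse of the loadings and measure the replication r-squared
g = np.linalg.pinv(beta)  # extraction matrix, shape (1, n_max)
z = x @ g.T  # replicated factor scenarios
r2 = np.corrcoef(z[:, 0], h[:, 0])[0, 1] ** 2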
Example #11

import matplotlib.pyplot as plt
import scipy.stats as st

from arpym.statistics import cop_marg_sep, \
                             invariance_test_ellipsoid, \
                             simulate_normal
from arpym.tools import add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_normal-parameters)

t_ = 1000  # time series length
mu = 0  # expectation
sigma2 = 0.0625  # variance
l_ = 10  # lag for the ellipsoid test
conf_lev = 0.95  # confidence level

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_normal-implementation-step00): Generate normal simulations

epsi = simulate_normal(mu, sigma2, t_)

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_normal-implementation-step01): Compute absolute values of normal simulations

epsi_abs = abs(epsi)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_normal-implementation-step02): Compute normalized absolute values

# grades of absolute values
epsi_abs_grade, *_ = cop_marg_sep(epsi_abs)
# normalized absolute values
epsiabs_tilde = st.norm.ppf(epsi_abs_grade).squeeze()

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_elltest_normal-implementation-step03): Ellipsoid test on normal simulations

plt.style.use('arpm')
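
# A hedged continuation (assumed, the excerpt truncates here; the exact
# signature of invariance_test_ellipsoid, imported above, is assumed)
invariance_test_ellipsoid(epsi, l_, conf_lev=conf_lev)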
Example #12
from arpym.statistics import meancov_sp, simulate_normal
from arpym.tools import add_logo, pca_cov, plot_ellipse
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_uncorr_no_indep-parameters)

# +
j_ = 5*10**4  # number of simulations
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_uncorr_no_indep-implementation-step01): Generate simulations

# +
# simulations of X
x = simulate_normal(0, 1, j_)
# simulations of Y
y = x ** 2
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_uncorr_no_indep-implementation-step02): Compute sample mean and covariance

# +
# sample expectation and covariance
e_xy, cv_xy = meancov_sp(np.c_[x, y])
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_uncorr_no_indep-implementation-step03): Generate mean/covariance ellipse
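
# A hedged sketch (assumed, the step body is not shown): ellipse implied by
# the sample mean and covariance, via the plot_ellipse helper imported above
ellipse_points = plot_ellipse(e_xy, cv_xy, r=2, display_ellipse=False)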
Example #13
import matplotlib.pyplot as plt

from arpym.estimation import effective_num_scenarios
from arpym.statistics import simulate_normal
from arpym.tools import add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-parameters)

t_ = 10000  # number of scenarios for flexible probabilities
k_ = 100  # number of values of gamma

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step01): Generate a random vector of flexible probabilities

# generate a vector of positive values
p = np.abs(simulate_normal(0, 1, t_))
p = p / np.sum(p)  # rescale so the probabilities add to one

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step02): Create a grid of gamma values

gamma_grid = np.linspace(0, 1 - 1.0e-7, num=k_)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step03): Calculate the effective number of scenarios

ens = effective_num_scenarios(p)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step04): Calculate the generalized effective number of scenarios for various values of gamma

ens_gamma = np.zeros(k_)
for k in range(k_):
    # assumed completion (the excerpt truncates mid-call): generalized
    # effective number of scenarios evaluated at gamma_grid[k]
    ens_gamma[k] = effective_num_scenarios(p, gamma=gamma_grid[k])
Example #14
              ]])

mu_epsi = np.zeros(n_)
s_1 = np.cos(theta[0])
s_2 = np.cos(theta[3])
rho = np.sin(theta[1])
sigma2_epsi = np.array([[s_1**2, rho * s_1 * s_2], [rho * s_1 * s_2, s_2**2]])
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step01): Simulate VAR(1) process

# +
mu_inf = np.linalg.solve(np.eye(n_) - b, mu_epsi)
sigma2_inf = np.linalg.solve(
    np.eye(n_**2) - np.kron(b, b), sigma2_epsi.reshape(n_**2, 1)).reshape(n_, n_)
x_tnow = simulate_normal(mu_inf, sigma2_inf, 1).reshape(n_)

x = simulate_var1(x_tnow, b, mu_epsi, sigma2_epsi, t_, j_=1).squeeze()
mu_x = np.linalg.solve((np.eye(n_) - b), mu_epsi)
sigma2_x = np.linalg.solve(
    np.eye(n_**2) - np.kron(b, b), sigma2_epsi.reshape(n_**2,
                                                       1)).reshape(n_, n_)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step02): Compute spectral density

# +
ktilde_x = np.zeros((m_, n_, n_), dtype=complex)

sigma_epsi = transpose_square_root(sigma2_epsi)
for m in range(m_):
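    # assumed completion (the excerpt truncates here; an m_-point frequency
    # grid `omega` is presumed defined earlier in the full script): VAR(1)
    # spectral density at the m-th frequency
    s_m = np.linalg.solve(np.eye(n_) - b * np.exp(-1j * omega[m]), sigma_epsi)
    ktilde_x[m] = s_m @ s_m.conj().T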
Example #15
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_gaussian_mixture-parameters)

p = 0.6  # unconditional probability
mu_x_0 = 3.5  # conditional expectation given Z = 0
mu_x_1 = 6  # conditional expectation given Z = 1
sig2_x_0 = 1.21  # conditional variance given Z = 0
sig2_x_1 = 0.64  # conditional variance given Z = 1
x_cond = 5.5  # realization of X
j_ = 10**5  # number of simulations

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_gaussian_mixture-implementation-step01): Generate samples

z = np.random.binomial(1, p, j_)
x = (1-z)*simulate_normal(mu_x_0, sig2_x_0, j_) +\
    z*simulate_normal(mu_x_1, sig2_x_1, j_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_gaussian_mixture-implementation-step02): Compute Z|x

# +
alpha = logit(p) - 0.5 * (np.log(sig2_x_1) - np.log(sig2_x_0) + mu_x_1 /
                          sig2_x_1 * mu_x_1 - mu_x_0 / sig2_x_0 * mu_x_0)
beta = mu_x_1 / sig2_x_1 - mu_x_0 / sig2_x_0
gamma = -0.5 * (1 / sig2_x_1 - 1 / sig2_x_0)


def logit_px(x):
    return alpha + beta * x + gamma * x**2
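
# A hedged usage sketch (assumed, not shown in the excerpt): posterior
# probability P{Z = 1 | x_cond} via the inverse-logit (scipy.special.expit)
from scipy.special import expit
p_z_x_cond = expit(logit_px(x_cond))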

Example #16
from arpym.tools import add_logo

# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_display_norm_copula-parameters)

j_ = 5000  # number of simulations
mu = np.array([0, 0])  # expectations
rho = -0.5  # correlation
svec = np.array([1, 1])  # standard deviations

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_display_norm_copula-implementation-step01): Generate normal scenarios and scenarios for the grades

# +
sigma2 = np.diag(svec) @ np.array([[1, rho], [rho, 1]]) @ np.diag(svec)
x = simulate_normal(mu, sigma2, j_)  # normal scenarios

u1 = stats.norm.cdf(x[:, 0], mu[0], svec[0])
u2 = stats.norm.cdf(x[:, 1], mu[1], svec[1])
u_x = np.array([u1, u2]).T  # grade scenarios

# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_display_norm_copula-implementation-step02): Compute pdf and cdf surfaces

# +
# grid in the unit square
grid = np.arange(0.01, 1, 0.01)
n_grid = len(grid)

pdf_u = np.zeros((n_grid, n_grid))
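# assumed completion (the excerpt truncates here): normal copula pdf on the
# grid, i.e. the joint pdf over the product of the marginal pdfs evaluated at
# the marginal quantiles of the grades
for i in range(n_grid):
    for j in range(n_grid):
        q1 = stats.norm.ppf(grid[i], mu[0], svec[0])
        q2 = stats.norm.ppf(grid[j], mu[1], svec[1])
        pdf_u[i, j] = stats.multivariate_normal.pdf([q1, q2], mu, sigma2) / \
            (stats.norm.pdf(q1, mu[0], svec[0]) *
             stats.norm.pdf(q2, mu[1], svec[1]))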
Example #17
mu = [1, 3]  # location parameter
# dispersion parameters
rho_12 = -0.2
sig_1 = 0.5
sig_2 = 0.3
j_ = 10**2  # number of simulations
k_strk = 2.71  # strike
h = 0.5  # approximation level

# ## Step 0: Generate jointly lognormal samples

sig2 = np.array([[sig_1**2, rho_12*sig_1*sig_2],
                 [rho_12*sig_1*sig_2, sig_2**2]])
# jointly lognormal samples
x = np.exp(simulate_normal(mu, sig2, j_))

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step01): Compute call and put payoffs

v_put = np.maximum(k_strk - x[:, 0], 0)
v_call = np.maximum(x[:, 0] - k_strk, 0)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step02):  Compute regularized call and put payoffs

v_put_h = regularized_payoff(x[:, 0], k_strk, h, method='put')  # regularized payoff of put option
v_call_h = regularized_payoff(x[:, 0], k_strk, h, method='call')  # regularized payoff of call option

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_dependence_structure_call_put-implementation-step03): Compute Schweizer-Wolff measures

sw_x = schweizer_wolff(x)
# the regularized put payoff is a monotone transformation of X1,
# so its Schweizer-Wolff dependence with X1 equals 1
sw_x1v_put_h = 1
Example #18
import scipy as sp
import matplotlib.pyplot as plt

from arpym.statistics import simulate_normal
from arpym.tools import add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_strong_dominance-parameters)

mu_ = np.array([1, 0])  # mean vector of jointly normal variables
sigma2_ = np.array([[1, 0], [0, 1]])  # covariance matrix
j_ = 1000  # number of simulations

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_strong_dominance-implementation-step01): Simulate jointly normal random variables X_1 and X_2

x = simulate_normal(mu_, sigma2_, j_)
x_1, x_2 = x[:, 0], x[:, 1]

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_strong_dominance-implementation-step02): Simulate X_3 = X_2 + Y, Y chi-squared with 1 degree of freedom

x_3 = x_2 + sp.stats.chi2.rvs(1, size=(1, j_))

# ## Plots

# +
# set figure specifications
plt.style.use('arpm')
f, ax = plt.subplots(1,
                     2,
                     figsize=(1280.0 / 72.0, 720.0 / 72.0),
                     dpi=72.0)
Example #19
b = 0.7  # autoregression parameter
mu_eps = 0  # location of the shocks
sigma2_eps = 1 - b**2  # dispersion of the shocks
t_ = 350  # number of lags (the series is simulated over 2 * t_ steps)
t_vec = np.arange(2 * t_ + 1)
tau = 100  # truncation
tau_vec = np.arange(-tau, tau + 1)
omega0 = 1 / 4 * np.pi
omega1 = 1 / 2 * np.pi

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_bandpass_filter_ar1-implementation-step01): Simulate stationary AR(1) process

mu_x = mu_eps / (1 - b)  # expectation of (stationary) AR(1)
sigma2_x = sigma2_eps / (1 - b**2)  # variance of (stationary) AR(1)
x0 = simulate_normal(mu_x, sigma2_x, 1)
x = simulate_var1(x0,
                  np.atleast_2d(b),
                  np.atleast_2d(mu_eps),
                  np.atleast_2d(sigma2_eps),
                  2 * t_,
                  j_=1).squeeze()

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_bandpass_filter_ar1-implementation-step02): Compute spectral density and bandpass filter

# +
omega_vec = np.zeros((2 * t_ + 1, 1))
for j in range(1, 2 * t_ + 2):
    omega_vec[j - 1] = ((-1)**(j - 1)) * j * np.pi / (2 * t_ + 1)
omega_vec = np.sort(omega_vec, axis=None)
ktilde_x = sigma2_eps / (1 - 2 * b * np.cos(omega_vec) + b**2)
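
# A hedged continuation (assumed, the excerpt truncates here): ideal bandpass
# filter weights on the truncation window tau_vec, passing [omega0, omega1]
h_band = np.zeros(len(tau_vec))
for i, k in enumerate(tau_vec):
    if k == 0:
        h_band[i] = (omega1 - omega0) / np.pi
    else:
        h_band[i] = (np.sin(omega1 * k) - np.sin(omega0 * k)) / (np.pi * k)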
Example #20
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-parameters)

mu_x = np.array([0, 0])  # expectation
rho = 0.75  # correlation
sigma2_x = np.array([[1, rho], [rho, 1]])  # covariance
r = 2  # radius
j_ = 10000  # number of scenarios

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step01): Compute eigenvalues and eigenvectors

e, lambda2 = pca_cov(sigma2_x)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step02): Compute simulations of the target and factors

x = simulate_normal(mu_x, sigma2_x, j_)
z = (x - mu_x) @ e

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step03): Perform computations for plots

x_bar = plot_ellipse(mu_x,
                     sigma2_x,
                     r=r,
                     display_ellipse=False,
                     plot_axes=True,
                     plot_tang_box=True,
                     color='k')
[f_z1, xi_z1] = histogram_sp(z[:, 0], k_=300)
[f_z2, xi_z2] = histogram_sp(z[:, 1], k_=300)

# ## Plots
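
# A hedged sketch (assumed, the excerpt truncates here; assumes plot_ellipse
# with display_ellipse=False returns the ellipse boundary points)
plt.style.use('arpm')
plt.scatter(x[:, 0], x[:, 1], s=1)
plt.plot(x_bar[:, 0], x_bar[:, 1], 'k')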
Example #21
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-parameters)

# +
n_ = 100  # number of financial instruments
j_ = 10000  # number of simulations
v_budget = 5  # current budget
r_tnow_thor = 0.02  # risk-free interest rate
v_tnow = np.ones(n_)  # current values
sigma_mu = 1 / 30  # scale of compounded returns' expectation
sigma_bar = 1 / 40  # scale of compounded returns' covariance
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step01): Generate the parameters of the compounded returns distribution

# +
mu = simulate_normal(np.zeros(n_), sigma_mu**2 * np.eye(n_),
                     1)  # expectation of compounded returns
a = np.zeros((n_, n_))
for n in range(n_):
    a[n, :] = simulate_normal(np.zeros(n_), sigma_bar**2 * np.eye(n_), 1)
sigma2 = (a @ a.T)  # covariance of compounded returns
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step02): Generate the MC scenarios of the compounded returns

# +
c_tnow_thor = simulate_normal(mu, sigma2, j_)  # compounded returns scenarios
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_capm_like_identity-implementation-step03): Compute the scenarios of the linear returns

# +
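# A hedged sketch (assumed, the excerpt truncates here): linear returns from
# the compounded-return scenarios
r_lin_tnow_thor = np.exp(c_tnow_thor) - 1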