Example #1
0
                  (spx_all['date'] < pd.to_datetime('2017-09-01'))]  # tail of a truncated date filter on spx_all — keeps observations before 01-Sep-2017

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step01): Compute the S&P 500 compounded return

# invariants (S&P 500 log-return): one-step differences of the log index level
epsi = np.diff(np.log(spx.SPX_close))  # S&P 500 index compounded return

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step02): Compute the time exponential decay probabilities

t_ = len(epsi)  # number of scenarios
t_star = t_  # decay anchored at the last (most recent) scenario — TODO confirm t_star semantics in exp_decay_fp
p_exp = exp_decay_fp(t_, tau_hl, t_star)  # exponential-decay flexible probabilities (tau_hl = half-life, defined above this chunk)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step03): Compute the effective number of scenarios

ens = effective_num_scenarios(p_exp)  # effective number of scenarios implied by p_exp

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step04): Compute flexible probabilities histogram

# probability-weighted histogram of epsi; k_ = 10*log(t_) is non-integer —
# presumably rounded inside histogram_sp, verify
f_eps, x_eps = histogram_sp(epsi, p=p_exp, k_=10*np.log(t_))

# ## Plots

# +
# figure settings
plt.style.use('arpm')
# grey shades from dark (0.0) to light (0.6) in 0.01 steps, plus one extra light shade (0.85)
grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
# map each probability in p_exp to a grey shade (project helper)
[color_map, p_colors] = colormap_fp(p_exp, np.min(p_exp), np.max(p_exp),
                                    grey_range, 0, 1, [1, 0])
myFmt = mdates.DateFormatter('%d-%b-%Y')  # x-axis date tick format
bar_dates = np.array(spx.date[1:])  # dates aligned with epsi (np.diff drops the first obs)
Example #2
0
t_ = len(epsi)  # number of scenarios (epsi defined above this chunk)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step02): Compute the risk factor by smoothing and scoring VIX compounded return

tau_hl_smoo = 15  # smoothing half-life parameter
tau_hl_scor = 100  # scoring half-life parameter
# c is presumably the VIX compounded return (see step header); defined above this chunk
z_smooth = smoothing(c, tau_hl_smoo)  # smoothing
z = scoring(z_smooth, tau_hl_scor)  # scoring

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step03): Compute the smooth kernel probabilities

# z_star (conditioning value), h (bandwidth?) and gamma are defined above this chunk — verify
p_smooth = smooth_kernel_fp(z, z_star, h, gamma=gamma)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step04): Compute the effective number of scenarios

ens = effective_num_scenarios(p_smooth)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step05): Compute flexible probabilities histogram

# probability-weighted histogram with 10*log(t_) bins
f_eps, x_eps = histogram_sp(epsi, p=p_smooth, k_=10*np.log(t_))

# ## Plots

# +
plt.style.use('arpm')

# grey shades from dark (0.0) to light (0.6) in 0.01 steps, plus one extra light shade (0.85)
grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
# map each probability in p_smooth to a grey shade (project helper)
[color_map, p_colors] = colormap_fp(p_smooth, np.min(p_smooth),
                                    np.max(p_smooth), grey_range, 0, 1,
                                    [1, 0])
plot_dates = np.array(date)  # date is defined above this chunk
# +
path = '../../../databases/temporary-databases/'
# read the database of option P&L scenarios produced by a prior script
df = pd.read_csv(path + 'db_pric_options.csv', index_col=0)

pi_call = np.array(df['pi_call'])  # call option P&L scenarios
pi_put = np.array(df['pi_put'])  # put option P&L scenarios
p = np.array(df['p'])  # probabilities
dates = np.array(df.index.values)  # dates
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_aggregation_options_hfp-implementation-step01): Compute the portfolio P&L scenarios and distribution

# +
# h holds the portfolio positions (defined above this chunk); the portfolio P&L
# is the holdings-weighted combination of the call and put scenario rows
pi_h = h.T @ np.r_[[pi_call], [pi_put]]  # portfolio P&L scenarios
ens = effective_num_scenarios(p)  # effective number scenarios

# mean and standard deviation of the portfolio P&L distribution
[mu_pi_h, sigma2_pi_h] = meancov_sp(pi_h, p)
sigma_pi_h = np.sqrt(sigma2_pi_h)

# mean and standard deviation of the call option P&L distribution
[mu_pi_call, sigma2_pi_call] = meancov_sp(pi_call, p)
sigma_pi_call = np.sqrt(sigma2_pi_call)

# mean and standard deviation of the put option P&L distribution
[mu_pi_put, sigma2_pi_put] = meancov_sp(pi_put, p)
sigma_pi_put = np.sqrt(sigma2_pi_put)
# -

# ## Plots
Example #4
0
t_ = len(epsi)  # number of scenarios (epsi defined above this chunk)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_state_crisp_fp-implementation-step02): Compute the risk factor by smoothing and scoring VIX compounded return

tau_hl_smoo = 15  # smoothing half-life parameter
tau_hl_scor = 100  # scoring half-life parameter
# c is presumably the VIX compounded return (see step header); defined above this chunk
z_smooth = smoothing(c, tau_hl_smoo)  # smoothing
z = scoring(z_smooth, tau_hl_scor)  # scoring

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_state_crisp_fp-implementation-step03): Compute the state crisp probabilities

# crisp probabilities conditioned on z near z_star; also returns the
# lower/upper bounds of the conditioning band (z_star, alpha defined above)
p_crisp, z_lb, z_ub = crisp_fp(z, z_star, alpha)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_state_crisp_fp-implementation-step04): Compute the effective number of scenarios

ens = effective_num_scenarios(p_crisp)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_state_crisp_fp-implementation-step05): Compute flexible probabilities histogram

# probability-weighted histogram with 10*log(t_) bins
f_eps, x_eps = histogram_sp(epsi, p=p_crisp, k_=10*np.log(t_))

# ## Plots

# +
plt.style.use('arpm')

# grey shades from dark (0.0) to light (0.6) in 0.01 steps, plus one extra light shade (0.85)
grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
# map each probability in p_crisp to a grey shade (project helper)
[color_map, p_colors] = colormap_fp(p_crisp, np.min(p_crisp),
                                    np.max(p_crisp), grey_range, 0, 1,
                                    [1, 0])
plot_dates = np.array(date)  # date is defined above this chunk
Example #5
0
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step02): Compute the risk factor by smoothing and scoring VIX compounded return

tau_hl_smoo = 22  # smoothing half-life parameter
tau_hl_scor = 144  # scoring half-life parameter
# c is presumably the VIX compounded return (see step header); defined above this chunk
z_smooth = smoothing(c, tau_hl_smoo)  # smoothing
z = scoring(z_smooth, tau_hl_scor)  # scoring

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step03): Compute the flexible probabilities conditioned via minimum relative entropy

# exponential-decay prior over the t_-1 scenarios (t_ and tau_hl_prior defined above this chunk)
prior = exp_decay_fp(t_-1, tau_hl_prior)
# minimum relative entropy flexible probabilities, conditioned on z near z_star
p_entropy = conditional_fp(z, z_star, alpha, prior)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step04): Compute the effective number of scenarios

ens = effective_num_scenarios(p_entropy)  # effective number of scenarios

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step05): Compute the flexible probabilities histogram

# 10*log(t_-1) bins is non-integer — presumably rounded inside histogram_sp, verify
n_bins = 10 * np.log(t_-1)
f_eps, x_eps = histogram_sp(epsi, p=p_entropy, k_=n_bins)  # flex. prob. hist.

# ## Plots

# +
plt.style.use('arpm')

# grey shades from dark (0.0) to light (0.6) in 0.01 steps, plus one extra light shade (0.85)
grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
# map each probability in p_entropy to a grey shade (project helper)
[color_map, p_colors] = colormap_fp(p_entropy, np.min(p_entropy),
                                    np.max(p_entropy), grey_range, 0, 1,
                                    [1, 0])
Example #6
0
k_ = 100  # size of grid of probabilities
min_p_1 = 0  # minimum value for p_1
max_p_1 = 1  # maximum value for p_1

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step01): Create flexible probabilities scenarios

# create flexible probabilities: each column of p is a two-scenario
# probability vector (p_1, 1 - p_1)
p_1 = np.linspace(min_p_1, max_p_1, num=k_)
p_2 = np.ones(k_)-p_1
p = np.vstack((p_1, p_2))

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ens_two_scenarios-implementation-step02): Calculate the effective number of scenarios

# effective number of scenarios for each probability vector (one per column of p)
ens = np.zeros(k_)
for k in range(k_):
    ens[k] = effective_num_scenarios(p[:, k])

# ## Plots

# +
plt.style.use('arpm')

f = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
plt.plot(p_1, ens, lw=1.5)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$p_1$', fontsize=17)
# raw string: '\m' in '\mathbf' is an invalid escape sequence in a plain string
# literal (SyntaxWarning since Python 3.12, error in future versions); the
# rendered mathtext label is unchanged
plt.ylabel(r'$ens(\mathbf{p})$', fontsize=17)
plt.title('Effective number of scenarios as the flexible probabilities vary\n'
          r'$\bar{t}=2$', fontsize=20, fontweight='bold')
add_logo(f, location=1, set_fig_size=False)
Example #7
0
t_ = 10000  # number of scenarios for flexible probabilities
k_ = 100  # number of values of gamma

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step01): Generate a random vector of flexible probabilities

# draw t_ positive values, then normalize them into a probability vector
p = np.abs(simulate_normal(0, 1, t_))
p = p / np.sum(p)  # probabilities now sum to one

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step02): Create a grid of gamma values

# gamma ranges over [0, 1); stop just short of 1
gamma_grid = np.linspace(0, 1 - 1.0e-7, num=k_)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step03): Calculate the effective number of scenarios

# standard effective number of scenarios of p
ens = effective_num_scenarios(p)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_expon_entropy-implementation-step04): Calculate the generalized effective number of scenarios for various values of gamma

# generalized (exponential-entropy) ens evaluated at every gamma on the grid
ens_gamma = np.array([effective_num_scenarios(p,
                                              type_ent='gen_exp',
                                              gamma=g)
                      for g in gamma_grid])

# ## Plots

# +
plt.style.use('arpm')

f = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
Example #8
0
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-implementation-step01): Create a grid of half-life values for plotting

# k_ half-life values from 1 to tau_hl_max (k_, tau_hl_max defined above this chunk)
tau_hl_grid = np.linspace(1, tau_hl_max, num=k_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-implementation-step02): Compute exponential decay probabilities

# one row of exponential-decay flexible probabilities per half-life value
p = np.zeros((k_, t_))
for k in range(k_):
    p[k] = exp_decay_fp(t_, tau_hl_grid[k])

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_ens_exp_decay-implementation-step03): Compute effective number of scenarios

# standard and generalized ('gen_exp', with gamma defined above this chunk)
# effective number of scenarios for each row of p
ens = np.zeros(len(tau_hl_grid))
ens_gamma = np.zeros(k_)
for k in range(k_):
    ens[k] = effective_num_scenarios(p[k])
    ens_gamma[k] = effective_num_scenarios(p[k],
                                           type_ent='gen_exp',
                                           gamma=gamma)

# ## Plots

# +
plt.style.use('arpm')

f = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
plt.xlim(0, np.ceil(tau_hl_max * 10 / t_) / 10)  # x-axis in units of tau_hl / t_
plt.ylim(0, 1)
# NOTE(review): this plt.plot call is truncated at the end of the visible chunk
plt.plot(tau_hl_grid / t_,
         ens / t_,
         label=r'$ens(\mathbf{p})\backslash \bar{t}$',