Example #1
sig2_x_trunc = beta @ sig2_z @ beta.T + np.diag(np.diag(sig2_u))

std_1 = np.sqrt(w_1.T @ sig2_x @ w_1)
std_trunc_1 = np.sqrt(w_1.T @ sig2_x_trunc @ w_1)

std_2 = np.sqrt(w_2.T @ sig2_x @ w_2)
std_trunc_2 = np.sqrt(w_2.T @ sig2_x_trunc @ w_2)
# -

# ## Plots

# +
plt.style.use('arpm')

# (untruncated) correlations among residuals
fig1 = plt.figure()
f, xi = histogram_sp(c2_u[np.nonzero(c2_u)])
plt.bar(xi, f, width=xi[1] - xi[0], facecolor=[.7, .7, .7], edgecolor='k')
plt.title('Correlations among residuals')
add_logo(fig1)

# (untruncated) correlations between factors and residuals
fig2 = plt.figure()
f, xi = histogram_sp(c_uz.reshape((n_ * k_, )))
plt.bar(xi, f, width=xi[1] - xi[0], facecolor=[.7, .7, .7], edgecolor='k')
plt.title('Correlations between factors and residuals')
add_logo(fig2, location=1)

plt.tight_layout()
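
# `histogram_sp` from the arpym toolbox appears throughout these examples. A
# minimal numpy sketch with the same (heights, centers) return convention is
# given below as a reference; it assumes flat probabilities when p is None and
# density-normalized heights (the library implementation may differ in detail).
import numpy as np


def histogram_sp_sketch(x, p=None, k_=None):
    x = np.asarray(x).flatten()
    if p is None:
        p = np.ones(x.shape[0]) / x.shape[0]  # flat scenario probabilities
    if k_ is None:
        k_ = 10 * np.log(x.shape[0])  # heuristic bin count used in the scripts
    k_ = int(np.round(k_))
    edges = np.linspace(x.min(), x.max(), k_ + 1)
    centers = (edges[:-1] + edges[1:]) / 2
    idx = np.clip(np.digitize(x, edges) - 1, 0, k_ - 1)  # bin of each scenario
    heights = np.bincount(idx, weights=p, minlength=k_) / np.diff(edges)
    return heights, centers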
Example #2
k = 4  # shape parameter
theta = 4  # scale parameter
j_ = 100000  # number of scenarios

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_uniform_to_gamma-implementation-step01): Generate a uniform sample

u = np.random.rand(j_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_uniform_to_gamma-implementation-step02): Apply inverse transform sampling

x = stats.gamma.ppf(u, k, scale=theta)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_uniform_to_gamma-implementation-step03): Compute the empirical histogram of the new sample

k_bar = np.round(5*np.log(j_))
[f_hist, xi] = histogram_sp(x, k_=k_bar)

# ## Plots

plt.style.use('arpm')
fig = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
plt.title('Uniform-to-gamma mapping', fontsize=20, fontweight='bold')
# empirical pdf
plt.bar(xi, f_hist, width=xi[1]-xi[0], facecolor=[.7, .7, .7],
        edgecolor='k', label='empirical pdf')
# analytical pdf
plt.plot(xi, stats.gamma.pdf(xi, k, scale=theta),
         color='red', lw=5, label='gamma pdf')
plt.grid(True)
plt.ylim([0, 1.1*np.max(f_hist)])
plt.xticks(fontsize=14)
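
# sanity check (not part of the original script): the inverse-transform sample
# should reproduce the gamma(k, theta) moments, mean k*theta and variance
# k*theta**2
print('sample mean %.3f vs. k*theta = %.1f' % (np.mean(x), k * theta))
print('sample var  %.3f vs. k*theta**2 = %.1f' % (np.var(x), k * theta**2))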
Example #3
# spectrum plot
fig1 = plt.figure()
plt.bar(np.arange(i_), np.log(lambda2_hat), facecolor=c0_bl,
        label='sample spectrum')
plt.plot(np.arange(k_), np.log(lambda2_bar[:k_]), color=c1_or, lw=2)
plt.plot(np.arange(k_, i_), np.log(lambda2_bar[k_:i_]), color=c1_or, lw=2,
         label='filtered spectrum')
plt.legend()
plt.title('Spectrum')
plt.ylabel('log-eigenvalues')
plt.xlabel('stocks')
add_logo(fig1, location=5)

# spectrum distribution
fig2 = plt.figure()
density, xbin = histogram_sp(lambda2_hat, k_=10*i_)
pp1 = plt.bar(xbin, density, width=xbin[1]-xbin[0], facecolor=c0_bl,
              edgecolor='none', label='sample eigenvalues below threshold')
pp2 = plt.plot(x_mp, y_mp*(1 - k_ / i_), color='g', lw=2,
               label='Marchenko-Pastur fit')
x_filtered = lambda2_bar[:k_ + 2]
density_filtered = np.r_['-1', np.ones((1, k_+1)), np.array([[i_ - k_]])]
pp3 = plt.plot(np.r_[x_filtered.reshape(1, -1), x_filtered.reshape(1, -1)],
               np.r_[np.zeros((1, k_ + 2)), density_filtered], color=c1_or,
               lw=2, label='filtered spectrum')
plt.xlim([0, 3*np.max(x_mp)])
plt.ylim([0, max([np.max(y_mp*(1 - k_ / i_)), np.max(density)])])
plt.legend(handles=[pp1, pp2[0], pp3[0]])
plt.title('Spectrum distribution')
plt.xlabel('eigenvalues')
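
# the x_mp/y_mp arrays above presumably come from a Marchenko-Pastur fit; a
# minimal sketch of the Marchenko-Pastur density, assuming aspect ratio
# q = (number of variables) / (number of observations) <= 1 and variance
# sigma2 (x is a numpy array of eigenvalue grid points)
def marchenko_pastur_pdf_sketch(x, q, sigma2=1.0):
    lam_min = sigma2 * (1 - np.sqrt(q))**2  # lower edge of the support
    lam_max = sigma2 * (1 + np.sqrt(q))**2  # upper edge of the support
    pdf = np.zeros_like(x, dtype=float)
    inside = (x > lam_min) & (x < lam_max)
    pdf[inside] = np.sqrt((lam_max - x[inside]) * (x[inside] - lam_min)) \
        / (2 * np.pi * q * sigma2 * x[inside])
    return pdf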
Example #4
num_plot = min(j_, 20)

# market risk driver path
fig1 = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)

# plot historical series
f1 = plt.plot(np.arange(t_ + 1),
              db_riskdrivers_series.iloc[:, d_plot - 1],
              lw=1)
# plot projected series
for j in range(num_plot):
    f1 = plt.plot(np.arange(t_ + 1, t_ + 1 + m_ + 1),
                  x_proj[j, :, d_plot - 1],
                  lw=1)

f, xp = histogram_sp(x_proj[:, -1, d_plot - 1], k_=10 * np.log(j_))
f1 = plt.barh(xp,
              f / 10,
              height=xp[1] - xp[0],
              left=t_ + 1 + m_,
              facecolor=[.3, .3, .3],
              edgecolor='k')
plt.title('Projected path: ' + risk_drivers_names[d_plot - 1],
          fontweight='bold',
          fontsize=20)
plt.xlabel('t (days)', fontsize=17)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
add_logo(fig1, set_fig_size=False)
fig1.tight_layout()
Example #5
# invariants (S&P500 log-return)
epsi = np.diff(np.log(spx.SPX_close))  # S&P 500 index compounded return

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step02): Compute the time exponential decay probabilities

t_ = len(epsi)
t_star = t_
p_exp = exp_decay_fp(t_, tau_hl, t_star)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step03): Compute the effective number of scenarios

ens = effective_num_scenarios(p_exp)
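
# minimal sketches of the two helpers above, assuming the standard ARPM
# definitions: probabilities decaying exponentially with half-life tau_hl
# around t_star, and the effective number of scenarios as the exponential of
# the entropy (the library implementations may differ in detail)
def exp_decay_fp_sketch(t_, tau_hl, t_star):
    p = np.exp(-(np.log(2) / tau_hl) * np.abs(t_star - np.arange(1, t_ + 1)))
    return p / np.sum(p)  # normalize to a probability vector


def effective_num_scenarios_sketch(p):
    return np.exp(-p @ np.log(p))  # exponential of the entropy of p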

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_exp_decay_fp-implementation-step04): Compute flexible probabilities histogram

f_eps, x_eps = histogram_sp(epsi, p=p_exp, k_=10*np.log(t_))

# ## Plots

# +
# figure settings
plt.style.use('arpm')
grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
[color_map, p_colors] = colormap_fp(p_exp, np.min(p_exp), np.max(p_exp),
                                    grey_range, 0, 1, [1, 0])
myFmt = mdates.DateFormatter('%d-%b-%Y')
bar_dates = np.array(spx.date[1:])

# flexible probabilities profile
f, ax = plt.subplots(2, 1, figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
plt.sca(ax[0])
Example #6
# simulated path, mean and standard deviation

fig, axs = plt.subplots(1, 1)

t_axis = np.busday_count(t_m[0], t_m) / 252
plt.plot(t_axis.reshape(-1, 1), r_t_hor[:j_sel, :].T, color=lgrey, lw=1)
plt.ylabel('Bond return')
plt.xlabel('horizon')
l2 = plt.plot(t_axis, mu_r_t_hor + sig_r_t_hor, color='r')
plt.plot(t_axis, mu_r_t_hor - sig_r_t_hor, color='r')
l1 = plt.plot(t_axis, mu_r_t_hor, color='g')
plt.grid(False)

# empirical pdf
p = np.ones(j_) / j_
y_hist, x_hist = histogram_sp(r_t_hor[:, -1], k_=10 * np.log(j_))
y_hist = y_hist * scale  # adapt the hist height to the current xaxis scale
shift_y_hist = deltat / 12 + y_hist

emp_pdf = plt.barh(x_hist,
                   y_hist,
                   left=t_axis[-1],
                   height=x_hist[1] - x_hist[0],
                   facecolor=lgrey,
                   edgecolor=lgrey)

plt.plot(shift_y_hist, x_hist, color=dgrey, lw=1)
plt.plot([t_axis[-1], t_axis[-1]], [x_hist[0], x_hist[-1]],
         color=dgrey,
         lw=0.5)
Example #7
File: s_sp_anova.py  Project: s0ap/arpmRes
# ANOVA predictor
def chi(z):
    return m_x_0 * (z <= sigma_) + m_x_1 * (z > sigma_)


# -
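# quick usage demo of the ANOVA predictor with hypothetical inputs (the
# conditional means and threshold below are made up, not the values estimated
# in the original script)
import numpy as np


def chi_demo(z, m0=-0.01, m1=0.02, s=0.15):
    return m0 * (z <= s) + m1 * (z > s)


print(chi_demo(np.array([0.1, 0.2])))  # -> [-0.01  0.02]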

# ## Plots:

# +
plt.style.use('arpm')

# marginal distributions
p_0 = r_sandp_given_0.shape[0] / j_
p_1 = r_sandp_given_1.shape[0] / j_
f_r_sandp_0, bin0 = histogram_sp(r_sandp_given_0,
                                 k_=int(np.log(len(r_sandp_given_0))))
f_r_sandp_1, bin1 = histogram_sp(r_sandp_given_1,
                                 k_=int(np.log(len(r_sandp_given_1))))

# colors
teal = [0.2344, 0.582, 0.5664]
light_green_1 = [0.8398, 0.9141, 0.8125]
light_green_2 = [0.4781, 0.6406, 0.4031]
light_grey = [0.7, 0.7, 0.7]
orange = [0.94, 0.35, 0]
markersize = 60
j_plot = 100  # number of plotted simulations
xlim = [-0.1, 1.1]
ylim = [max(bin0[0], bin1[0]), min(bin0[-1], bin1[-1])]
matplotlib.rc('axes', edgecolor='none')
Example #8
# -

# ## Plots

# +
plt.style.use('arpm')
fig, ax = plt.subplots(4, 1)

xmin = -7
xmax = 7
ymin = -0.1
ymax = 0.65

# base distribution
plt.sca(ax[0])
f, xp = histogram_sp(x, p=p_base, k_=np.sqrt(j_))
plt.bar(xp, f, width=xp[1] - xp[0], facecolor=[.9, .9, .9], edgecolor='k')

sd_bar_base = np.linspace(mu_x_base - sig_x_base, mu_x_base + sig_x_base, 2)
plt.plot(sd_bar_base, [0, 0], 'b', lw=2, label='Standard deviation')
plt.plot(mu_x_base, 0, '.r', markersize=15, label='Expectation')
plt.title('Base distribution')

# updated distribution
plt.sca(ax[3])
f, xp = histogram_sp(x, p=p_upd, k_=np.sqrt(j_))
plt.bar(xp, f, width=xp[1] - xp[0], facecolor=[.9, .9, .9], edgecolor='k')

sd_bar_upd = np.linspace(mu_upd - sig_upd, mu_upd + sig_upd, 2)
plt.plot(sd_bar_upd, [0, 0], 'b', lw=2)
plt.plot(mu_upd, 0, '.r', markersize=15)
Example #9
u = np.random.random(j_)
# -

# ## Step 2: Compute the marginal (Gamma) simulations

# +
gamma1 = gamma(k1, scale=theta1)
gamma2 = gamma(k2, scale=theta2)
x1 = gamma1.ppf(u)
x2 = gamma2.ppf(u)
# -
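
# note (not part of the original script): because the same grades u feed both
# quantile functions, x1 and x2 are comonotonic, i.e. perfectly rank-correlated
from scipy import stats as sp_stats
print(sp_stats.spearmanr(x1, x2).correlation)  # -> 1.0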

# ## Step 3: Compute the normalized histograms of the marginal simulations

# +
f_x1, ksi_x1 = histogram_sp(x1)
f_x2, ksi_x2 = histogram_sp(x2)
# -

# ## Plots

# +
plt.style.use('arpm')
fig = plt.figure()
# colors
teal = [0.2344, 0.582, 0.5664]
light_grey = [0.6, 0.6, 0.6]
#
x1_min = min(x1)
x1_max = max(x1)
x2_min = min(x2)
Example #10
df = pd.DataFrame(output)
df.to_csv('../../../databases/temporary-databases/db_pricing_zcb.csv')

# ## Plots

# +
plt.style.use('arpm')
n_ = sig2_pl.shape[1]
fig, ax = plt.subplots(n_, 1)

lgray = [.7, .7, .7]  # light gray
dgray = [.5, .5, .5]  # dark gray

for n in range(n_):
    # histogram of the zero coupon bond P&L
    plt.sca(ax[n])
    n_bins = round(15 * np.log(j_))  # number of histogram bins
    [f, x_f] = histogram_sp(pl_thor[:, [n]], p=(1/j_ * np.ones((j_, 1))),
                            k_=n_bins)
    hf = plt.bar(x_f, f, width=x_f[1] - x_f[0], facecolor=lgray,
                 edgecolor=dgray)
    if n == 0:
        plt.title(
            r'First zcb: distribution of the P&L at the horizon' +
            '\n' + r' $\tau$ = ' + str(tau_hor*21) + ' days')
    else:
        plt.title(r'Second zcb: distribution of the P&L at the horizon' +
                  '\n' + r' $\tau$ = ' + str(tau_hor*21) + ' days')
add_logo(fig, location=1)
plt.tight_layout()
Example #11
    axs[0].set_position([0.05, 0.15, 0.65, 0.60])
    plt.sca(axs[0])
    t_axis = np.busday_count(t_m[0], t_m) / 252
    plt.plot(t_axis.reshape(-1, 1), y_plot[:j_sel, :].T, color=lgrey, lw=1)
    plt.yticks()
    plt.ylabel('Bond %s' % obj_plot)
    plt.xlabel('horizon')
    plt.xlim([np.min(t_axis), np.max(t_axis) + 3])
    l2 = plt.plot(t_axis, mu_plot + sig_plot, color='r')
    plt.plot(t_axis, mu_plot - sig_plot, color='r')
    l1 = plt.plot(t_axis, mu_plot, color='g')

    # empirical pdf
    p = np.ones(j_) / j_
    y_hist, x_hist = histogram_sp(y_plot[:, -1], k_=10 * np.log(j_))
    y_hist = y_hist / 10  # adapt the hist height to the current xaxis scale
    shift_y_hist = tau_hor / 12 + y_hist

    emp_pdf = plt.barh(x_hist, y_hist, left=t_axis[-1],
                       height=x_hist[1] - x_hist[0], facecolor=lgrey,
                       edgecolor=lgrey)

    plt.plot(shift_y_hist, x_hist, color=dgrey, lw=1)
    plt.plot([t_axis[-1], t_axis[-1]], [x_hist[0], x_hist[-1]], color=dgrey,
             lw=0.5)
    plt.legend(handles=[l1[0], l2[0], emp_pdf[0]],
               labels=['mean', ' + / - st.deviation', 'horizon pdf'])
    title = 'Coupon bond projected ' + obj_plot + ' at the horizon of ' + \
            str(tau_hor / 12) + ' years'
    plt.title(title)
Example #12
tau_hl_smoo = 15
tau_hl_scor = 100
z_smooth = smoothing(c, tau_hl_smoo)  # smoothing
z = scoring(z_smooth, tau_hl_scor)  # scoring
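
# minimal sketches of the two helpers above, assuming they are the usual
# exponentially weighted smoothing and z-scoring with half-life decay (the
# arpym implementations may differ in detail)
def smoothing_sketch(c, tau_hl):
    c = np.asarray(c, dtype=float)
    t_ = len(c)
    w = np.exp(-(np.log(2) / tau_hl) * np.arange(t_))  # decay weights
    return np.array([c[:t + 1][::-1] @ w[:t + 1] / np.sum(w[:t + 1])
                     for t in range(t_)])


def scoring_sketch(z, tau_hl):
    mu = smoothing_sketch(z, tau_hl)  # ewma location
    sig = np.sqrt(smoothing_sketch((z - mu)**2, tau_hl))  # ewma dispersion
    return (z - mu) / sig  # nan at t=0, where the dispersion is still zero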

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step03): Compute the smooth kernel probabilities

p_smooth = smooth_kernel_fp(z, z_star, h, gamma=gamma)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step04): Compute the effective number of scenarios

ens = effective_num_scenarios(p_smooth)

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_smooth_kernel_fp-implementation-step05): Compute flexible probabilities histogram

f_eps, x_eps = histogram_sp(epsi, p=p_smooth, k_=10*np.log(t_))

# ## Plots

# +
plt.style.use('arpm')

grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
[color_map, p_colors] = colormap_fp(p_smooth, np.min(p_smooth),
                                    np.max(p_smooth), grey_range, 0, 1,
                                    [1, 0])
plot_dates = np.array(date)
myFmt = mdates.DateFormatter('%d-%b-%Y')
date_tick = np.arange(84, t_-1, 800)

# VIX and market state
Example #13
for m in range(0, m_ + 1):
    mu_thor[m], sig2_thor = meancov_sp(x_tnow_thor[:, m].reshape(-1, 1))
    sig_thor[m] = np.sqrt(sig2_thor)
# -
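
# a minimal sketch of the scenario-probability mean/covariance helper used in
# the loop above, assuming flat probabilities when p is None (the arpym
# meancov_sp may differ in detail)
def meancov_sp_sketch(x, p=None):
    x = np.asarray(x)
    x = x.reshape(x.shape[0], -1)  # scenarios in rows
    if p is None:
        p = np.ones(x.shape[0]) / x.shape[0]
    mu = p @ x  # probability-weighted mean
    sig2 = ((x - mu).T * p) @ (x - mu)  # probability-weighted covariance
    return mu, sig2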

# ## Plots

# +
# preliminary settings
plt.style.use('arpm')
mydpi = 72.0
lgrey = [0.8, 0.8, 0.8]  # light grey
dgrey = [0.2, 0.2, 0.2]  # dark grey
t_m = np.arange(0, m_ + 1)
j_plot = 40  # number of paths to be plotted
h, b = histogram_sp(x_tnow_thor[:, -1], k_=10 * np.log(j_))
fig, ax = plt.subplots()
ax.set_facecolor('white')
# axis settings
min_x = np.min(
    [np.min(x_tnow_thor[:, :]) - 0.1, mu_thor[-1] - 4 * sig_thor[-1]])
max_x = np.max(
    [np.max(x_tnow_thor[:, -1]) + 0.1, mu_thor[-1] + 4 * sig_thor[-1]])
plt.axis([t_m[0], t_m[-1] + np.max(h) * 0.2 + 0.03, min_x, max_x])
plt.xlabel('time (days)')
plt.ylabel('Log-value')
plt.xticks(t_m)
plt.yticks()
plt.grid(False)
plt.title('Projection of %s log-value' % (stock))
Example #14
out = out.join(pd.DataFrame({'nu': pd.Series(nu)}))
out.to_csv(
    '../../../databases/temporary-databases/db_calloption_estimation.csv',
    index=None)
del out
# -

# ## Plots

# +
plt.style.use('arpm')

# marginal distribution
fig = plt.figure(figsize=(1280 / 72, 720 / 72), dpi=72)

f_eps, x_eps = histogram_sp(epsi[:, i_plot - 1], p=p, k_=10 * np.log(t_))
bar_width = x_eps[1] - x_eps[0]
plt.bar(x_eps,
        f_eps.flatten(),
        width=bar_width,
        fc=[0.7, 0.7, 0.7],
        edgecolor=[0.5, 0.5, 0.5])

plt.title('Distribution of the selected invariant',
          fontweight='bold',
          fontsize=20)
plt.xlabel('Invariant', fontsize=17)
add_logo(fig, location=1, set_fig_size=False)
fig.tight_layout()

# copula correlation matrix
Example #15
# -

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_aggregation_quad-implementation-step03): Scenarios for the portfolio P&L and its expectation

pi_h = pl @ h
mu_pi_h = np.mean(pi_h)

# ## Plots

# +
plt.style.use('arpm')

fig = plt.figure()
lgray = [.8, .8, .8]  # light gray
rescale_pdf = 200000
pdf_mc, bin_mc = histogram_sp(pi_h, p=(1 / j_ * np.ones(j_)), k_=n_bins)

# histogram obtained from exact pricing
plt.barh(bin_mc,
         pdf_mc * rescale_pdf,
         left=time2hor_tnow * 252,
         height=bin_mc[1] - bin_mc[0],
         facecolor=lgray,
         edgecolor=lgray,
         lw=2)

# saddle point approximation of the Quadn pdf
plot1 = plt.plot(time2hor_tnow * 252 + pdf_quadn * rescale_pdf,
                 quantile_quadn,
                 color='r')
Example #16
# scenario-probability quantile
q_sp = quantile_sp(1 - c, pl_h)
# -

# ## Plots

# +
plt.style.use('arpm')
lgray = [.8, .8, .8]  # light gray
dgray = [.7, .7, .7]  # dark gray
fig = plt.figure()

# histogram of the portfolio's ex-ante P&L
j_ = pl_h.shape[0]
n_bins = np.round(10 * np.log(j_))  # number of histogram bins
y_hist, x_hist = histogram_sp(pl_h, p=1 / j_ * np.ones(j_), k_=n_bins)

# Cornish-Fisher quantile approximation and scenario-probability quantile
l1 = plt.plot(q_sp, 1 - c, 'b')
l2 = plt.plot(q_cf, 1 - c, 'r', linestyle='--', lw=1)
l3 = plt.bar(x_hist,
             y_hist / max(y_hist),
             width=x_hist[1] - x_hist[0],
             facecolor=lgray,
             edgecolor=dgray)
plt.xlim([np.min(q_cf), np.max(q_cf)])
plt.ylim([0, 1])  # set 'ylim' to [0, 0.1] to focus on the left tail only
leg = plt.legend(['MC quantile', 'Cornish Fisher approx', 'MC distribution'])
plt.title('Monte Carlo quantile and Cornish-Fisher approximation')
add_logo(fig)
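
# a minimal sketch of the third-order Cornish-Fisher quantile approximation
# behind q_cf, assuming the mean mu, standard deviation sig and skewness sk of
# the P&L are available (the excerpt does not show how q_cf was computed)
from scipy import stats


def cornish_fisher_sketch(mu, sig, sk, c):
    z = stats.norm.ppf(c)  # standard normal quantile
    return mu + sig * (z + sk / 6 * (z**2 - 1))  # skewness correction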
Example #17
n_ = len(h)  # number of instruments
# scenarios for the standard normal random variable Z
z = simulate_normal(np.zeros(n_), np.eye(n_), j_, 'PCA')
sigma_riccati = solve_riccati(sig2, np.eye(n_))  # Riccati root of sigma2
mu = np.array([mu] * j_)  # duplicate expectation for j_ scenarios
v_tnow = np.array([v_tnow] * j_)  # duplicate initial values for j_ scenarios
pi = np.exp(mu + z @ sigma_riccati) - v_tnow  # P&L's scenarios
p = np.ones(j_) / j_  # flat scenario-probabilities

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-implementation-step02): Compute the ex-ante performance scenarios

y_h = h @ pi.T  # ex-ante performance scenarios
# number of bins for the ex-ante performance histogram
bins = np.round(150 * np.log(j_))
# centers and heights of the bins
heights, centers = histogram_sp(y_h, p=p, k_=bins)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-implementation-step03): Compute the certainty-equivalent

# +
# Define the utility function and its inverse


def utility_exp(y, lam):  # exponential utility function
    return -np.exp(-lam * y)


def ce_exp(z, lam):  # inverse function
    return -np.log(-z) / lam
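

# the certainty-equivalent then follows by mapping the expected utility back
# through the inverse utility; lam below is a hypothetical risk-aversion value
# (the excerpt does not show the one used in the original script)
lam = 2.0  # hypothetical
cert_eq = ce_exp(p @ utility_exp(y_h, lam), lam)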

Example #18
ax = plt.subplot2grid((1, 11), (0, 10))
cbar = np.linspace(v_max, v_min, 200)
plt.imshow(cbar.reshape(-1, 1), cmap=cm.jet, aspect='auto')
plt.xticks([])
tick = np.linspace(0, 199, 10, dtype=int)
plt.yticks(tick, np.round(cbar[tick], decimals=1))
plt.title('Scale')
plt.grid(False)
add_logo(fig2, axis=ax, size_frac_x=3 / 4)

# Histograms
n_plot = 3
fig3 = plt.figure()
for i in range(n_plot):
    f_t2, x_t2 = histogram_sp(v_t2[:, 2 * i + 1] / v_t2[:, 0], p=q_t2, k_=100)
    f_t1, x_t1 = histogram_sp(v_t1[:, 2 * i + 1] / v_t1[:, 0], p=q_t1, k_=20)
    x_m = v_t[2 * i + 1] / v_t[0]

    ax = plt.subplot2grid((n_plot, 2), (i, 0))
    plt.barh(x_t2,
             f_t2,
             x_t2[1] - x_t2[0],
             color=[0.8, 0.8, 0.8],
             edgecolor=[0, 0, 0])
    plt.plot([0, np.max(f_t2) + 0.01], [x_m, x_m], 'r')
    plt.ylim([x_m - 3, x_m + 2])

    ax = plt.subplot2grid((n_plot, 2), (i, 1))
    plt.barh(x_t1,
             f_t1,
Example #19
df = pd.DataFrame(output)
df.to_csv('../../../databases/temporary-databases/db_aggregation_regcred.csv')
# -

# ## Plots

# +
plt.style.use('arpm')

n_bins = 350  # number of bins

# histogram of the conditional losses
y, x = histogram_sp(def_loss_z, p=p_j, k_=n_bins)

fig = plt.figure()
l_0 = plt.bar(x, y / np.max(y),
              width=np.diff(x, 1)[0], label='Conditional distribution')
l_1 = plt.plot(def_loss_grid, pdf_def_loss / np.sum(pdf_def_loss) * 10,
               'k-', label='Unconditional distribution')
l_2 = plt.plot(e[i_star], 0, 'ro', markersize=5, markeredgecolor='r',
               markerfacecolor='r', label='Conditional expectation')
plt.legend(loc=1, fontsize=14)
plt.ylim([0, 1.5])

plt.title('Regulatory credit'
          ' approximation for {n_counterparties} counterparties'.
          format(n_counterparties=n_))
Example #20
# ## Plots

# +
plt.style.use('arpm')

l_ = 125
x = np.linspace(mu - 4, mu + 4, l_)

f_epsi = stats.norm.pdf(x, mu, 1)  # invariants' pdf

# histogram computations

# sample mean histogram
m_hist, m_x = histogram_sp(m_hat)
# product estimator histograms
pi_hist, pi_x = histogram_sp(pi_hat)
# constant estimator histograms
k_hist, k_x = histogram_sp(k_hat.T, xi=np.arange(-33, 36))

l_m_hist, l_m_x = histogram_sp(l_m)
l_pi_hist, l_pi_x = histogram_sp(l_pi)
l_k_hist, l_k_x = histogram_sp(l_k, xi=np.arange(-33, 36))

colhist = [.8, .8, .8]
orange = [1, 0.4, 0]
green = [0.1, 0.8, 0]
dark = [0.2, 0.2, 0.2]
blue = [0, 0.4, 1]
Example #21
mu = np.random.uniform(0, 1, size=n_)
sigma = np.random.uniform(0, 1, size=(n_, n_))
sigma2 = sigma @ sigma.T  # make sigma2 positive definite

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_saddle_point_vs_mcfp_quadn-implementation-step01): Generate quadratic-normal scenarios

y, p_ = simulate_quadn(alpha, beta, gamma, mu, sigma2, j_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_saddle_point_vs_mcfp_quadn-implementation-step02): Compute the saddle point approximation of the pdf

y_grid = np.linspace(quantile_sp(0.001, y, p_), quantile_sp(0.999, y, p_), 500)
cdf, pdf = saddle_point_quadn(y_grid, alpha, beta, gamma, mu, sigma2)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_saddle_point_vs_mcfp_quadn-implementation-step03): Compute the heights and bin centers of the normalized empirical histogram

f_hat, grid = histogram_sp(y, p=p_, k_=200)

# ## Plots

# +
plt.style.use('arpm')
darkred = [.9, 0, 0]
lightgrey = [.8, .8, .8]
mydpi = 72.0
f = plt.figure(figsize=(1280.0 / mydpi, 720.0 / mydpi), dpi=mydpi)

gs = gridspec.GridSpec(2, 2)
gs.update(wspace=0.5, hspace=0.5)
ax1 = plt.subplot(gs[0, :])
ax1.bar(grid,
Example #22
# +
lgrey = [0.8, 0.8, 0.8]  # light grey
dgrey = [0.4, 0.4, 0.4]  # dark grey

num_plot = min(j_, 20)
fig = plt.figure()

plt.xlim([0, m_+int(m_/3)])
for j in range(num_plot):
    plt.plot(np.arange(0, m_+1), v_call_thor[j, :], lw=1, color=lgrey)

l2 = plt.plot(np.arange(m_+1), mu_v+sig_v, 'r')
plt.plot(np.arange(m_+1), mu_v-sig_v, 'r')
l1 = plt.plot(np.arange(0, m_+1), mu_v, 'g')

y_hist, x_hist = histogram_sp(v_call_thor[:, m_], k_=50*np.log(j_))
y_hist = y_hist*2500
shift_y_hist = m_ + y_hist
# empirical pdf
pdf = plt.barh(x_hist, y_hist, (max(x_hist)-min(x_hist))/(len(x_hist)-1),
               left=m_, facecolor=lgrey, edgecolor=lgrey,
               lw=2, label='horizon pdf')
plt.plot(shift_y_hist, x_hist, color=dgrey, lw=1)
plt.legend(handles=[l1[0], l2[0], pdf[0]],
           labels=['mean', ' + / - st.deviation', 'horizon pdf'])
plt.title("Call option projected value at the horizon")
add_logo(fig)
fig.tight_layout()

fig2 = plt.figure()
Example #23
myFmt = mdates.DateFormatter('%d-%b-%y')

# call option P&L
fig, ax = plt.subplots(2, 1)
# scatter plot
dates = pd.to_datetime(dates)
ax[0].scatter(dates, pi_call, c=fpcolors, marker='.', cmap=cm)
ax[0].axis([min(dates), max(dates), np.min(pi_call), np.max(pi_call)])
ax[0].set_xticks(dates[d])
ax[0].xaxis.set_major_formatter(myFmt)
ax[0].set_title('Scatter plot call P&L')

# histogram
n_bins = np.round(20 * np.log(ens))
height_1, center_1 = histogram_sp(pi_call, p=p, k_=n_bins)
ax[1].bar(center_1, height_1, facecolor=colhist, edgecolor=colhistedge)
ax[1].set_xlim([np.min(pi_call), np.max(pi_call)])
ax[1].set_title('Histogram call P&L')
s1 = 'Mean   %1.3e \nSdev    %1.3e ' % (mu_pi_call, sigma_pi_call)

plt.text(np.max(pi_call),
         np.max(height_1),
         s1,
         horizontalalignment='right',
         verticalalignment='top')
add_logo(fig)
plt.tight_layout()

# put option P&L
fig, ax = plt.subplots(2, 1)
Example #24
# Display regression plane, generic plane and observations of selected target
# variables and factors
plt.style.use('arpm')
fig1, ax = plt.subplots(1, 1, subplot_kw={'projection': '3d'})

ax.plot_wireframe(z_2, z_1, x_reg, edgecolor='b')
ax.scatter(z[:, spot[2]], z[:, spot[1]],
           x[:, spot[0]], marker='.', color='k')
plt.legend(['regression plane'])
plt.xlabel('factor %d' % (spot[2]+1), labelpad=10)
plt.ylabel('factor %d' % (spot[1]+1), labelpad=10)
ax.set_zlabel('mkt variable %d' % (spot[0]+1), labelpad=10)

# (untruncated) correlations among residuals
corr_u = c2_u[np.nonzero(c2_u)]  # reshape the correlations
n, xout = histogram_sp(corr_u)

add_logo(fig1)
plt.tight_layout()

fig2 = plt.figure()
plt.bar(xout, n, width=xout[1]-xout[0], facecolor=[.7, .7, .7], edgecolor='k')
plt.title('Correlations among residuals')

# (untruncated) correlations between factors and residuals
corr_uz = np.reshape(c_uz, (n_*k_,), 'F')  # reshape the correlations
n, xout = histogram_sp(corr_uz)

add_logo(fig2, location=1)
plt.tight_layout()
Example #25
         label='BMS hedge')
plt.plot(s,
         np.squeeze(bs_curve_current + delta_fod * (s - v_stock_u[0, 0])),
         color='b',
         label='FoD hedge')
plt.plot(s, bs_payoff.flatten(), color='k')
plt.legend()
plt.ylabel('call option value')
plt.xlabel('underlying')
plt.title('Time to horizon: ' + str(m_) + ' days')

add_logo(fig)
plt.tight_layout()

fig = plt.figure()
f_hist, x_hist = histogram_sp(r_call[:, index], k_=100)
plt.bar(x_hist,
        f_hist.flatten(), (max(x_hist) - min(x_hist)) / (len(x_hist) - 1),
        color=lgray,
        edgecolor=lgray,
        linewidth=2)
f1_hist, x1_hist = histogram_sp(r_bms, k_=100)
plt.plot(x1_hist, f1_hist.flatten(), color='r', label='BMS hedged pdf')
f2_hist, x2_hist = histogram_sp(r_fod, k_=100)
plt.plot(x2_hist, f2_hist.flatten(), color='b', label='FoD hedged pdf')
plt.legend()
plt.title('Repriced call option return')

add_logo(fig)
plt.tight_layout()
Example #26
# quasi-optimal portfolio
out = {db_v_tnow.columns[i]: h_qsi[i]
       for i in range(len(h_qsi))}
out = pd.DataFrame(out, index=[0])
if copula_marginal:
    out.to_csv(path+'db_final_portfolio.csv', index=False)
else:
    out.to_csv(path+'db_final_portfolio_historical.csv', index=False)
del out

# ## Plots

# +
plt.style.use('arpm')
fig1 = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
f, xp = histogram_sp(y_h, p=p, k_=30)
xp = xp*1e-6
plt.bar(xp, f, width=xp[1]-xp[0], facecolor=[.3, .3, .3], edgecolor='k',
        label='Current holdings')
f, xp = histogram_sp(y_h_es_qsi, p=p, k_=30)
xp = xp*1e-6
plt.bar(xp, f, width=xp[1]-xp[0], facecolor=[.6, .6, .6, .9],
        edgecolor='k', label='Optimal holdings')
plt.title('Optimized portfolio ex-ante P&L distribution',
          fontsize=20, fontweight='bold')
plt.xlabel(r'$Y_h$ (million USD)', fontsize=17)
plt.legend(fontsize=17)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
add_logo(fig1, set_fig_size=False)
Example #27
w2[n_minus] = -1

# HFP std of equal-weights portfolio
s_1_hat = np.sqrt(w1.T @ s2_x_hat @ w1)
# truncated std of equal-weights portfolio
s_1_trunc = np.sqrt(w1.T @ s2_x_trunc @ w1)

# HFP std of long-short portfolio
s_2_hat = np.sqrt(w2.T @ s2_x_hat @ w2)
# truncated std of long-short portfolio
s_2_trunc = np.sqrt(w2.T @ s2_x_trunc @ w2)
# -

# ## [Step 10](https://www.arpm.co/lab/redirect.php?permalink=s_pca_truncated_lfm-implementation-step10): Define data used for plotting the histogram

[f_l, xi_l] = histogram_sp(c2_u_hat[np.triu_indices(c2_u_hat.shape[0], 1)])

# ## Plots

# +
# Figure specifications
plt.style.use('arpm')

# Histogram: correlations among residuals

mydpi = 72.0
fig = plt.figure(figsize=(1280.0 / mydpi, 720.0 / mydpi), dpi=mydpi)
h = plt.bar(xi_l,
            f_l,
            width=xi_l[1] - xi_l[0],
Example #28
          fontsize=20)
ax3.xaxis.set_major_formatter(myFmt)
add_logo(fig1, location=1, set_fig_size=False)
fig1.tight_layout()

# marginal distributions

n_bins = 10 * np.log(t_)

hfp = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
ax = hfp.add_subplot(111)

if i_plot - 1 in ind_parametric:
    # HFP histogram
    f_eps, x_eps = histogram_sp(epsi_bonds[:, i_plot - 1],
                                p=p_bonds,
                                k_=n_bins)
    bar_width = x_eps[1] - x_eps[0]
    plt.bar(x_eps,
            f_eps.flatten(),
            width=bar_width,
            fc=[0.7, 0.7, 0.7],
            edgecolor=[0.5, 0.5, 0.5])

    # Student t fit
    plt.plot(
        x_eps,
        np.squeeze(
            tstu.pdf(x_eps, db_estimation_parametric[i_plot - 1]['nu'],
                     db_estimation_parametric[i_plot - 1]['mu'],
                     np.sqrt(db_estimation_parametric[i_plot - 1]['sig2']))))
Example #29
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step02): Compute simulations of the target and factors

x = simulate_normal(mu_x, sigma2_x, j_)
z = (x - mu_x) @ e
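
# e used above (and lambda2 used below) presumably come from the spectral
# decomposition of sigma2_x; a minimal sketch, with eigenvalues sorted in
# decreasing order:
lambda2_sketch, e_sketch = np.linalg.eigh(sigma2_x)
order = np.argsort(lambda2_sketch)[::-1]
lambda2_sketch, e_sketch = lambda2_sketch[order], e_sketch[:, order]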

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_display_corr_norm_ellips-implementation-step03): Perform computations for plots

x_bar = plot_ellipse(mu_x,
                     sigma2_x,
                     r=r,
                     display_ellipse=False,
                     plot_axes=True,
                     plot_tang_box=True,
                     color='k')
[f_z1, xi_z1] = histogram_sp(z[:, 0], k_=300)
[f_z2, xi_z2] = histogram_sp(z[:, 1], k_=300)

# ## Plots

# +
plt.style.use('arpm')

mydpi = 72.0
f = plt.figure(figsize=(1280.0 / mydpi, 720.0 / mydpi), dpi=mydpi)

# setup range
x_range = [-5, 5]

# long and short axis
u_axes0 = e @ (np.sqrt(lambda2) * np.array([[-r, r], [0, 0]]).T).T
Example #30
z = scoring(z_smooth, tau_hl_scor)  # scoring

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step03): Compute the flexible probabilities conditioned via minimum relative entropy

prior = exp_decay_fp(t_-1, tau_hl_prior)
# minimum relative entropy flexible probabilities
p_entropy = conditional_fp(z, z_star, alpha, prior)

# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step04): Compute the effective number of scenarios

ens = effective_num_scenarios(p_entropy)  # effective number of scenarios

# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_min_entropy_fp-implementation-step05): Compute the flexible probabilities histogram

n_bins = 10 * np.log(t_-1)
f_eps, x_eps = histogram_sp(epsi, p=p_entropy, k_=n_bins)  # flex. prob. hist.

# ## Plots

# +
plt.style.use('arpm')

grey_range = np.r_[np.arange(0, 0.6 + 0.01, 0.01), .85]
[color_map, p_colors] = colormap_fp(p_entropy, np.min(p_entropy),
                                    np.max(p_entropy), grey_range, 0, 1,
                                    [1, 0])
plot_dates = np.array(date)
myFmt = mdates.DateFormatter('%d-%b-%Y')
date_tick = np.arange(84, t_-2, 800)

# VIX and market state