def plot_bachelier_digital_option(_time, _timestep, _strike):
    #######
    ## generate normal increments for a single Brownian path
    #######
    size = int(_time / _timestep)
    sample = np.random.normal(0, np.sqrt(_timestep), size)
    path_bm = [sum(sample[0:n]) for n in range(size)]
    x = [_timestep * k for k in range(size)]
    remaining_time = [np.sqrt(_time - x_i) for x_i in x]

    ## theoretical option value over time on a single path
    path_digital_option = [
        1 - dist.standard_normal_cdf((_strike - bm) / rt)
        for (bm, rt) in zip(path_bm, remaining_time)
    ]

    hedge_proportion = [0] * size
    path_digital_option_hedge = [0] * size

    ####
    ## build the discrete hedging strategy along the path
    ####
    _t_remain = remaining_time[0]
    path_digital_option_hedge[0] = path_digital_option[0]
    hedge_proportion[0] = dist.standard_normal_pdf(
        (_strike - path_bm[0]) / _t_remain) / _t_remain
    for j in range(1, size):
        ## hedge ratio is computed from the information available at t_{j-1}
        _t_remain = remaining_time[j - 1]
        hedge_proportion[j] = dist.standard_normal_pdf(
            (_strike - path_bm[j - 1]) / _t_remain) / _t_remain
        ## self-financing update: previous value plus hedge times the Brownian
        ## increment over (t_{j-1}, t_j]
        path_digital_option_hedge[j] = path_digital_option_hedge[j - 1] + \
            (path_bm[j] - path_bm[j - 1]) * hedge_proportion[j]

    mp = pu.PlotUtilities("Paths of Digital Option Value", 'Time',
                          "Option Value")
    trackHedgeOnly = True
    if trackHedgeOnly:
        mp.multiPlot(x, [path_digital_option, path_digital_option_hedge])
    else:
        arg = [
            'Option Value', 'Hedge Proportion', 'Underlying Brownian Motion'
        ]
        colors = ['green', 'red', 'blue']
        mp.subPlots(x, [path_digital_option, hedge_proportion, path_bm], arg,
                    colors)


def terminal_utility_histogram(_b, _r, _sigma, T, _sample_size):
    #####
    ## plot the terminal utility of a stock vs an optimal strategy (for various utility functions)
    #####
    ## terminal Brownian motion W_T has standard deviation sqrt(T)
    sample = np.random.normal(0, np.sqrt(T), _sample_size)
    alpha = .0
    ## Merton fraction for power utility with exponent alpha (alpha = 0 gives log utility)
    pi = (_b - _r) / (_sigma * _sigma) / (1 - alpha)
    sigma_pi = _sigma * pi
    b_pi = _r + pi * (_b - _r)
    ## terminal value of a geometric Brownian motion: exp((mu - sigma^2 / 2) T + sigma W_T)
    sample_value_stock = [
        np.exp((_b - 0.5 * _sigma * _sigma) * T + _sigma * ns) for ns in sample
    ]
    sample_value_pi = [
        np.exp((b_pi - 0.5 * sigma_pi * sigma_pi) * T + sigma_pi * ns)
        for ns in sample
    ]

    ##
    ## we then turn the outcome into a histogram
    ##
    num_bins = 100
    mp = pu.PlotUtilities(
        r"Terminal Wealth for Stock and Mixed Portfolio for $\pi=${0}".format(
            pi), 'Outcome', 'Rel. Occurrence')
    labels = ['Stock', 'Portfolio']
    mp.plotHistogram([sample_value_stock, sample_value_pi], num_bins, labels)


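## For log utility the Merton fraction pi = (b - r) / sigma^2 maximises the
## expected log growth rate r + pi (b - r) - (pi sigma)^2 / 2 of the wealth
## process.  A minimal sketch comparing the growth rate of the pure stock with
## that of the mixed portfolio; the function name and parameter values are
## illustrative only and not part of the plotting code above.
def _compare_log_growth_rates(_b=0.07, _r=0.02, _sigma=0.2):
    pi = (_b - _r) / (_sigma * _sigma)
    growth_stock = _b - 0.5 * _sigma * _sigma
    growth_portfolio = _r + pi * (_b - _r) - 0.5 * (pi * _sigma) ** 2
    print(growth_stock, growth_portfolio)

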
def stochastic_integral_hist(_steps, _paths, scaling):
    output_lhs = [0.] * _paths
    output_rhs = [0.] * _paths
    n = _steps * scaling
    sqrt_delta_t = 1. / np.sqrt(scaling)
    for m in range(_paths):
        normal_sample = np.random.normal(0., 1., n)
        increments_bm = [s * sqrt_delta_t for s in normal_sample]
        ### create Brownian Motion paths
        bm_path = [0.] * (n + 1)
        bm_path[1:n + 1] = np.cumsum(increments_bm)
        ### left-point (Ito) and right-point Riemann sums of int B dB
        output_lhs[m] = sum(
            [f * g for f, g in zip(bm_path[0:n], increments_bm)])
        output_rhs[m] = sum(
            [f * g for f, g in zip(bm_path[1:n + 1], increments_bm)])

    num_bins = 50
    mp = pu.PlotUtilities(
        r'Stochastic Integral $\int_0^t B(u)\,dB(u)$ for 2 approximations',
        'Outcome', 'Rel. Occurrence')
    colors = ['#0059ff', '#db46e2', '#ffc800', '#99e45e']
    mp.plotMultiHistogram([output_lhs, output_rhs], num_bins, colors)


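## The two Riemann sums above differ only by the sum of squared increments:
## for any path, sum B_k dB_k = (B_t^2 - sum dB_k^2) / 2 and
## sum B_{k+1} dB_k = (B_t^2 + sum dB_k^2) / 2, and sum dB_k^2 approaches t as
## the grid is refined.  A minimal sketch of that identity on a single
## simulated path; the function name and default arguments are illustrative.
def _check_riemann_sum_identity(_steps=1, scaling=1000):
    n = _steps * scaling
    increments = np.random.normal(0., np.sqrt(1. / scaling), n)
    bm = np.concatenate(([0.], np.cumsum(increments)))
    lhs = sum(bm[0:n] * increments)              # left-point (Ito) sum
    rhs = sum(bm[1:n + 1] * increments)          # right-point sum
    quad_var = sum(increments * increments)      # approximates t = _steps
    print(lhs, (bm[n] ** 2 - quad_var) / 2.)     # should agree exactly
    print(rhs, (bm[n] ** 2 + quad_var) / 2.)     # should agree exactly

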
def binomial_plot(p, n):
    x_ax = [k for k in range(n)]
    # binomial distribution
    y_ax = [dist.binomial_pdf(p, k, n) for k in range(n)]

    mp = pu.PlotUtilities("Binomial Distribution for p={0}".format(p),
                          "# Successes", "Probability")
    mp.multiPlot(x_ax, [y_ax], 'o')


def binomial_lln(sample_size, p):
    ######
    ## Step 1 - create sample of independent uniform random variables
    lower_bound = 0.
    upper_bound = 1.
    uni_sample = np.random.uniform(lower_bound, upper_bound, sample_size)

    ######
    ## Step 2 - transform them to $B(1,p)$ distribution
    sample = [dist.binomial_inverse_cdf(p, u) for u in uni_sample]

    x_ax = [k for k in range(sample_size)]  # values on the x axis
    n_plots = 2
    y_ax = [[0.] * sample_size for j in range(n_plots)]
    # y_values (1) - constant level p, the theoretical mean
    y_ax[1] = [p for x in range(sample_size)]
    # y_values (0) - cumulative average of all the samples
    y_ax[0] = [sum(sample[0:k + 1]) / (k + 1) for k in range(sample_size)]

    mp = pu.PlotUtilities("Cumulative Average", 'x', 'Average')
    mp.multiPlot(x_ax, y_ax)


def compounding_plot(rate, freq, min_val, max_val, steps):
    step = (max_val - min_val) / steps
    n_plots = len(freq)

    # values on the x axis
    x_ax = [0.] * steps
    for k in range(steps):
        x_ax[k] = min_val + step * k

    ## container for y axis
    y_ax = [0.] * n_plots
    for k in range(n_plots):
        y_ax[k] = [0.] * steps

    starting_value = 1.
    for k in range(n_plots):
        #######
        ### linear interpolation on a grid given by the compounding frequency
        #######
        __n = int((max_val - min_val) / freq[k]) + 1
        x_temp = [min_val] * __n
        y_temp = [starting_value] * __n
        for m in range(1, __n):
            x_temp[m] = min_val + float(m) * freq[k]
            y_temp[m] = y_temp[m - 1] * (1. + rate * freq[k])
        y_ax[k] = np.interp(x_ax, x_temp, y_temp)

    mp = pu.PlotUtilities('Compounding Account Value', 'x', 'Value')
    mp.multiPlot(x_ax, y_ax)


def distorted_plot(rate, vols, min_val, max_val, steps):
    step = (max_val - min_val) / steps
    n_plots = len(vols)

    # values on the x axis
    x_ax = [0.] * steps
    for k in range(steps):
        x_ax[k] = min_val + step * k

    ## container for y axis
    starting_value = 1.
    y_ax = [0.] * n_plots
    for k in range(n_plots):
        y_ax[k] = [starting_value] * steps

    for k in range(n_plots):
        for m in range(1, steps):
            if vols[k] <= 0.:
                random_shock = 0.
            else:
                random_shock = np.random.normal(0., vols[k] * np.sqrt(step))
            y_ax[k][m] = y_ax[k][m - 1] * (1. + rate * step + random_shock)

    mp = pu.PlotUtilities('Compounding Account Value With Noise', 'x', 'Value')
    mp.multiPlot(x_ax, y_ax)


def compounding_plot(rate, freq, min_val, max_val, steps):
    step = (max_val - min_val) / steps
    # values on the x axis
    x_ax = [min_val + step * k for k in range(steps)]

    ## container for y axis
    y_ax = [[0.] * steps for f in freq]

    starting_value = 1.
    k = 0
    for f in freq:
        #######
        ### linear interpolation on a grid given by the compounding frequency
        #######
        __n = int((max_val - min_val) / f) + 1
        x_temp = [min_val + float(m) * f for m in range(__n)]
        y_temp = [starting_value] * __n
        for m in range(1, __n):
            y_temp[m] = y_temp[m - 1] * (1. + rate * f)
        y_ax[k] = np.interp(x_ax, x_temp, y_temp)
        k = k + 1

    mp = pu.PlotUtilities('Compounding Account Value', 'x', 'Value')
    mp.multiPlot(x_ax, y_ax)


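## As the compounding interval f shrinks, (1 + rate * f)^(t / f) approaches the
## continuously compounded value exp(rate * t).  A minimal sketch of that limit;
## the function name and the chosen frequencies are illustrative only.
def continuous_compounding_limit(rate=0.05, t=10.):
    for f in [1., 0.5, 0.25, 1. / 12., 1. / 252.]:
        n = int(t / f)
        discrete = (1. + rate * f) ** n
        print(f, discrete, np.exp(rate * t))

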
def si_integrand_only(_steps, _paths, scaling):
    output_lhs_non_si = [0.] * _paths
    output_rhs_non_si = [0.] * _paths
    n = _steps * scaling
    delta_t = 1. / scaling
    sqrt_delta_t = 1. / np.sqrt(scaling)
    for m in range(_paths):
        ### create normal increments scaled to the right time step
        normal_sample = np.random.normal(0., 1., n)
        increments_bm = [s * sqrt_delta_t for s in normal_sample]
        ### create Brownian Motion paths
        bm_path = [0.] * (n + 1)
        bm_path[1:n + 1] = np.cumsum(increments_bm)
        ### left-point and right-point Riemann sums of the pathwise integral int B(u) du
        output_lhs_non_si[m] = sum(bm_path[0:n]) * delta_t
        output_rhs_non_si[m] = sum(bm_path[1:n + 1]) * delta_t

    num_bins = 50
    mp = pu.PlotUtilities(
        r'Riemann Integral $\int_0^t B(u)\,du$ for 2 approximations',
        'Outcome', 'Rel. Occurrence')
    colors = ['#0059ff', '#db46e2', '#ffc800', '#99e45e']
    mp.plotMultiHistogram([output_lhs_non_si, output_rhs_non_si], num_bins,
                          colors)


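## Both approximations above converge to int_0^T B(u) du with T = _steps, which
## is normally distributed with mean 0 and variance T^3 / 3.  A minimal check of
## the sample variance against that value; the function name and default
## arguments are illustrative only.
def _check_integrated_bm_variance(_steps=1, _paths=5000, scaling=100):
    n = _steps * scaling
    delta_t = 1. / scaling
    outcomes = [0.] * _paths
    for m in range(_paths):
        increments = np.random.normal(0., np.sqrt(delta_t), n)
        bm_path = np.concatenate(([0.], np.cumsum(increments)))
        outcomes[m] = sum(bm_path[0:n]) * delta_t
    print(np.var(outcomes), _steps ** 3 / 3.)

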
def binomial_lln_hist(sample_size, repeats, p):
    ### plot histogram of the normalised average of B(1,p) samples
    sample_value = [0.] * repeats
    num_bins = 35
    for i in range(repeats):
        ######
        ## Step 1 - create sample of independent uniform random variables
        lower_bound = 0.
        upper_bound = 1.
        uni_sample = np.random.uniform(lower_bound, upper_bound, sample_size)
        ######
        ## Step 2 - transform them to $B(1,p)$ distribution
        sample = [dist.binomial_inverse_cdf(p, u) for u in uni_sample]
        sample_value[i] = (sum(sample) - sample_size * p) / np.sqrt(
            p * (1 - p)) / sample_size

    mp = pu.PlotUtilities(
        "Histogram of Normalised Binomial Average For Sample of Size={0}".format(
            sample_size), 'Outcome', 'Rel. Occurrence')
    mp.plotHistogram(sample_value, num_bins)


def correlated_defaults_scatter(lambda_1, lambda_2, rhos, size):
    tau_2 = [0] * len(rhos)
    ## two independent standard normal samples: the first block drives tau_1,
    ## the second block the correlated factor behind tau_2
    sns = np.random.standard_normal(2 * size)
    x = [sns[k] for k in range(size)]
    tau_1 = [
        dist.exponential_inverse_cdf(lambda_1, dist.standard_normal_cdf(x1))
        for x1 in x
    ]
    index = 0
    for rho in rhos:
        y = [
            rho * sns[k] + np.sqrt(1 - rho * rho) * sns[k + size]
            for k in range(size)
        ]
        tau_2[index] = [
            dist.exponential_inverse_cdf(lambda_2,
                                         dist.standard_normal_cdf(y1))
            for y1 in y
        ]
        index = index + 1

    ### scatter plot of the simulated defaults
    colors = ['blue', 'green', 'orange', 'red', 'yellow']
    mp = pu.PlotUtilities('Default Times with Correlations={0}'.format(rhos),
                          'x', 'y')
    mp.scatterPlot(tau_1, tau_2, rhos, colors)


def poisson_plot(lam, upper_bound):
    n = upper_bound + 1
    x_ax = [k for k in range(n)]
    # poisson distribution
    y_ax = [dist.poisson_pdf(lam, k) for k in range(n)]

    mp = pu.PlotUtilities("Poisson Distribution for lambda={0}".format(lam),
                          "# Successes", "Probability")
    mp.multiPlot(x_ax, [y_ax], 'o')


def uniform_histogram(sz):
    uniform_sample = getUniformSample(sz)
    num_bins = 50
    hp = pu.PlotUtilities("Histogram of Uniform Sample of Size={0}".format(sz),
                          'Outcome', 'Rel. Occurrence')
    hp.plotHistogram(uniform_sample, num_bins)


def plotPDFvsCDF(dist, min_val, max_val, steps, chart_title):
    step = (max_val - min_val) / steps
    x_ax = [min_val + step * k for k in range(steps)]
    pdf_value = [dist.pdf(x_val) for x_val in x_ax]
    cdf_value = [dist.cdf(x_val) for x_val in x_ax]

    mp = pu.PlotUtilities(chart_title, 'x', 'unused')
    mp.subPlots(x_ax, [pdf_value, cdf_value], ['PDF', 'CDF'], ['blue', 'red'])


def binomial_histogram(p, sz):
    sample = [dist.binomial_inverse_cdf(p, u) for u in getUniformSample(sz)]
    num_bins = 100
    hp = pu.PlotUtilities(
        "Histogram of Binomial Sample with Success Probability={0}".format(p),
        'Outcome', 'Rel. Occurrence')
    hp.plotHistogram(sample, num_bins)


def plotMultiDistributions(distrib, min_val, max_val, steps, chart_title):
    x_label = 'x'
    y_label = 'PDF Value'
    step = (max_val - min_val) / steps
    x_ax = [min_val + step * k for k in range(steps)]
    y_ax = [[d.pdf(x_val) for x_val in x_ax] for d in distrib]

    mp = pu.PlotUtilities(chart_title, x_label, y_label)
    mp.multiPlot(x_ax, y_ax)


def uniform_histogram_powers(sz, powers):
    lower_bound = 0.
    upper_bound = 1.
    uniform_sample = np.random.uniform(lower_bound, upper_bound, sz)
    num_bins = 25
    samples = [[np.power(u, p) for u in uniform_sample] for p in powers]

    mp = pu.PlotUtilities("Histogram of Uniform Sample of Size={0}".format(sz),
                          'Outcome', 'Rel. Occurrence')
    mp.plotHistogram(samples, num_bins, powers)


def comparison_poi_binom(lam, upper_bound):
    n = upper_bound + 1
    x_ax = [k for k in range(n)]
    n_plots = 2
    # plotting both poisson and binomial distribution
    y_ax = [0.] * n_plots
    # poisson distribution
    y_ax[0] = [dist.poisson_pdf(lam, k) for k in range(n)]
    # binomial distribution B(n, lam / n), which approximates Poisson(lam) for large n
    y_ax[1] = [dist.binomial_pdf(lam / n, k, n) for k in range(n)]

    mp = pu.PlotUtilities(
        "Poisson vs Binomial Distribution for lambda={0}".format(lam),
        "# Successes", "Probability")
    mp.multiPlot(x_ax, y_ax, '*')


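## The binomial B(n, lam / n) converges to the Poisson(lam) distribution as n
## grows (the "law of rare events").  A minimal sketch of how the largest
## pointwise gap shrinks with n, reusing the same dist helpers; the function
## name and default arguments are illustrative only.
def _poisson_binomial_gap(lam=4., upper_bound=20):
    for n in [upper_bound + 1, 100, 1000]:
        gap = max(
            abs(dist.poisson_pdf(lam, k) - dist.binomial_pdf(lam / n, k, n))
            for k in range(upper_bound + 1))
        print(n, gap)

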
def geometric_brownian_motion(_time, _timestep, _number_paths, bM):
    size = int(_time / _timestep)
    total_sz = size * _number_paths
    sample = np.random.normal(0, 1, total_sz)

    paths = [0.] * _number_paths

    # set up x-axis
    x = [_timestep * k for k in range(size + 1)]

    ####
    ## simulate the trajectories of the process
    ####
    i = 0
    for k in range(_number_paths):
        path = [bM.initialValue()] * (size + 1)
        for j in range(1, size + 1):
            path[j] = bM.nextSample(path[j - 1], sample[i])
            i = i + 1
        paths[k] = path

    ## running maximum of each path
    max_paths = [[max(path[0:j]) for j in range(1, len(path) + 1)]
                 for path in paths]

    mp = pu.PlotUtilities(r'Paths of ' + str(bM.type()), 'Time',
                          'Random Walk Value')

    plot_max = True
    if plot_max:
        plot_all_max = False
        if plot_all_max:
            mp.multiPlot(x, max_paths)
        else:
            # only the first path and its running maximum
            mp.multiPlot(x, [paths[0], max_paths[0]])
    else:
        mp.multiPlot(x, paths)


def uniform_histogram(sz):
    lower_bound = 0.
    upper_bound = 1.
    sample = np.random.uniform(lower_bound, upper_bound, sz)
    num_bins = 50
    hp = pu.PlotUtilities("Histogram of Uniform Sample of Size={0}".format(sz),
                          'Outcome', 'Rel. Occurrence')
    hp.plotHistogram(sample, num_bins)


def plotVasicekDistribution(rhos, p, min_val, max_val, steps):
    x_label = 'x'
    y_label = 'CDF Value'
    chart_title = 'Vasicek Large Portfolio Distribution'
    step = (max_val - min_val) / steps
    x_ax = [min_val + step * k for k in range(steps)]
    y_ax = [[vasicek_large_portfolio_cdf(rho, p, x_val) for x_val in x_ax]
            for rho in rhos]

    mp = pu.PlotUtilities(chart_title, x_label, y_label)
    mp.multiPlot(x_ax, y_ax)


def normal_histogram(mu, var, sz):
    nd = dist.NormalDistribution(mu, var)
    #######
    ### transform the uniform sample
    #######
    sample = [nd.inverse_cdf(u) for u in getUniformSample(sz)]
    num_bins = 60
    hp = pu.PlotUtilities(
        "Histogram of Normal Sample with Mean={0}, Variance={1}".format(
            mu, var), 'Outcome', 'Rel. Occurrence')
    hp.plotHistogram(sample, num_bins)


def plot_maximising_goal_probability(_time, _timestep, _initial_capital,
                                     _target, _b, _r, _sigma):
    #######
    ## simulate one path of normal increments driving the market
    #######
    size = int(_time / _timestep) - 1
    sample = np.random.normal(0, np.sqrt(_timestep), size)

    path_underlying = [1.] * size
    path_wealth = [_initial_capital] * size
    path_portfolio = [0] * size
    x = [_timestep * k for k in range(size)]

    _theta = (_b - _r) / _sigma
    _y0 = np.sqrt(_time) * dist.standard_normal_inverse_cdf(
        _initial_capital * np.exp(_r * _time) / _target)

    ####
    ## create the various paths for plotting
    ####
    bm = 0
    _y = path_wealth[0] * np.exp(_r * _time) / _target
    path_portfolio[0] = dist.standard_normal_pdf(
        dist.standard_normal_inverse_cdf(_y)) / (_y * _sigma * np.sqrt(_time))
    for j in range(1, size):
        _t_remain = _time - x[j]
        _t_sq_remain = np.sqrt(_t_remain)
        path_underlying[j] = path_underlying[j - 1] * (
            1. + _b * _timestep + _sigma * sample[j])
        bm = bm + sample[j] + _theta * _timestep
        path_wealth[j] = _target * np.exp(-_r * _t_remain) * \
            dist.standard_normal_cdf((bm + _y0) / _t_sq_remain)
        _y = path_wealth[j] * np.exp(_r * _t_remain) / _target
        path_portfolio[j] = dist.standard_normal_pdf(
            dist.standard_normal_inverse_cdf(_y)) / (_y * _sigma * _t_sq_remain)

    mp = pu.PlotUtilities("Maximising Probability of Reaching a Goal", 'Time',
                          "None")
    labels = ['Stock Price', 'Wealth Process', 'Portfolio Value']
    mp.subPlots(x, [path_underlying, path_wealth, path_portfolio], labels,
                ['red', 'blue', 'green'])


def standard_brownian_motion(_steps, _paths, scaling):
    scaled_steps = int(_steps * scaling)
    samples = [
        random_sample_normal(np.sqrt(1. / scaling), scaled_steps)
        for k in range(_paths)
    ]
    x = [float(k) / float(scaling) for k in range(scaled_steps + 1)]
    paths = [[sum(sample[0:k]) for k in range(len(sample) + 1)]
             for sample in samples]

    mp = pu.PlotUtilities('Paths of Standard Brownian Motion', 'Time',
                          'Random Walk Value')
    mp.multiPlot(x, paths)


def random_walk(_p, _steps, _paths, scaling):
    scaled_steps = int(_steps * scaling)
    samples = [
        random_sample_sym_binomial(_p, scaled_steps) for k in range(_paths)
    ]
    x = [float(k) / float(scaling) for k in range(scaled_steps + 1)]
    paths = [[
        sum(sample[0:k]) / np.sqrt(scaling) for k in range(len(sample) + 1)
    ] for sample in samples]

    mp = pu.PlotUtilities(
        "Paths of Random Walk with Probability={0}".format(_p), 'Time',
        'Random Walk Value')
    mp.multiPlot(x, paths)


def random_walk_terminal_histogram(_p, _steps, _paths, scaling):
    scaled_steps = _steps * scaling
    samples = [
        random_sample_sym_binomial(_p, scaled_steps) for k in range(_paths)
    ]
    terminal_value = [sum(sample) / np.sqrt(scaling) for sample in samples]

    mp = pu.PlotUtilities(
        "Distribution of Terminal Value of Random Walk with Probability={0}".format(
            _p), 'Value', 'Rel. Occurrence')
    mp.plotHistogram(terminal_value, 21)


def default_process_trajectories(intensity, sample_size):
    uni_sample = np.random.uniform(0., 1., sample_size)
    sampled_default_time = [
        dist.exponential_inverse_cdf(intensity, u) for u in uni_sample
    ]

    max_time = 3. / intensity
    step_size = 0.01
    steps = int(max_time / step_size)
    x = [k * step_size for k in range(steps)]
    ## indicator process: jumps from 0 to 1 at the sampled default time
    y = [[(0. if sdf > x_v else 1.) for x_v in x]
         for sdf in sampled_default_time]

    #######
    ### prepare and show plot
    ###
    mp = pu.PlotUtilities(
        "Trajectories of Default Time Indicator With Intensity = {0}".format(
            intensity), 'Time', 'Default Indicator')
    mp.multiPlot(x, y)


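## The fraction of trajectories that have defaulted by time t estimates
## P(tau <= t) = 1 - exp(-intensity * t).  A minimal empirical check using the
## same inverse-CDF sampling; the function name and default arguments are
## illustrative only.
def _check_default_probability(intensity=0.5, t=1., sample_size=100000):
    uni_sample = np.random.uniform(0., 1., sample_size)
    defaults = [
        1. for u in uni_sample
        if dist.exponential_inverse_cdf(intensity, u) <= t
    ]
    print(len(defaults) / sample_size, 1. - np.exp(-intensity * t))

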
def variance_normal_digital(strike):
    x_label = 'shift'
    y_label = 'Variance'
    chart_title = 'Sample Variance Under Exponential Tilting (Normal)'
    min_val = strike - 2.
    max_val = strike + 2.
    steps = 1000
    step = (max_val - min_val) / steps
    x_ax = [min_val + step * k for k in range(steps)]
    ## second moment of the tilted estimator of P(Z > strike) when the sampling mean is shifted by x
    y_ax = [
        np.exp(x * x) * dist.standard_normal_cdf(-strike - x) for x in x_ax
    ]

    mp = pu.PlotUtilities(chart_title, x_label, y_label)
    mp.multiPlot(x_ax, [y_ax])


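## The curve above is the second moment E[(1_{Z > strike} L)^2] of the
## importance-sampling estimator when the normal sample is shifted by x, with
## likelihood ratio L = exp(-x Z + x^2 / 2); the variance is this quantity minus
## P(Z > strike)^2.  A minimal Monte Carlo cross-check of the closed form; the
## function name and default arguments are illustrative only.
def _check_tilted_normal_second_moment(strike=2., shift=2., n=200000):
    z = np.random.normal(shift, 1., n)                   # sample under the tilted measure
    weights = np.exp(-shift * z + 0.5 * shift * shift)   # likelihood ratio back to N(0, 1)
    estimator = (z > strike) * weights
    print(np.mean(estimator ** 2),
          np.exp(shift * shift) * dist.standard_normal_cdf(-strike - shift))

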
def plot_bachelier_option_price(start, vol):
    lower_bound = start - 10.
    upper_bound = start + 10.
    step = 0.01
    n_steps = int((upper_bound - lower_bound) / step)
    nd = dist.NormalDistribution(0., 1.)
    knock_out = 5

    x_ax = [lower_bound + k * step for k in range(n_steps)]
    # Bachelier option value as a function of the strike, with strikes floored at the knock-out level
    y_ax = [
        vol * nd.pdf((max(x, knock_out) - start) / vol) -
        (x - start) * nd.cdf((start - max(x, knock_out)) / vol) for x in x_ax
    ]

    mp = pu.PlotUtilities("Bachelier Option Value as Function of Strike",
                          "Option Strike", "Option Value")
    mp.multiPlot(x_ax, [y_ax], '-')


def variance_exponential_digital(strike, intensity):
    _l = intensity
    x_label = 'shift'
    y_label = 'Variance'
    chart_title = 'Sample Variance Under Exponential Tilting (Exponential)'
    min_val = 0.
    max_val = intensity - 0.001  # the tilt must stay strictly below the intensity
    steps = 1000
    step = (max_val - min_val) / steps
    x_ax = [min_val + step * k for k in range(steps)]
    ## second moment of the tilted estimator of P(T > strike) when the rate is tilted by x
    y_ax = [
        np.exp(-(_l + x) * strike) * _l * _l / (_l * _l - x * x) for x in x_ax
    ]

    mp = pu.PlotUtilities(chart_title, x_label, y_label)
    mp.multiPlot(x_ax, [y_ax])
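

## Analogous Monte Carlo cross-check for the exponential case: sampling T with
## rate (intensity - x) and re-weighting with the likelihood ratio back to rate
## intensity should reproduce the closed form plotted above.  The function name
## and default arguments are illustrative only.
def _check_tilted_exponential_second_moment(strike=3., intensity=1., x=0.5,
                                            n=200000):
    rate_tilted = intensity - x
    t = np.random.exponential(1. / rate_tilted, n)  # numpy uses scale = 1 / rate
    weights = intensity * np.exp(-intensity * t) / (
        rate_tilted * np.exp(-rate_tilted * t))
    estimator = (t > strike) * weights
    print(np.mean(estimator ** 2),
          np.exp(-(intensity + x) * strike) * intensity ** 2 /
          (intensity ** 2 - x ** 2))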