def delta_vega(k: float, spot: float, sigma: float, t: float):
    x = np.log(spot)
    k_lg = np.log(k)
    sqrt_t = np.sqrt(t)
    d_1 = (x - k_lg) / (sigma * sqrt_t) + 0.5 * sigma * sqrt_t
    partial_sigma_d_1 = - (x - k_lg) / (sigma * sigma * sqrt_t) + 0.5 * sqrt_t
    return spot * AnalyticTools.normal_pdf(0.0, 1.0, d_1) * partial_sigma_d_1 / (sigma * t)
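# Sanity-check sketch (not part of the original module): verifies the closed-form
# derivative partial_sigma_d_1 used above against a central finite difference.
# Self-contained; only numpy is assumed.
def _check_partial_sigma_d_1(k: float = 95.0, spot: float = 100.0,
                             sigma: float = 0.25, t: float = 1.0, eps: float = 1e-6) -> float:
    def d_1(sig: float) -> float:
        return (np.log(spot) - np.log(k)) / (sig * np.sqrt(t)) + 0.5 * sig * np.sqrt(t)

    fd = (d_1(sigma + eps) - d_1(sigma - eps)) / (2.0 * eps)
    closed_form = - (np.log(spot) - np.log(k)) / (sigma * sigma * np.sqrt(t)) + 0.5 * np.sqrt(t)
    return abs(fd - closed_form)  # should be ~ 1e-10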
def get_path_multi_step(t0: float,
                        t1: float,
                        parameters: Types.ndarray,
                        f0: float,
                        v0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_number: Types.TYPE_STANDARD_NORMAL_SAMPLING,
                        rnd_generator,
                        **kwargs) -> map:
    nu_1 = parameters[0]
    nu_2 = parameters[1]
    theta = parameters[2]
    rho = parameters[3]
    rho_inv = np.sqrt(1.0 - rho * rho)

    no_paths = 2 * no_paths if type_random_number == Types.TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = get_time_steps(t0, t1, no_time_steps, **kwargs)
    no_time_steps = len(t_i)
    delta_t_i = np.diff(t_i)

    x_t = np.empty((no_paths, no_time_steps))
    v_t = np.empty((no_paths, no_time_steps))
    int_variance_t_i = np.empty((no_paths, no_time_steps - 1))

    x_t[:, 0] = np.log(f0)
    v_t[:, 0] = v0

    v_t_1_i_1 = np.empty(no_paths)
    v_t_2_i_1 = np.empty(no_paths)
    v_t_1_i_1[:] = v0
    v_t_2_i_1[:] = v0

    map_output = {}
    for i_step in range(1, no_time_steps):
        z_i = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)
        z_sigma = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)

        v_t[:, i_step] = get_variance_sampling(t_i[i_step - 1], t_i[i_step], v_t_1_i_1, v_t_2_i_1,
                                               theta, nu_1, nu_2, z_sigma)
        # Trapezoidal approximation of the integrated variance over the step.
        int_variance_t_i[:, i_step - 1] = 0.5 * (v_t[:, i_step - 1] + v_t[:, i_step]) * delta_t_i[i_step - 1]
        x_t[:, i_step] = x_t[:, i_step - 1] - 0.5 * int_variance_t_i[:, i_step - 1] + \
            AnalyticTools.dot_wise(np.sqrt(v_t[:, i_step - 1]),
                                   (rho * z_sigma + rho_inv * z_i) * np.sqrt(delta_t_i[i_step - 1]))

    map_output[Types.MIXEDLOGNORMAL_OUTPUT.SPOT_VARIANCE_PATHS] = v_t
    map_output[Types.MIXEDLOGNORMAL_OUTPUT.TIMES] = t_i
    map_output[Types.MIXEDLOGNORMAL_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_variance_t_i
    map_output[Types.MIXEDLOGNORMAL_OUTPUT.PATHS] = np.exp(x_t)

    return map_output
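# Minimal sketch (assumed, illustrative) of the correlation scheme used above: the
# asset shock rho * z_sigma + sqrt(1 - rho^2) * z_i has unit variance and
# correlation rho with the variance shock z_sigma.
def _check_correlated_shocks(rho: float = -0.6, n: int = 100000) -> float:
    rng = np.random.default_rng(0)
    z_i = rng.standard_normal(n)
    z_sigma = rng.standard_normal(n)
    asset_shock = rho * z_sigma + np.sqrt(1.0 - rho * rho) * z_i
    return float(np.corrcoef(asset_shock, z_sigma)[0, 1])  # ~ rho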
def quartic_kernel_estimator_slv(v_t: Types.ndarray, x_t: Types.ndarray, x: Types.ndarray, h: float):
    no_x = len(x)
    estimator = np.zeros(no_x)

    for i in range(0, no_x):
        k_x_i = quartic_kernel(x_t - x[i], h)
        estimator[i] = AnalyticTools.scalar_product(v_t, k_x_i) / np.sum(k_x_i)

    return estimator
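# quartic_kernel is defined elsewhere. A plausible form, assuming the standard
# quartic (biweight) kernel K(u) = (15/16) * (1 - u^2)^2 on |u| <= 1 with
# bandwidth h; a sketch, not necessarily the repository's definition.
def quartic_kernel_sketch(u: np.ndarray, h: float) -> np.ndarray:
    z = u / h
    return np.where(np.abs(z) <= 1.0, (15.0 / 16.0) * np.power(1.0 - z * z, 2.0), 0.0) / h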
def get_integrated_variance_fourier(path: Types.ndarray, t_k: Types.ndarray, freq_sampling: int, no_paths: int):
    no_time_steps = len(t_k)
    sigma_n = np.zeros(no_paths)
    n_kernel = int(len(t_k) * 0.5)

    for k in range(0, no_paths):
        for i in range(0, no_time_steps - freq_sampling, freq_sampling):
            delta_x_i = path[k, i + freq_sampling] - path[k, i]
            for j in range(0, no_time_steps - freq_sampling, freq_sampling):
                diff = (t_k[i] - t_k[j]) * (2.0 * np.pi / t_k[-1])
                delta_x_j = path[k, j + freq_sampling] - path[k, j]
                sigma_n[k] += AnalyticTools.dirichlet_kernel(diff, n_kernel) * delta_x_i * delta_x_j

    return sigma_n
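# AnalyticTools.dirichlet_kernel is external. A common convention for the
# Malliavin-Mancino estimator is the rescaled Dirichlet kernel
# D_N(x) = (1 / (2N + 1)) * sum_{|s| <= N} exp(i * s * x); a sketch under that assumption:
def dirichlet_kernel_sketch(x: float, n: int) -> float:
    s = np.arange(-n, n + 1)
    return float(np.sum(np.cos(s * x))) / (2 * n + 1)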
def get_spot_variance_fourier(path: Types.ndarray, t_k: Types.ndarray, no_paths: int, t: float):
    no_time_steps = len(t_k)
    # n_kernel = int(0.5 * no_time_steps)
    n_kernel = int(0.85 * np.power(no_time_steps, 0.75))
    b = 0.125
    # m_kernel = int(b * (0.5 / np.pi) * np.sqrt(no_time_steps) * np.log(no_time_steps)) + 4.2
    # The kernel order must be a positive integer.
    m_kernel = max(int((1.0 / (2.0 * np.pi)) * 0.125 * np.sqrt(np.power(no_time_steps, 0.75))
                       * np.log(np.power(no_time_steps, 0.75))), 1)
    sigma_n = np.zeros(no_paths)

    # Alternative implementation via Fourier coefficients, kept from the original:
    # coefficients = np.zeros(shape=(no_paths, 2 * m_kernel + 1))
    # for m in range(0, 2 * m_kernel + 1):
    #     coefficients[:, m] = get_fourier_coefficient(path, t_k, no_paths, m - m_kernel)
    #
    # spot_variance_estimation = np.zeros(no_paths)
    # t_new = (2.0 * np.pi / t_k[-1]) * t
    # for k in range(0, no_paths):
    #     aux_var = 0.0
    #     for m in range(0, 2 * m_kernel + 1):
    #         aux_var += (1.0 - np.abs(m - m_kernel) / m_kernel) * np.exp(1j * (m - m_kernel) * t_new) * coefficients[k, m]
    #     spot_variance_estimation[k] = aux_var.real
    #
    # return spot_variance_estimation

    for k in range(0, no_paths):
        for i in range(0, no_time_steps - 1):
            delta_x_i = path[k, i + 1] - path[k, i]
            for j in range(0, no_time_steps - 1):
                diff = (t_k[j] - t_k[i]) * (2.0 * np.pi / t_k[-1])
                diff_t = (t - t_k[j]) * (2.0 * np.pi / t_k[-1])
                delta_x_j = path[k, j + 1] - path[k, j]
                dirichlet_kernel = AnalyticTools.dirichlet_kernel(diff, n_kernel)
                fejer_kernel = AnalyticTools.fejer_kernel(diff_t, m_kernel)
                sigma_n[k] += (dirichlet_kernel * fejer_kernel * delta_x_i * delta_x_j)

    # return 0.5 * sigma_n / np.pi
    return sigma_n
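# AnalyticTools.fejer_kernel is also external. Assuming the rescaled Fejer kernel
# F_M(x) = (1 / M) * sum_{|s| < M} (1 - |s| / M) * exp(i * s * x), normalized so
# that F_M(0) = 1; a sketch under that assumption:
def fejer_kernel_sketch(x: float, m: int) -> float:
    s = np.arange(-(m - 1), m)
    weights = 1.0 - np.abs(s) / m
    return float(np.sum(weights * np.cos(s * x))) / m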
def get_path_multi_step(t0: float,
                        t1: float,
                        f0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_number: Types.TYPE_STANDARD_NORMAL_SAMPLING,
                        local_vol: Callable[[float, Types.ndarray], Types.ndarray],
                        rnd_generator,
                        **kwargs) -> map:
    no_paths = 2 * no_paths if type_random_number == Types.TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = get_time_steps(t0, t1, no_time_steps, **kwargs)
    no_time_steps = len(t_i)
    delta_t_i = np.diff(t_i)

    x_t = np.empty((no_paths, no_time_steps))
    int_v_t = np.empty((no_paths, no_time_steps - 1))
    v_t = np.empty((no_paths, no_time_steps))

    x_t[:, 0] = np.log(f0)
    v_t[:, 0] = np.power(local_vol(t0, x_t[:, 0]), 2.0)

    sigma_i_1 = np.zeros(no_paths)
    sigma_i = np.zeros(no_paths)
    sigma_t = np.zeros(no_paths)
    x_t_i_mean = np.zeros(no_paths)

    map_output = {}
    for i_step in range(1, no_time_steps):
        z_i = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)

        # Predictor-corrector step: average the volatility at the left node and at
        # the drift-adjusted predictor of the log-spot.
        np.copyto(sigma_i_1, local_vol(t_i[i_step - 1], x_t[:, i_step - 1]))
        np.copyto(x_t_i_mean, x_t[:, i_step - 1] - 0.5 * np.power(sigma_i_1, 2.0) * delta_t_i[i_step - 1])
        np.copyto(sigma_i, local_vol(t_i[i_step], x_t_i_mean))
        np.copyto(sigma_t, 0.5 * (sigma_i_1 + sigma_i))

        v_t[:, i_step] = np.power(sigma_t, 2.0)
        int_v_t[:, i_step - 1] = v_t[:, i_step] * delta_t_i[i_step - 1]
        x_t[:, i_step] = np.add(
            x_t[:, i_step - 1],
            -0.5 * v_t[:, i_step] * delta_t_i[i_step - 1]
            + np.sqrt(delta_t_i[i_step - 1]) * AnalyticTools.dot_wise(sigma_t, z_i))

    map_output[Types.LOCAL_VOL_OUTPUT.TIMES] = t_i
    map_output[Types.LOCAL_VOL_OUTPUT.PATHS] = np.exp(x_t)
    map_output[Types.LOCAL_VOL_OUTPUT.SPOT_VARIANCE_PATHS] = v_t
    map_output[Types.LOCAL_VOL_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_v_t

    return map_output
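# Self-contained sketch (assumed, for illustration) of one predictor-corrector
# volatility step as used above, with a hypothetical local-vol function.
def _predictor_corrector_step_sketch(dt: float = 0.01) -> np.ndarray:
    def toy_local_vol(t: float, x: np.ndarray) -> np.ndarray:
        return 0.3 * np.exp(-0.25 * x)  # hypothetical sigma(t, log-spot)

    x0 = np.log(100.0) * np.ones(4)
    sigma_left = toy_local_vol(0.0, x0)
    x_pred = x0 - 0.5 * np.power(sigma_left, 2.0) * dt  # drift-adjusted predictor
    return 0.5 * (sigma_left + toy_local_vol(dt, x_pred))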
def generate_paths_variance_rbergomi(s0: float, sigma_0: float, nu: float, h: float,
                                     noise: ndarray, cholk_cov: ndarray, t_i_s: ndarray, no_paths: int):
    no_time_steps = len(t_i_s)

    paths = np.zeros(shape=(no_paths, no_time_steps))
    int_v_t = np.zeros(shape=(no_paths, no_time_steps - 1))
    v_i_1 = np.zeros(shape=(no_paths, no_time_steps))

    v_i_1[:, 0] = sigma_0
    paths[:, 0] = s0

    # The variance of the Volterra process is precomputed outside the path loop.
    var_w_t = get_volterra_variance(t_i_s[1:], h)

    for k in range(0, no_paths):
        w_t_k = AnalyticTools.apply_lower_tridiagonal_matrix(cholk_cov, noise[:, k])

        w_i_s_1 = 0.0
        w_i_h_1 = 0.0
        var_w_t_i_1 = 0.0

        for j in range(1, no_time_steps):
            delta_i_s = t_i_s[j] - t_i_s[j - 1]

            # Brownian and Volterra-Gaussian increments.
            d_w_i_s = w_t_k[j - 1] - w_i_s_1
            d_w_i_h = w_t_k[j + no_time_steps - 2] - w_i_h_1

            v_i_1[k, j] = v_i_1[k, j - 1] * np.exp(-0.5 * nu * nu * (var_w_t[j - 1] - var_w_t_i_1)
                                                   + nu * d_w_i_h)
            int_v_t[k, j - 1] = delta_i_s * 0.5 * (v_i_1[k, j - 1] + v_i_1[k, j])
            paths[k, j] = paths[k, j - 1] * np.exp(-0.5 * int_v_t[k, j - 1]
                                                   + np.sqrt(v_i_1[k, j - 1]) * d_w_i_s)

            # Keep the last Brownian values and the variance of the RL process.
            w_i_s_1 = w_t_k[j - 1]
            w_i_h_1 = w_t_k[j + no_time_steps - 2]
            var_w_t_i_1 = var_w_t[j - 1]

    return paths, v_i_1, int_v_t
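# get_volterra_variance is defined elsewhere. For the normalized Volterra kernel
# K(t, s) = sqrt(2H) * (t - s)^(H - 1/2) used in rBergomi, Var(W_t) = t^(2H);
# a sketch under that assumption:
def get_volterra_variance_sketch(t_i_s: np.ndarray, h: float) -> np.ndarray:
    return np.power(t_i_s, 2.0 * h)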
def get_fourier_coefficient(path: Types.ndarray, t_k: Types.ndarray, no_paths: int, s: int):
    no_time_steps = len(t_k)
    n_kernel = int(0.5 * no_time_steps)
    coefficients = np.empty(no_paths)

    for k in range(0, no_paths):
        aux_value = 0.0
        for i in range(0, no_time_steps - 1):
            delta_x_i = path[k, i + 1] - path[k, i]
            for j in range(0, no_time_steps - 1):
                diff = (t_k[j] - t_k[i]) * (2.0 * np.pi / t_k[-1])
                delta_x_j = path[k, j + 1] - path[k, j]
                dirichlet_kernel = AnalyticTools.dirichlet_kernel(diff, n_kernel)
                aux_value += delta_x_i * delta_x_j * dirichlet_kernel \
                    * np.exp(-1j * (2.0 * np.pi / t_k[-1]) * t_k[j] * s)
        coefficients[k] = aux_value.real

    return coefficients
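# Note: with s = 0 the phase factor exp(-1j * ... * s) is identically 1, so this
# coefficient reduces to the double Dirichlet-kernel sum of
# get_integrated_variance_fourier at unit sampling frequency (up to the kernel
# order chosen in each function).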
rng = RNG.RndGenerator(seed)
paths = fBM.cholesky_method(t0, t1, z0, rng, hurst_parameter, no_paths, int(no_time_steps * t1))

# Time steps to compute
t = np.linspace(t0, t1, int(no_time_steps * t1))

# Compute mean
empirical_mean = np.mean(paths, axis=0)

# Compute variance
empirical_variance = np.var(paths, axis=0)
exact_variance = [fBM.covariance(t_i, t_i, hurst_parameter) for t_i in t]

# Compute covariance over the full time grid
no_full_time_steps = len(t)
empirical_covariance = np.zeros(shape=(no_full_time_steps, no_full_time_steps))
exact_covariance = np.zeros(shape=(no_full_time_steps, no_full_time_steps))

for i in range(0, no_full_time_steps):
    for j in range(0, i):
        empirical_covariance[i, j] = np.mean(AnalyticTools.dot_wise(paths[:, i], paths[:, j])) - \
            empirical_mean[i] * empirical_mean[j]
        exact_covariance[i, j] = fBM.covariance(t[i], t[j], hurst_parameter)
        empirical_covariance[j, i] = empirical_covariance[i, j]
        exact_covariance[j, i] = exact_covariance[i, j]

error_covariance = np.max(np.abs(empirical_covariance - exact_covariance))
error_variance = np.max(np.abs(exact_variance - empirical_variance))
error_mean = np.max(np.abs(empirical_mean))
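# fBM.covariance is external. For standard fractional Brownian motion with Hurst
# index H, cov(s, t) = 0.5 * (t^(2H) + s^(2H) - |t - s|^(2H)); a sketch under
# that assumption:
def fbm_covariance_sketch(s: float, t: float, h: float) -> float:
    return 0.5 * (t ** (2.0 * h) + s ** (2.0 * h) - abs(t - s) ** (2.0 * h))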
def get_path_multi_step(t0: float,
                        t1: float,
                        parameters: Vector,
                        f0: float,
                        v0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_numbers: Types.TYPE_STANDARD_NORMAL_SAMPLING,
                        rnd_generator) -> map:
    k = parameters[0]
    theta = parameters[1]
    epsilon = parameters[2]
    rho = parameters[3]

    no_paths = 2 * no_paths if type_random_numbers == Types.TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = np.linspace(t0, t1, no_time_steps)
    delta_t_i = np.diff(t_i)

    delta_weight = np.zeros(no_paths)
    gamma_weight = np.zeros(no_paths)
    var_weight = np.zeros(no_paths)
    inv_variance = np.zeros(no_paths)

    ln_x_t_paths = np.zeros(shape=(no_paths, no_time_steps))
    int_v_t_paths = np.zeros(shape=(no_paths, no_time_steps - 1))
    v_t_paths = np.zeros(shape=(no_paths, no_time_steps))

    ln_x_t_paths[:, 0] = np.log(f0)
    v_t_paths[:, 0] = v0

    map_output = {}
    for i in range(1, no_time_steps):
        u_variance = rnd_generator.uniform(0.0, 1.0, no_paths)
        z_f = rnd_generator.normal(0.0, 1.0, no_paths, type_random_numbers)

        np.copyto(v_t_paths[:, i],
                  VarianceMC.get_variance(k, theta, epsilon, 1.5, t_i[i - 1], t_i[i],
                                          v_t_paths[:, i - 1], u_variance, no_paths))
        np.copyto(int_v_t_paths[:, i - 1],
                  HestonTools.get_integral_variance(t_i[i - 1], t_i[i],
                                                    v_t_paths[:, i - 1], v_t_paths[:, i], 0.5, 0.5))

        HestonTools.get_delta_weight(t_i[i - 1], t_i[i], v_t_paths[:, i - 1], v_t_paths[:, i], z_f, delta_weight)
        HestonTools.get_var_weight(t_i[i - 1], t_i[i], v_t_paths[:, i - 1], v_t_paths[:, i], z_f, var_weight)
        inv_variance += HestonTools.get_integral_variance(t_i[i - 1], t_i[i],
                                                          1.0 / v_t_paths[:, i - 1], 1.0 / v_t_paths[:, i],
                                                          0.5, 0.5)

        # Coefficients of the log-spot discretization (Andersen-style scheme).
        k0 = -delta_t_i[i - 1] * (rho * k * theta) / epsilon
        k1 = 0.5 * delta_t_i[i - 1] * ((k * rho) / epsilon - 0.5) - rho / epsilon
        k2 = 0.5 * delta_t_i[i - 1] * ((k * rho) / epsilon - 0.5) + rho / epsilon
        k3 = 0.5 * delta_t_i[i - 1] * (1.0 - rho * rho)

        np.copyto(ln_x_t_paths[:, i],
                  ln_x_t_paths[:, i - 1] + k0 + k1 * v_t_paths[:, i - 1] + k2 * v_t_paths[:, i]
                  + np.sqrt(k3) * AnalyticTools.dot_wise(np.sqrt(v_t_paths[:, i - 1] + v_t_paths[:, i]), z_f))

    map_output[HESTON_OUTPUT.PATHS] = np.exp(ln_x_t_paths)
    map_output[HESTON_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_v_t_paths
    map_output[HESTON_OUTPUT.DELTA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = np.multiply(
        delta_weight, 1.0 / (np.sqrt(1.0 - rho * rho) * t1 * f0))
    map_output[HESTON_OUTPUT.SPOT_VARIANCE_PATHS] = v_t_paths
    map_output[HESTON_OUTPUT.TIMES] = t_i

    HestonTools.get_gamma_weight(delta_weight, var_weight, inv_variance, rho, t1, gamma_weight)
    map_output[HESTON_OUTPUT.GAMMA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = np.multiply(
        gamma_weight, 1.0 / ((1.0 - rho * rho) * np.power(t1 * f0, 2.0)))

    return map_output
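# HestonTools.get_integral_variance is external. Given the 0.5/0.5 weights passed
# above, a plausible weighted-endpoint (trapezoidal) form is the following sketch
# (assumed signature, not necessarily the repository's implementation):
def get_integral_variance_sketch(t0: float, t1: float, v_t0: np.ndarray, v_t1: np.ndarray,
                                 w_left: float, w_right: float) -> np.ndarray:
    return (t1 - t0) * (w_left * v_t0 + w_right * v_t1)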
def generate_paths_compose_rbergomi(s0: float, sigma_0: float, nu: float, h_short: float, h_long: float,
                                    noise: ndarray, cholk_cov_short: ndarray, cholk_cov_long: ndarray,
                                    t_i_s: ndarray, no_paths: int):
    no_time_steps = len(t_i_s)

    paths = np.zeros(shape=(no_paths, no_time_steps))
    int_v_t = np.zeros(shape=(no_paths, no_time_steps - 1))
    int_v_t_short = np.zeros(shape=(no_paths, no_time_steps - 1))
    int_v_t_long = np.zeros(shape=(no_paths, no_time_steps - 1))

    sigma_i_1 = np.zeros(shape=(no_paths, no_time_steps))
    sigma_i_1_short = np.zeros(shape=(no_paths, no_time_steps))
    sigma_i_1_long = np.zeros(shape=(no_paths, no_time_steps))

    sigma_i_1[:, 0] = sigma_0
    sigma_i_1_short[:, 0] = 0.5 * sigma_0
    sigma_i_1_long[:, 0] = 0.5 * sigma_0
    paths[:, 0] = s0

    # Variances of the short- and long-memory Volterra processes are precomputed
    # outside the path loop.
    var_w_t_short = get_volterra_variance(t_i_s[1:], h_short)
    var_w_t_long = get_volterra_variance(t_i_s[1:], h_long)

    for k in range(0, no_paths):
        w_t_k_short = AnalyticTools.apply_lower_tridiagonal_matrix(cholk_cov_short, noise[:, k])
        w_t_k_long = AnalyticTools.apply_lower_tridiagonal_matrix(cholk_cov_long, noise[:, k])

        # Short-term process state.
        w_i_s_1_short = 0.0
        w_i_h_1_short = 0.0
        var_w_t_i_1_short = 0.0

        # Long-term process state.
        w_i_s_1_long = 0.0
        w_i_h_1_long = 0.0
        var_w_t_i_1_long = 0.0

        for j in range(1, no_time_steps):
            delta_i_s = t_i_s[j] - t_i_s[j - 1]

            # Brownian and Volterra-Gaussian increments.
            d_w_i_s_short = w_t_k_short[j - 1] - w_i_s_1_short
            d_w_i_h_short = w_t_k_short[j + no_time_steps - 2] - w_i_h_1_short
            d_w_i_s_long = w_t_k_long[j - 1] - w_i_s_1_long
            d_w_i_h_long = w_t_k_long[j + no_time_steps - 2] - w_i_h_1_long

            sigma_i_1_short[k, j] = sigma_i_1_short[k, j - 1] * np.exp(
                -0.5 * nu * nu * (var_w_t_short[j - 1] - var_w_t_i_1_short) + nu * d_w_i_h_short)
            int_v_t_short[k, j - 1] = delta_i_s * 0.5 * (
                sigma_i_1_short[k, j - 1] * sigma_i_1_short[k, j - 1]
                + sigma_i_1_short[k, j] * sigma_i_1_short[k, j])

            sigma_i_1_long[k, j] = sigma_i_1_long[k, j - 1] * np.exp(
                -0.5 * nu * nu * (var_w_t_long[j - 1] - var_w_t_i_1_long) + nu * d_w_i_h_long)
            int_v_t_long[k, j - 1] = delta_i_s * 0.5 * (
                sigma_i_1_long[k, j - 1] * sigma_i_1_long[k, j - 1]
                + sigma_i_1_long[k, j] * sigma_i_1_long[k, j])

            int_v_t[k, j - 1] = int_v_t_long[k, j - 1] + int_v_t_short[k, j - 1]
            sigma_i_1[k, j] = sigma_i_1_long[k, j] + sigma_i_1_short[k, j]

            paths[k, j] = paths[k, j - 1] * np.exp(
                -0.5 * int_v_t[k, j - 1]
                + sigma_i_1_short[k, j - 1] * d_w_i_s_short
                + sigma_i_1_long[k, j - 1] * d_w_i_s_long)

            # Keep the last Brownian values and the variances of the RL processes.
            w_i_s_1_short = w_t_k_short[j - 1]
            w_i_h_1_short = w_t_k_short[j + no_time_steps - 2]
            var_w_t_i_1_short = var_w_t_short[j - 1]

            w_i_s_1_long = w_t_k_long[j - 1]
            w_i_h_1_long = w_t_k_long[j + no_time_steps - 2]
            var_w_t_i_1_long = var_w_t_long[j - 1]

    return paths, sigma_i_1, int_v_t
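# Sanity sketch (assumed, illustrative): the lognormal volatility update used above,
# v_j = v_{j-1} * exp(-0.5 * nu^2 * dvar + nu * dW) with dW ~ N(0, dvar), is a
# martingale step: E[v_j | v_{j-1}] = v_{j-1}.
def _check_lognormal_martingale(nu: float = 1.2, dvar: float = 0.3,
                                v_prev: float = 0.04, n: int = 1000000) -> float:
    rng = np.random.default_rng(1)
    d_w = rng.normal(0.0, np.sqrt(dvar), n)
    return float(np.mean(v_prev * np.exp(-0.5 * nu * nu * dvar + nu * d_w)))  # ~ v_prev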
def generate_paths_mixed_rbergomi(s0: float, sigma_0: float, nu_short: float, nu_long: float,
                                  h_short: float, h_long: float, noise: ndarray, cholk_cov: ndarray,
                                  t_i_s: ndarray, no_paths: int):
    no_time_steps = len(t_i_s)

    paths = np.zeros(shape=(no_paths, no_time_steps))
    int_v_t = np.zeros(shape=(no_paths, no_time_steps - 1))
    int_v_short_t = np.zeros(shape=(no_paths, no_time_steps - 1))
    int_v_long_t = np.zeros(shape=(no_paths, no_time_steps - 1))

    sigma_i_1 = np.zeros(shape=(no_paths, no_time_steps))
    sigma_short_i_1 = np.zeros(shape=(no_paths, no_time_steps))
    sigma_long_i_1 = np.zeros(shape=(no_paths, no_time_steps))

    sigma_short_i_1[:, 0] = 0.5 * sigma_0
    sigma_long_i_1[:, 0] = 0.5 * sigma_0
    sigma_i_1[:, 0] = sigma_0
    paths[:, 0] = s0

    # Variances of the short- and long-memory Volterra processes are precomputed
    # outside the path loop.
    var_w_t_short = get_variance_rbergomi(t_i_s[1:], h_short)
    var_w_t_long = get_variance_rbergomi(t_i_s[1:], h_long)

    for k in range(0, no_paths):
        w_t_k = AnalyticTools.apply_lower_tridiagonal_matrix(cholk_cov, noise[:, k])

        w_i_s_1 = 0.0
        w_i_h_short_1 = 0.0
        w_i_h_long_1 = 0.0
        var_short_w_t_i_1 = 0.0
        var_long_w_t_i_1 = 0.0

        for j in range(1, no_time_steps):
            delta_i_s = t_i_s[j] - t_i_s[j - 1]

            # Brownian and Volterra-Gaussian increments.
            d_w_i_s = w_t_k[j - 1] - w_i_s_1
            d_w_i_h_short = w_t_k[j + no_time_steps - 2] - w_i_h_short_1
            d_w_i_h_long = w_t_k[j + 2 * no_time_steps - 3] - w_i_h_long_1

            sigma_short_i_1[k, j] = sigma_short_i_1[k, j - 1] * np.exp(
                -0.5 * nu_short * nu_short * (var_w_t_short[j - 1] - var_short_w_t_i_1)
                + nu_short * d_w_i_h_short)
            sigma_long_i_1[k, j] = sigma_long_i_1[k, j - 1] * np.exp(
                -0.5 * nu_long * nu_long * (var_w_t_long[j - 1] - var_long_w_t_i_1)
                + nu_long * d_w_i_h_long)
            sigma_i_1[k, j] = sigma_short_i_1[k, j] + sigma_long_i_1[k, j]

            # Trapezoidal rule for each factor; right-endpoint (Euler) rule for the
            # combined variance.
            int_v_short_t[k, j - 1] = delta_i_s * 0.5 * (
                sigma_short_i_1[k, j - 1] * sigma_short_i_1[k, j - 1]
                + sigma_short_i_1[k, j] * sigma_short_i_1[k, j])
            int_v_long_t[k, j - 1] = delta_i_s * 0.5 * (
                sigma_long_i_1[k, j - 1] * sigma_long_i_1[k, j - 1]
                + sigma_long_i_1[k, j] * sigma_long_i_1[k, j])
            int_v_t[k, j - 1] = delta_i_s * sigma_i_1[k, j] * sigma_i_1[k, j]

            paths[k, j] = paths[k, j - 1] * np.exp(-0.5 * int_v_t[k, j - 1]
                                                   + sigma_i_1[k, j - 1] * d_w_i_s)

            # Keep the last Brownian values and the variances of the RL processes.
            w_i_s_1 = w_t_k[j - 1]
            w_i_h_short_1 = w_t_k[j + no_time_steps - 2]
            w_i_h_long_1 = w_t_k[j + 2 * no_time_steps - 3]
            var_short_w_t_i_1 = var_w_t_short[j - 1]
            var_long_w_t_i_1 = var_w_t_long[j - 1]

    return paths, sigma_i_1, int_v_t
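# Index layout assumed by the mixed scheme above, with N = no_time_steps: w_t_k
# stacks three correlated Gaussian blocks of length N - 1 produced by one
# Cholesky factor,
#   w_t_k[0 : N-1]      -> driving Brownian motion at t_1 .. t_{N-1},
#   w_t_k[N-1 : 2N-2]   -> Volterra process of the short factor (H = h_short),
#   w_t_k[2N-2 : 3N-3]  -> Volterra process of the long factor (H = h_long),
# hence the offsets j - 1, j + N - 2 and j + 2N - 3 in the loop.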
def get_path_multi_step(t0: float,
                        t1: float,
                        parameters: Vector,
                        f0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_number: TYPE_STANDARD_NORMAL_SAMPLING,
                        rnd_generator,
                        **kwargs) -> map:
    alpha = parameters[0]
    nu = parameters[1]
    rho = parameters[2]
    rho_inv = np.sqrt(1.0 - rho * rho)

    no_paths = 2 * no_paths if type_random_number == TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = get_time_steps(t0, t1, no_time_steps, **kwargs)
    no_time_steps = len(t_i)
    delta_t_i = np.diff(t_i)

    s_t = np.empty((no_paths, no_time_steps))
    sigma_t = np.empty((no_paths, no_time_steps))
    int_sigma_t_i = np.empty((no_paths, no_time_steps - 1))
    s_t[:, 0] = f0
    sigma_t[:, 0] = alpha

    sigma_t_i_1 = np.empty(no_paths)
    sigma_t_i_1[:] = alpha

    delta_weight = np.zeros(no_paths)
    gamma_weight = np.zeros(no_paths)
    var_weight = np.zeros(no_paths)
    inv_variance = np.zeros(no_paths)

    map_output = {}
    for i_step in range(1, no_time_steps):
        z_i = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)
        z_sigma = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)

        sigma_t_i = get_vol_sampling(t_i[i_step - 1], t_i[i_step], sigma_t_i_1, nu, z_sigma)
        sigma_t[:, i_step] = sigma_t_i

        # Trapezoidal approximation of the integrated variance over the step.
        int_sigma_t_i[:, i_step - 1] = 0.5 * (sigma_t_i_1 * sigma_t_i_1
                                              + sigma_t_i * sigma_t_i) * delta_t_i[i_step - 1]

        diff_sigma = (rho / nu) * (sigma_t_i - sigma_t_i_1)
        noise_sigma = AnalyticTools.dot_wise(np.sqrt(int_sigma_t_i[:, i_step - 1]), z_i)

        SABRTools.get_delta_weight(t_i[i_step - 1], t_i[i_step], sigma_t_i_1, sigma_t_i, z_sigma, delta_weight)
        SABRTools.get_var_weight(t_i[i_step - 1], t_i[i_step], sigma_t_i_1, sigma_t_i, z_sigma, var_weight)
        inv_variance += SABRTools.get_integral_variance(t_i[i_step - 1], t_i[i_step],
                                                        1.0 / sigma_t_i_1, 1.0 / sigma_t_i, 0.5, 0.5)

        sigma_t_i_1 = sigma_t_i.copy()
        s_t[:, i_step] = AnalyticTools.dot_wise(
            s_t[:, i_step - 1],
            np.exp(-0.5 * int_sigma_t_i[:, i_step - 1] + diff_sigma + rho_inv * noise_sigma))

    map_output[SABR_OUTPUT.DELTA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = delta_weight
    map_output[SABR_OUTPUT.PATHS] = s_t
    map_output[SABR_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_sigma_t_i
    map_output[SABR_OUTPUT.SIGMA_PATHS] = sigma_t
    map_output[SABR_OUTPUT.TIMES] = t_i

    SABRTools.get_gamma_weight(delta_weight, var_weight, inv_variance, rho, t1, gamma_weight)
    map_output[SABR_OUTPUT.GAMMA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = np.multiply(
        gamma_weight, 1.0 / ((1.0 - rho * rho) * np.power(t1 * f0, 2.0)))

    return map_output
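# The log-spot step above relies on the lognormal-SABR identity
# d(sigma_t) = nu * sigma_t * dW_t  =>  int_0^t sigma_s dW_s = (sigma_t - sigma_0) / nu,
# so the part of int sigma dZ correlated with the volatility driver is
# (rho / nu) * (sigma_i - sigma_{i-1}), and the orthogonal remainder is approximated
# by sqrt(int sigma^2 ds) * z_i with independent z_i.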