def get_path_multi_step(t0: float,
                        t1: float,
                        parameters: Types.ndarray,
                        f0: float,
                        v0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_number: Types.TYPE_STANDARD_NORMAL_SAMPLING,
                        rnd_generator,
                        **kwargs) -> map:
    """Simulate log-forward paths under a two-factor mixed-lognormal stochastic-vol model.

    :param t0: start time of the simulation.
    :param t1: end time of the simulation.
    :param parameters: model parameters [nu_1, nu_2, theta, rho].
    :param f0: initial forward; paths start at log(f0).
    :param v0: initial spot variance.
    :param no_paths: requested number of paths (doubled internally for antithetic sampling).
    :param no_time_steps: requested number of time steps (the final grid comes from get_time_steps).
    :param type_random_number: sampling scheme for the driving normals.
    :param rnd_generator: project random-number generator exposing normal(mu, sigma, n, kind).
    :param kwargs: forwarded to get_time_steps (e.g. extra grid points).
    :return: dict keyed by Types.MIXEDLOGNORMAL_OUTPUT with paths, spot variance,
             per-step integrated variance and the time grid.
    """
    nu_1 = parameters[0]
    nu_2 = parameters[1]
    theta = parameters[2]
    rho = parameters[3]
    rho_inv = np.sqrt(1.0 - rho * rho)

    # Antithetic sampling needs the mirrored partner of every path.
    no_paths = 2 * no_paths if type_random_number == Types.TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = get_time_steps(t0, t1, no_time_steps, **kwargs)
    no_time_steps = len(t_i)
    delta_t_i = np.diff(t_i)

    x_t = np.empty((no_paths, no_time_steps))
    v_t = np.empty((no_paths, no_time_steps))
    int_variance_t_i = np.empty((no_paths, no_time_steps - 1))
    x_t[:, 0] = np.log(f0)
    v_t[:, 0] = v0

    # Per-factor state carried into the variance sampler.
    # NOTE(review): these are never updated inside the loop, so every step samples
    # conditional on v0 — looks suspicious but get_variance_sampling's contract is
    # not visible here; confirm before changing.
    v_t_1_i_1 = np.empty(no_paths)
    v_t_2_i_1 = np.empty(no_paths)
    v_t_1_i_1[:] = v0
    v_t_2_i_1[:] = v0

    map_output = {}
    for i_step in range(1, no_time_steps):
        z_i = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)
        z_sigma = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)
        v_t[:, i_step] = get_variance_sampling(t_i[i_step - 1], t_i[i_step], v_t_1_i_1, v_t_2_i_1,
                                               theta, nu_1, nu_2, z_sigma)
        # BUG FIX: trapezoidal rule over the step. The original summed the i_step-1
        # endpoint twice (0.5 * (a + a)), silently degenerating to a left-rectangle
        # rule; the second term must use v_t[:, i_step], matching the SABR sibling.
        int_variance_t_i[:, i_step - 1] = 0.5 * (v_t[:, i_step - 1] * delta_t_i[i_step - 1]
                                                 + v_t[:, i_step] * delta_t_i[i_step - 1])
        # Log-Euler step with correlated vol/spot noise (rho correlation).
        x_t[:, i_step] = x_t[:, i_step - 1] - 0.5 * int_variance_t_i[:, i_step - 1] + \
            AnalyticTools.dot_wise(np.sqrt(v_t[:, i_step - 1]),
                                   (rho * z_sigma + rho_inv * z_i) * np.sqrt(delta_t_i[i_step - 1]))

    map_output[Types.MIXEDLOGNORMAL_OUTPUT.SPOT_VARIANCE_PATHS] = v_t
    map_output[Types.MIXEDLOGNORMAL_OUTPUT.TIMES] = t_i
    map_output[Types.MIXEDLOGNORMAL_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_variance_t_i
    map_output[Types.MIXEDLOGNORMAL_OUTPUT.PATHS] = np.exp(x_t)

    return map_output
def get_path_multi_step(t0: float,
                        t1: float,
                        f0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_number: Types.TYPE_STANDARD_NORMAL_SAMPLING,
                        local_vol: Callable[[float, Types.ndarray], Types.ndarray],
                        rnd_generator,
                        **kwargs) -> map:
    """Simulate log-forward paths under a local-volatility model with a
    predictor-corrector volatility estimate per step.

    :param t0: start time of the simulation.
    :param t1: end time of the simulation.
    :param f0: initial forward; paths start at log(f0).
    :param no_paths: requested number of paths (doubled internally for antithetic sampling).
    :param no_time_steps: requested number of time steps (the final grid comes from get_time_steps).
    :param type_random_number: sampling scheme for the driving normals.
    :param local_vol: callable sigma(t, x) evaluated on the log-forward paths.
    :param rnd_generator: project random-number generator exposing normal(mu, sigma, n, kind).
    :param kwargs: forwarded to get_time_steps (e.g. extra grid points).
    :return: dict keyed by Types.LOCAL_VOL_OUTPUT with paths, spot variance,
             per-step integrated variance and the time grid.
    """
    # Antithetic sampling needs the mirrored partner of every path.
    no_paths = 2 * no_paths if type_random_number == Types.TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    # BUG FIX: the original rebuilt t_i with np.linspace(t0, t1, no_time_steps)
    # right after this call, discarding any custom/extra grid nodes supplied via
    # kwargs. No sibling generator does that; keep the get_time_steps grid.
    t_i = get_time_steps(t0, t1, no_time_steps, **kwargs)
    no_time_steps = len(t_i)
    delta_t_i = np.diff(t_i)

    x_t = np.empty((no_paths, no_time_steps))
    int_v_t = np.empty((no_paths, no_time_steps - 1))
    v_t = np.empty((no_paths, no_time_steps))
    x_t[:, 0] = np.log(f0)
    v_t[:, 0] = local_vol(t0, x_t[:, 0])

    sigma_i_1 = np.zeros(no_paths)
    sigma_i = np.zeros(no_paths)
    sigma_t = np.zeros(no_paths)
    x_t_i_mean = np.zeros(no_paths)

    map_output = {}
    for i_step in range(1, no_time_steps):
        z_i = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)

        # Predictor: vol at the previous state.
        np.copyto(sigma_i_1, local_vol(t_i[i_step - 1], x_t[:, i_step - 1]))
        # Drift-adjusted predictor point for the corrector evaluation.
        # NOTE(review): the shift is -0.5*sigma^2 without a delta_t factor —
        # confirm this matches the intended scheme.
        np.copyto(x_t_i_mean, x_t[:, i_step - 1] - 0.5 * np.power(sigma_i_1, 2.0))
        np.copyto(sigma_i, local_vol(t_i[i_step], x_t_i_mean))
        # Corrector: average of predictor and corrected vols.
        np.copyto(sigma_t, 0.5 * (sigma_i_1 + sigma_i))

        v_t[:, i_step] = np.power(sigma_t, 2.0)
        int_v_t[:, i_step - 1] = v_t[:, i_step] * delta_t_i[i_step - 1]
        # Log-Euler step with the averaged volatility.
        x_t[:, i_step] = np.add(x_t[:, i_step - 1],
                                -0.5 * v_t[:, i_step] * delta_t_i[i_step - 1]
                                + np.sqrt(delta_t_i[i_step - 1]) * AnalyticTools.dot_wise(sigma_t, z_i))

    map_output[Types.LOCAL_VOL_OUTPUT.TIMES] = t_i
    map_output[Types.LOCAL_VOL_OUTPUT.PATHS] = np.exp(x_t)
    map_output[Types.LOCAL_VOL_OUTPUT.SPOT_VARIANCE_PATHS] = v_t
    map_output[Types.LOCAL_VOL_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_v_t

    return map_output
# Sanity check of the fBM Cholesky sampler: compare empirical mean / variance /
# covariance of simulated paths against the exact fBM covariance function.
rng = RNG.RndGenerator(seed)

# Full number of simulated time steps over [t0, t1].
no_full_time_steps = int(no_time_steps * t1)
paths = fBM.cholesky_method(t0, t1, z0, rng, hurst_parameter, no_paths, no_full_time_steps)

# Time grid matching the simulated path columns.
t = np.linspace(t0, t1, no_full_time_steps)

# Empirical mean across paths (exact fBM mean is 0, checked via error_mean below).
empirical_mean = np.mean(paths, axis=0)

# Empirical vs exact variance at each grid time (variance = covariance(t, t)).
empirical_variance = np.var(paths, axis=0)
exact_variance = [fBM.covariance(t_i, t_i, hurst_parameter) for t_i in t]

# BUG FIX: matrices and loops previously ran over no_time_steps while paths/t
# have no_full_time_steps columns — out-of-bounds on t for t1 < 1, and only a
# partial comparison for t1 > 1. Use the full grid size consistently.
empirical_covariance = np.zeros(shape=(no_full_time_steps, no_full_time_steps))
exact_covariance = np.zeros(shape=(no_full_time_steps, no_full_time_steps))
for i in range(0, no_full_time_steps):
    # Strict lower triangle only; the diagonal is covered by the variance check.
    for j in range(0, i):
        empirical_covariance[i, j] = np.mean(AnalyticTools.dot_wise(paths[:, i], paths[:, j])) - \
            empirical_mean[i] * empirical_mean[j]
        exact_covariance[i, j] = fBM.covariance(t[i], t[j], hurst_parameter)
        # Covariance matrices are symmetric; mirror into the upper triangle.
        empirical_covariance[j, i] = empirical_covariance[i, j]
        exact_covariance[j, i] = exact_covariance[i, j]

error_covariance = np.max(np.abs(empirical_covariance - exact_covariance))
error_variance = np.max(np.abs(exact_variance - empirical_variance))
error_mean = np.max(np.abs(empirical_mean))
def get_path_multi_step(t0: float,
                        t1: float,
                        parameters: Vector,
                        f0: float,
                        v0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_numbers: Types.TYPE_STANDARD_NORMAL_SAMPLING,
                        rnd_generator) -> map:
    """Simulate Heston paths together with Malliavin delta/gamma weights.

    Appears to follow an Andersen-QE-style discretisation (variance sampled
    exactly per step, log-forward updated with the k0..k3 constants) — confirm
    against the reference before modifying.

    :param t0: start time of the simulation.
    :param t1: end time of the simulation.
    :param parameters: model parameters [k, theta, epsilon, rho].
    :param f0: initial forward; paths start at log(f0).
    :param v0: initial spot variance.
    :param no_paths: requested number of paths (doubled for antithetic sampling).
    :param no_time_steps: number of points in the uniform time grid on [t0, t1].
    :param type_random_numbers: sampling scheme for the driving normals.
    :param rnd_generator: project generator exposing uniform(...) and normal(...).
    :return: dict keyed by HESTON_OUTPUT with paths, spot/integrated variance,
             time grid and Malliavin delta/gamma weights.
    """
    k = parameters[0]
    theta = parameters[1]
    epsilon = parameters[2]
    rho = parameters[3]

    # Antithetic sampling needs the mirrored partner of every path.
    no_paths = 2 * no_paths if type_random_numbers == Types.TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = np.linspace(t0, t1, no_time_steps)
    delta_t_i = np.diff(t_i)

    # NOTE(review): f_t is allocated and seeded but never used afterwards —
    # the forward paths are rebuilt from ln_x_t_paths at the end.
    f_t = np.empty((no_paths, no_time_steps))
    f_t[:, 0] = f0

    # Accumulators for the Malliavin weight computations.
    delta_weight = np.zeros(no_paths)
    gamma_weight = np.zeros(no_paths)
    var_weight = np.zeros(no_paths)
    inv_variance = np.zeros(no_paths)

    ln_x_t_paths = np.zeros(shape=(no_paths, no_time_steps))
    int_v_t_paths = np.zeros(shape=(no_paths, no_time_steps - 1))
    v_t_paths = np.zeros(shape=(no_paths, no_time_steps))
    ln_x_t_paths[:, 0] = np.log(f0)
    v_t_paths[:, 0] = v0

    map_out_put = {}
    for i in range(1, no_time_steps):
        u_variance = rnd_generator.uniform(0.0, 1.0, no_paths)
        z_f = rnd_generator.normal(0.0, 1.0, no_paths, type_random_numbers)

        # Sample the variance at t_i conditional on t_{i-1}; 1.5 is presumably
        # the QE switching threshold (psi_c) — confirm in VarianceMC.
        np.copyto(v_t_paths[:, i],
                  VarianceMC.get_variance(k, theta, epsilon, 1.5, t_i[i - 1], t_i[i],
                                          v_t_paths[:, i - 1], u_variance, no_paths))

        # Trapezoidal (0.5/0.5 weights) integrated variance over the step.
        np.copyto(int_v_t_paths[:, i - 1],
                  HestonTools.get_integral_variance(t_i[i - 1], t_i[i], v_t_paths[:, i - 1],
                                                    v_t_paths[:, i], 0.5, 0.5))

        # Accumulate Malliavin delta/variance weights in place.
        HestonTools.get_delta_weight(t_i[i - 1], t_i[i], v_t_paths[:, i - 1], v_t_paths[:, i],
                                     z_f, delta_weight)
        HestonTools.get_var_weight(t_i[i - 1], t_i[i], v_t_paths[:, i - 1], v_t_paths[:, i],
                                   z_f, var_weight)
        # Integrated inverse variance, needed by the gamma weight.
        inv_variance += HestonTools.get_integral_variance(t_i[i - 1], t_i[i],
                                                          1.0 / v_t_paths[:, i - 1],
                                                          1.0 / v_t_paths[:, i], 0.5, 0.5)

        # Discretisation constants for the log-forward update (Andersen-style
        # k0..k3 with central 0.5/0.5 weighting).
        k0 = -delta_t_i[i - 1] * (rho * k * theta) / epsilon
        k1 = 0.5 * delta_t_i[i - 1] * ((k * rho) / epsilon - 0.5) - rho / epsilon
        k2 = 0.5 * delta_t_i[i - 1] * ((k * rho) / epsilon - 0.5) + rho / epsilon
        k3 = 0.5 * delta_t_i[i - 1] * (1.0 - rho * rho)

        # Log-forward update with the residual (uncorrelated) noise z_f.
        np.copyto(ln_x_t_paths[:, i],
                  ln_x_t_paths[:, i - 1] + k0 + k1 * v_t_paths[:, i - 1] + k2 * v_t_paths[:, i]
                  + np.sqrt(k3) * AnalyticTools.dot_wise(np.sqrt(v_t_paths[:, i - 1] + v_t_paths[:, i]), z_f))

    map_out_put[HESTON_OUTPUT.PATHS] = np.exp(ln_x_t_paths)
    map_out_put[HESTON_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_v_t_paths
    # Normalise the delta weight by sqrt(1 - rho^2) * t1 * f0.
    map_out_put[HESTON_OUTPUT.DELTA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = np.multiply(
        delta_weight, 1.0 / (np.sqrt(1.0 - rho * rho) * t1 * f0))
    map_out_put[HESTON_OUTPUT.SPOT_VARIANCE_PATHS] = v_t_paths
    map_out_put[HESTON_OUTPUT.TIMES] = t_i

    # Gamma weight is assembled from delta/var weights and integrated 1/v.
    HestonTools.get_gamma_weight(delta_weight, var_weight, inv_variance, rho, t1, gamma_weight)
    map_out_put[HESTON_OUTPUT.GAMMA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = np.multiply(
        gamma_weight, 1.0 / ((1.0 - rho * rho) * np.power(t1 * f0, 2.0)))

    return map_out_put
def get_path_multi_step(t0: float,
                        t1: float,
                        parameters: Vector,
                        f0: float,
                        no_paths: int,
                        no_time_steps: int,
                        type_random_number: TYPE_STANDARD_NORMAL_SAMPLING,
                        rnd_generator,
                        **kwargs) -> map:
    """Simulate log-normal SABR paths together with Malliavin delta/gamma weights.

    :param t0: start time of the simulation.
    :param t1: end time of the simulation.
    :param parameters: model parameters [alpha, nu, rho].
    :param f0: initial forward.
    :param no_paths: requested number of paths (doubled for antithetic sampling).
    :param no_time_steps: requested number of time steps (the final grid comes from get_time_steps).
    :param type_random_number: sampling scheme for the driving normals.
    :param rnd_generator: project random-number generator exposing normal(mu, sigma, n, kind).
    :param kwargs: forwarded to get_time_steps (e.g. extra grid points).
    :return: dict keyed by SABR_OUTPUT with paths, sigma paths, integrated
             variance, time grid and Malliavin delta/gamma weights.
    """
    alpha = parameters[0]
    nu = parameters[1]
    rho = parameters[2]
    rho_inv = np.sqrt(1.0 - rho * rho)

    # Antithetic sampling needs the mirrored partner of every path.
    no_paths = 2 * no_paths if type_random_number == TYPE_STANDARD_NORMAL_SAMPLING.ANTITHETIC else no_paths

    t_i = get_time_steps(t0, t1, no_time_steps, **kwargs)
    no_time_steps = len(t_i)
    delta_t_i = np.diff(t_i)

    s_t = np.empty((no_paths, no_time_steps))
    sigma_t = np.empty((no_paths, no_time_steps))
    # NOTE(review): int_v_t_paths is filled each step but no longer exported
    # (see BUG FIX below) — confirm whether it should replace int_sigma_t_i
    # in the output instead.
    int_v_t_paths = np.zeros(shape=(no_paths, no_time_steps - 1))
    s_t[:, 0] = f0
    sigma_t[:, 0] = alpha

    int_sigma_t_i = np.empty((no_paths, no_time_steps - 1))
    sigma_t_i_1 = np.empty(no_paths)
    sigma_t_i_1[:] = alpha

    # Accumulators for the Malliavin weight computations.
    delta_weight = np.zeros(no_paths)
    gamma_weight = np.zeros(no_paths)
    var_weight = np.zeros(no_paths)
    inv_variance = np.zeros(no_paths)

    map_output = {}
    for i_step in range(1, no_time_steps):
        z_i = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)
        z_sigma = rnd_generator.normal(0.0, 1.0, no_paths, type_random_number)

        # Sample sigma at t_i conditional on t_{i-1}.
        sigma_t_i = get_vol_sampling(t_i[i_step - 1], t_i[i_step], sigma_t_i_1, nu, z_sigma)
        sigma_t[:, i_step] = sigma_t_i

        # Trapezoidal integral of sigma^2 over the step.
        int_sigma_t_i[:, i_step - 1] = 0.5 * (sigma_t_i_1 * sigma_t_i_1 * delta_t_i[i_step - 1]
                                              + sigma_t_i * sigma_t_i * delta_t_i[i_step - 1])
        # Correlated part of the log-forward increment, (rho / nu) * d(sigma).
        diff_sigma = (rho / nu) * (sigma_t_i - sigma_t_i_1)
        # Independent part, sqrt of the integrated variance times a fresh normal.
        noise_sigma = AnalyticTools.dot_wise(np.sqrt(int_sigma_t_i[:, i_step - 1]), z_i)

        # Accumulate Malliavin delta/variance weights in place.
        SABRTools.get_delta_weight(t_i[i_step - 1], t_i[i_step], sigma_t_i_1, sigma_t_i,
                                   z_sigma, delta_weight)
        SABRTools.get_var_weight(t_i[i_step - 1], t_i[i_step], sigma_t_i_1, sigma_t_i,
                                 z_sigma, var_weight)
        # Integrated inverse sigma, needed by the gamma weight.
        inv_variance += SABRTools.get_integral_variance(t_i[i_step - 1], t_i[i_step],
                                                        1.0 / sigma_t_i_1, 1.0 / sigma_t_i, 0.5, 0.5)
        np.copyto(int_v_t_paths[:, i_step - 1],
                  SABRTools.get_integral_variance(t_i[i_step - 1], t_i[i_step],
                                                  sigma_t_i_1, sigma_t_i, 0.5, 0.5))

        sigma_t_i_1 = sigma_t_i.copy()
        # Exact log-normal conditional update of the forward.
        s_t[:, i_step] = AnalyticTools.dot_wise(
            s_t[:, i_step - 1],
            np.exp(-0.5 * int_sigma_t_i[:, i_step - 1] + diff_sigma + rho_inv * noise_sigma))

    map_output[SABR_OUTPUT.DELTA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = delta_weight
    map_output[SABR_OUTPUT.PATHS] = s_t
    # BUG FIX: INTEGRAL_VARIANCE_PATHS was assigned twice in a row (first
    # int_v_t_paths, then int_sigma_t_i); the first assignment was dead code.
    # The final (observable) value is kept — review whether int_v_t_paths was
    # the intended export.
    map_output[SABR_OUTPUT.INTEGRAL_VARIANCE_PATHS] = int_sigma_t_i
    map_output[SABR_OUTPUT.SIGMA_PATHS] = sigma_t
    map_output[SABR_OUTPUT.TIMES] = t_i

    # Gamma weight is assembled from delta/var weights and integrated 1/sigma.
    SABRTools.get_gamma_weight(delta_weight, var_weight, inv_variance, rho, t1, gamma_weight)
    map_output[SABR_OUTPUT.GAMMA_MALLIAVIN_WEIGHTS_PATHS_TERMINAL] = np.multiply(
        gamma_weight, 1.0 / ((1.0 - rho * rho) * np.power(t1 * f0, 2.0)))

    return map_output