def test_gbm_euler_step_is_deterministic(self):
  """Seeding via `key` must reproduce an explicitly supplied normal draw.

  Builds the same stateless normal sample the op is expected to draw for
  (key, step_index) and checks both call styles agree elementwise.
  """
  drift, vol = 0.2, 0.1
  t, dt = 0.2, 0.01
  num_samples = 8
  key = 1337
  states = tf.ones([num_samples])
  # The noise the dynamics op should internally draw at this (key, step).
  eps_t = contrib_stateless.stateless_random_normal(
      shape=[num_samples], seed=[key, int(t / dt)])
  next_states = dynamics.gbm_euler_step(
      states, drift, vol, t, dt, random_normal_op=lambda: eps_t)
  next_states_bis = dynamics.gbm_euler_step(
      states, drift, vol, t, dt, key=key)
  with self.session() as session:
    next_states_eval, next_states_bis_eval = session.run(
        (next_states, next_states_bis))
  self.assertEqual(next_states_eval.shape, (num_samples,))
  self.assertEqual(next_states_bis_eval.shape, (num_samples,))
  self.assertAllClose(next_states_eval, next_states_bis_eval, atol=1e-7)
def test_gbm_euler_step_output_changes_with_key(self):
  """Different seeds must yield different next states for every sample."""
  drift, vol = 0.2, 0.1
  t, dt = 0.2, 0.01
  num_samples = 8
  key_0, key_1 = 74, 75
  states = tf.ones([num_samples])
  next_states_0 = dynamics.gbm_euler_step(
      states, drift, vol, t, dt, key=key_0)
  next_states_1 = dynamics.gbm_euler_step(
      states, drift, vol, t, dt, key=key_1)
  with self.session() as session:
    next_states_0_eval, next_states_1_eval = session.run(
        (next_states_0, next_states_1))
  # The step is a bijection w.r.t. dw_t, all terms should be different.
  self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_expects_static_shape(self):
  """A states tensor with an unknown static shape must be rejected."""
  drift, vol = 0.2, 0.1
  t, dt = 0.0, 0.01
  # Leading dimension unknown at graph-construction time.
  states = tf.placeholder(dtype=tf.float32, shape=[None])
  with self.assertRaises(ValueError):
    dynamics.gbm_euler_step(states, drift, vol, t, dt)
def test_gbm_euler_step_output_is_correct(self):
  """One Euler step must equal s * (1 + mu*dt + sigma*eps*sqrt(dt))."""
  np.random.seed(0)
  drift, vol = 0.2, 0.1
  t, dt = 0.0, 0.01
  num_samples = 8
  states = tf.ones([num_samples])
  # Fixed noise injected through random_normal_op so the result is closed-form.
  eps_t = np.random.normal(size=[num_samples]).astype(np.float32)
  next_states = dynamics.gbm_euler_step(
      states, drift, vol, t, dt, random_normal_op=lambda: eps_t)
  with self.session() as session:
    next_states_eval = session.run(next_states)
  self.assertEqual(next_states_eval.shape, (num_samples,))
  expected = np.ones([num_samples], dtype=np.float32) * (
      1.0 + drift * dt + vol * eps_t * np.sqrt(dt))
  # Here the maximum discrepancy is 1.17e-7 due to differences in
  # numerical implementations between tf and np so we set delta to 1.2e-7.
  self.assertAllClose(next_states_eval, expected, atol=1.2e-7)
def test_gbm_euler_step_output_changes_with_t(self):
  """Stepping from consecutive times must draw different noise."""
  drift, vol = 0.2, 0.1
  dt = 0.01
  t_0 = 0.2
  t_1 = t_0 + dt
  num_samples = 8
  states = tf.ones([num_samples])
  next_states_0 = dynamics.gbm_euler_step(states, drift, vol, t_0, dt)
  next_states_1 = dynamics.gbm_euler_step(states, drift, vol, t_1, dt)
  with self.session() as session:
    next_states_0_eval, next_states_1_eval = session.run(
        (next_states_0, next_states_1))
  self.assertEqual(next_states_0_eval.shape, (num_samples,))
  self.assertEqual(next_states_1_eval.shape, (num_samples,))
  # The step is a bijection w.r.t. dw_t, all terms should be different.
  self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_european_call_estimator_converges_close_to_black_scholes(self):
  """The adaptive MC estimator should converge to the Black-Scholes price.

  Runs monte_carlo_manager.mc_estimator until it reports convergence at the
  requested tolerance/confidence and checks the estimate lies within a
  relative `tol` band around the analytic Black-Scholes call price.
  """
  current_price = 100.0
  r = interest_rate = 0.05
  vol = 0.2
  strike = 120.0
  maturity = 0.5
  dt = 0.001
  discount = tf.exp(-r * maturity)
  tol = 5e-2
  conf_level = 0.95
  batch_size = int(1e4)
  key_placeholder = tf.placeholder(shape=(), dtype=tf.int32)
  max_num_steps = 1e5
  bs_call_price = util.black_scholes_call_price(
      current_price, interest_rate, vol, strike, maturity)
  initial_state = tf.constant(current_price)
  # BUG FIX: the seed must be passed as the `key` keyword argument. It was
  # previously passed positionally as the sixth argument, where the other
  # call sites in this file place `random_normal_op`, so the placeholder
  # would not have been used as a seed at all.
  dynamics_op = lambda s, t, dt: dynamics.gbm_euler_step(
      s, r, vol, t, dt, key=key_placeholder)
  payoff_fn = lambda s: discount * payoffs.call_payoff(s, strike)
  (mean_est, mean_sq_est, _) = monte_carlo_manager.non_callable_price_mc(
      initial_state, dynamics_op, payoff_fn, maturity, batch_size, dt)
  with self.test_session() as session:
    (mean_est_eval, _, converged) = monte_carlo_manager.mc_estimator(
        mean_est, mean_sq_est, batch_size, key_placeholder, {}, tol,
        conf_level, max_num_steps, session)
    bs_call_price_eval = session.run(bs_call_price)
  self.assertTrue(converged)
  # Here the discretization bias would make these asserts fail with larger dt.
  self.assertLessEqual(mean_est_eval, bs_call_price_eval * (1.0 + tol))
  self.assertGreaterEqual(mean_est_eval, bs_call_price_eval * (1.0 - tol))
def test_european_call_euler_mc_close_to_black_scholes(self):
  """A fixed-size Euler MC estimate should lie within 3 s.e. of Black-Scholes."""
  current_price = 100.0
  r = interest_rate = 0.05
  vol = 0.2
  strike = 120.0
  maturity = 0.5
  dt = 0.01
  num_samples = int(1e4)
  discount = tf.exp(-r * maturity)
  bs_call_price = util.black_scholes_call_price(
      current_price, interest_rate, vol, strike, maturity)
  initial_state = tf.constant(current_price)
  dynamics_op = lambda s, t, dt: dynamics.gbm_euler_step(s, r, vol, t, dt)
  payoff_fn = lambda s: discount * payoffs.call_payoff(s, strike)
  (mean_outcome, mean_sq_outcome, _) = (
      monte_carlo_manager.non_callable_price_mc(
          initial_state, dynamics_op, payoff_fn, maturity, num_samples, dt))
  std_outcomes = util.stddev_est(mean_outcome, mean_sq_outcome)
  with self.test_session() as session:
    bs_call_price_eval = session.run(bs_call_price)
    mean_outcome_eval, std_outcomes_eval = session.run(
        (mean_outcome, std_outcomes))
  # Three standard errors around the analytic price.
  half_width = 3.0 * std_outcomes_eval / np.sqrt(num_samples)
  self.assertLessEqual(mean_outcome_eval, bs_call_price_eval + half_width)
  self.assertGreaterEqual(mean_outcome_eval, bs_call_price_eval - half_width)