def _sample_paths(self, times, num_requested_times, initial_state,
                  num_samples, random_type, seed, skip):
  """Returns a sample of paths from the process."""
  # Normal draws needed for sampling.
  normal_draws = utils.generate_mc_normal_draws(
      num_normal_draws=1, num_time_steps=num_requested_times,
      num_sample_paths=num_samples, random_type=random_type,
      seed=seed, dtype=self._dtype, skip=skip)
  times = tf.concat([[0], times], -1)
  dt = times[1:] - times[:-1]
  # The logarithm of all the increments between the times.
  log_increments = ((self._mu - self._sigma**2 / 2) * dt
                    + tf.sqrt(dt) * self._sigma
                    * tf.transpose(tf.squeeze(normal_draws, -1)))
  # Since the implementation of tf.math.cumsum is single-threaded we
  # use lower-triangular matrix multiplication instead.
  once = tf.ones([num_requested_times, num_requested_times],
                 dtype=self._dtype)
  lower_triangular = tf.linalg.band_part(once, -1, 0)
  cumsum = tf.linalg.matvec(lower_triangular, log_increments)
  samples = initial_state * tf.math.exp(cumsum)
  return tf.expand_dims(samples, -1)
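# A note on the trick above: multiplying by a lower-triangular matrix of ones
# reproduces `tf.math.cumsum` while mapping to a single matmul, which tends to
# parallelize better on accelerators. A minimal standalone check in plain
# TensorFlow (illustrative only, not library code):
import tensorflow as tf

x = tf.constant([1., 2., 3., 4.])
n = tf.shape(x)[0]
lower_triangular = tf.linalg.band_part(tf.ones([n, n], dtype=x.dtype), -1, 0)
matvec_cumsum = tf.linalg.matvec(lower_triangular, x)
# matvec_cumsum == tf.math.cumsum(x) == [1., 3., 6., 10.]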
def test_sobol_numbers_generation(self):
  """Sobol random type results in the correct draws."""
  for dtype in (tf.float32, tf.float64):
    num_draws = tf.constant(2, dtype=tf.int32)
    steps_num = tf.constant(3, dtype=tf.int32)
    num_samples = tf.constant(4, dtype=tf.int32)
    random_type = tff.math.random.RandomType.SOBOL
    skip = 10
    samples = utils.generate_mc_normal_draws(
        num_normal_draws=num_draws, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        dtype=dtype, skip=skip)
    expected_samples = [[[0.8871465, 0.48877636],
                         [-0.8871465, -0.48877636],
                         [0.48877636, 0.8871465],
                         [-0.15731068, 0.15731068]],
                        [[0.8871465, -1.5341204],
                         [1.5341204, -0.15731068],
                         [-0.15731068, 1.5341204],
                         [-0.8871465, 0.48877636]],
                        [[-0.15731068, 1.5341204],
                         [0.15731068, -0.48877636],
                         [-1.5341204, 0.8871465],
                         [0.8871465, -1.5341204]]]
    self.assertAllClose(samples, expected_samples, rtol=1e-5, atol=1e-5)
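# Conceptually, `RandomType.SOBOL` draws are low-discrepancy uniforms pushed
# through the inverse normal CDF. The sketch below shows that mapping with
# public TensorFlow ops; the exact transformation, dimension ordering, and
# skipping policy inside `generate_mc_normal_draws` may differ, so treat this
# as illustrative only:
import tensorflow as tf

# Skipping the head of the sequence also avoids the degenerate first Sobol
# point at the origin, where the inverse CDF diverges.
uniforms = tf.math.sobol_sample(dim=2, num_results=4, skip=10,
                                dtype=tf.float64)
sqrt_two = tf.constant(2.0, dtype=tf.float64) ** 0.5
normals = sqrt_two * tf.math.erfinv(2.0 * uniforms - 1.0)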
def _sample(dim, drift_fn, volatility_fn, times, time_step, keep_mask,
            times_shape, num_samples, initial_state, random_type,
            seed, swap_memory, skip, dtype):
  """Returns a sample of paths from the process using the Euler method."""
  dt = times[1:] - times[:-1]
  sqrt_dt = tf.sqrt(dt)
  current_state = initial_state + tf.zeros([num_samples, dim],
                                           dtype=initial_state.dtype)
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront.
  if random_type in (random.RandomType.SOBOL,
                     random.RandomType.HALTON,
                     random.RandomType.HALTON_RANDOMIZED):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        dtype=dtype, seed=seed, skip=skip)
    wiener_mean = None
  else:
    # If pseudo or antithetic sampling is used, proceed with random sampling
    # at each step.
    wiener_mean = tf.zeros((dim,), dtype=dtype, name='wiener_mean')
    normal_draws = None
  cond_fn = lambda i, *args: i < steps_num

  def step_fn(i, written_count, current_state, result):
    return _euler_step(i, written_count, current_state, result, drift_fn,
                       volatility_fn, wiener_mean, num_samples, times, dt,
                       sqrt_dt, keep_mask, random_type, seed, normal_draws)

  # The maximum number of iterations is passed to the while loop below. It
  # improves performance of the while loop on a GPU and is needed for
  # XLA-compilation compatibility.
  maximum_iterations = (tf.cast(1. / time_step, dtype=tf.int32)
                        + tf.size(times))
  result = tf.TensorArray(dtype=dtype, size=times_shape[-1])
  _, _, _, result = tf.while_loop(
      cond_fn, step_fn, (0, 0, current_state, result),
      maximum_iterations=maximum_iterations,
      swap_memory=swap_memory)
  result = tf.transpose(result.stack(), (1, 0, 2))
  # Shape of `result` is dynamic in the `times` dimension because of
  # `TensorArray`. In order to make the shape static, use the `set_shape`
  # method.
  # TODO(b/148854825): Consider removing TensorArray to make all shapes
  # static.
  result.set_shape(current_state.shape[:1] + times_shape
                   + current_state.shape[-1:])
  return result
def _sample_paths(self, times, num_requested_times, initial_state,
                  num_samples, random_type, seed, skip, normal_draws):
  """Returns a sample of paths from the process."""
  if normal_draws is None:
    # Normal draws needed for sampling.
    # Shape [num_requested_times, num_samples, dim]
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=self._dim, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    # Shape [num_time_points, num_samples, dim]
    normal_draws = tf.transpose(normal_draws, [1, 0, 2])
    num_samples = tf.shape(normal_draws)[1]
    draws_dim = normal_draws.shape[2]
    if self._dim != draws_dim:
      raise ValueError(
          '`dim` should be equal to `normal_draws.shape[2]` but they are '
          '{0} and {1} respectively'.format(self._dim, draws_dim))
  times = tf.concat([[0], times], -1)
  # Time increments.
  # Shape [num_requested_times, 1, 1]
  dt = tf.expand_dims(tf.expand_dims(times[1:] - times[:-1], axis=-1),
                      axis=-1)
  if self._corr_matrix is None:
    stochastic_increment = normal_draws
  else:
    cholesky = tf.linalg.cholesky(self._corr_matrix)
    stochastic_increment = tf.linalg.matvec(cholesky, normal_draws)
  # The logarithm of all the increments between the times.
  # Shape [num_requested_times, num_samples, dim]
  log_increments = ((self._means - self._vols**2 / 2) * dt
                    + tf.sqrt(dt) * self._vols * stochastic_increment)
  # Since the implementation of tf.math.cumsum is single-threaded we
  # use lower-triangular matrix multiplication instead.
  once = tf.ones([num_requested_times, num_requested_times],
                 dtype=self._dtype)
  lower_triangular = tf.linalg.band_part(once, -1, 0)
  cumsum = tf.linalg.matvec(lower_triangular, tf.transpose(log_increments))
  cumsum = tf.transpose(cumsum, [1, 2, 0])
  samples = initial_state * tf.math.exp(cumsum)
  return samples
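# The Cholesky step above is what imposes the correlation structure on the
# independent draws. A minimal standalone check, using the fact that with
# unit-variance marginals E[X * Y] equals the correlation (plain TensorFlow,
# independent of the library):
import tensorflow as tf

corr_matrix = tf.constant([[1.0, 0.5], [0.5, 1.0]], dtype=tf.float64)
cholesky = tf.linalg.cholesky(corr_matrix)
normal_draws = tf.random.stateless_normal([100000, 2], seed=[1, 2],
                                          dtype=tf.float64)
correlated = tf.linalg.matvec(cholesky, normal_draws)
sample_corr = tf.reduce_mean(correlated[:, 0] * correlated[:, 1])
# sample_corr is close to 0.5.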
def _sample(*, dim, drift_fn, volatility_fn, times, time_step, keep_mask,
            num_requested_times, num_samples, initial_state, random_type,
            seed, swap_memory, skip, precompute_normal_draws, watch_params,
            time_indices, dtype):
  """Returns a sample of paths from the process using the Euler method."""
  dt = times[1:] - times[:-1]
  sqrt_dt = tf.sqrt(dt)
  current_state = initial_state + tf.zeros([num_samples, dim],
                                           dtype=initial_state.dtype)
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if precompute_normal_draws or random_type in (
      random.RandomType.SOBOL,
      random.RandomType.HALTON,
      random.RandomType.HALTON_RANDOMIZED,
      random.RandomType.STATELESS,
      random.RandomType.STATELESS_ANTITHETIC):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        dtype=dtype, seed=seed, skip=skip)
    wiener_mean = None
  else:
    # If pseudo or antithetic sampling is used, proceed with random sampling
    # at each step.
    wiener_mean = tf.zeros((dim,), dtype=dtype, name='wiener_mean')
    normal_draws = None
  if watch_params is None:
    # Use while_loop if `watch_params` is not passed.
    return _while_loop(
        dim=dim,
        steps_num=steps_num,
        current_state=current_state,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        time_step=time_step,
        keep_mask=keep_mask,
        num_requested_times=num_requested_times,
        swap_memory=swap_memory,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)
  else:
    # Use a custom for_loop if `watch_params` is specified.
    return _for_loop(
        steps_num=steps_num,
        current_state=current_state,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        time_indices=time_indices,
        keep_mask=keep_mask,
        watch_params=watch_params,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)
def _sample_paths(self, times, num_requested_times, initial_state,
                  num_samples, random_type, seed, skip, normal_draws):
  """Returns a sample of paths from the process."""
  if normal_draws is None:
    # Normal draws needed for sampling.
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=1, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    # Shape [num_time_points, num_samples, dim]
    normal_draws = tf.transpose(normal_draws, [1, 0, 2])
    num_samples = tf.shape(normal_draws)[1]
    draws_dim = normal_draws.shape[2]
    if draws_dim != 1:
      raise ValueError(
          '`dim` should be equal to `1` but is {0}'.format(draws_dim))
  # Create a set of zeros that is the right shape to add a '0' as the first
  # element for each series of times.
  zeros = tf.zeros(tf.concat([times.shape[:-1], [1]], 0), dtype=self._dtype)
  times = tf.concat([zeros, times], -1)
  mean_integral = self._integrate_parameter(self._mean,
                                            self._mean_is_constant,
                                            times[..., :-1], times[..., 1:])
  # mean_integral has shape [batch_shape, k-1], where self._mean has shape
  # [batch_shape, 1] and times has shape [k].
  mean_integral = tf.expand_dims(mean_integral, -2)
  volatility_sq_integral = self._integrate_parameter(
      self._volatility_squared, self._volatility_is_constant,
      times[..., :-1], times[..., 1:])
  volatility_sq_integral = tf.expand_dims(volatility_sq_integral, -2)
  # Giving mean_integral and volatility_sq_integral
  # shape = `batch_shape + [1, k-1]`,
  # where self._mean has shape `batch_shape + [1]` and times has shape `[k]`.
  # The logarithm of all the increments between the times.
  log_increments = ((mean_integral - volatility_sq_integral / 2)
                    + tf.sqrt(volatility_sq_integral)
                    * tf.transpose(tf.squeeze(normal_draws, -1)))
  # Since the implementation of tf.math.cumsum is single-threaded we
  # use lower-triangular matrix multiplication instead.
  once = tf.ones([num_requested_times, num_requested_times],
                 dtype=self._dtype)
  lower_triangular = tf.linalg.band_part(once, -1, 0)
  cumsum = tf.linalg.matvec(lower_triangular, log_increments)
  samples = tf.expand_dims(initial_state, [-1]) * tf.math.exp(cumsum)
  return tf.expand_dims(samples, -1)
def _sample_paths(self, times, num_requested_times, initial_state,
                  num_samples, random_type, seed, skip, normal_draws):
  """Returns a sample of paths from the process."""
  if normal_draws is None:
    # Normal draws needed for sampling.
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=1, num_time_steps=num_requested_times,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    # Shape [num_time_points, num_samples, dim]
    normal_draws = tf.transpose(normal_draws, [1, 0, 2])
    num_samples = tf.shape(normal_draws)[1]
    draws_dim = normal_draws.shape[2]
    if draws_dim != 1:
      raise ValueError(
          '`dim` should be equal to `1` but is {0}'.format(draws_dim))
  times = tf.concat([[0], times], -1)
  mu_integral = self._integrate_parameter(self._mu, self._mu_is_constant,
                                          times[:-1], times[1:])
  # mu_integral has shape [batch_shape, k-1], where self._mu has shape
  # [batch_shape, 1] and times has shape [k].
  mu_integral = tf.expand_dims(mu_integral, -2)
  sigma_sq_integral = self._integrate_parameter(self._sigma_squared,
                                                self._sigma_is_constant,
                                                times[:-1], times[1:])
  sigma_sq_integral = tf.expand_dims(sigma_sq_integral, -2)
  # Giving mu_integral and sigma_sq_integral shape = [batch_shape, 1, k-1],
  # where self._mu has shape [batch_shape, 1] and times has shape [k].
  # The logarithm of all the increments between the times.
  log_increments = ((mu_integral - sigma_sq_integral / 2)
                    + tf.sqrt(sigma_sq_integral)
                    * tf.transpose(tf.squeeze(normal_draws, -1)))
  # Since the implementation of tf.math.cumsum is single-threaded we
  # use lower-triangular matrix multiplication instead.
  once = tf.ones([num_requested_times, num_requested_times],
                 dtype=self._dtype)
  lower_triangular = tf.linalg.band_part(once, -1, 0)
  cumsum = tf.linalg.matvec(lower_triangular, log_increments)
  samples = tf.expand_dims(initial_state, [-1]) * tf.math.exp(cumsum)
  return tf.expand_dims(samples, -1)
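# Both samplers above use the same exact-discretization idea: accumulate
# lognormal increments driven by the integrated drift and variance. A
# self-contained sketch for constant coefficients (the function name and the
# use of tf.math.cumsum here are illustrative choices, not the library's
# implementation):
import tensorflow as tf

def gbm_paths(mu, sigma, initial_state, times, num_samples, seed):
  """Exact GBM sampling via lognormal increments."""
  dtype = times.dtype
  times = tf.concat([tf.zeros([1], dtype=dtype), times], axis=-1)
  dt = times[1:] - times[:-1]
  normals = tf.random.stateless_normal([num_samples, tf.size(dt)],
                                       seed=seed, dtype=dtype)
  log_increments = (mu - sigma**2 / 2) * dt + sigma * tf.sqrt(dt) * normals
  return initial_state * tf.math.exp(tf.math.cumsum(log_increments, axis=-1))

paths = gbm_paths(mu=0.05, sigma=0.2, initial_state=1.0,
                  times=tf.constant([0.5, 1.0, 2.0], dtype=tf.float64),
                  num_samples=100000, seed=[4, 2])
# Sanity check: E[S_t] = S_0 * exp(mu * t), so the sample mean of
# paths[:, -1] should be near exp(0.1) ~= 1.105.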
def _sample_paths(self, times, times_shape, current_log_spot, current_vol,
                  num_samples, random_type, keep_mask, seed, skip, tolerance):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  dt = times[1:] - times[:-1]
  # Compute the parameters at `times`. Here + tf.reduce_min(dt) / 2 ensures
  # that the value is constant between `times`.
  kappa, theta, epsilon, rho = _get_parameters(  # pylint: disable=unbalanced-tuple-unpacking
      times + tf.reduce_min(dt) / 2,
      self._kappa, self._theta, self._epsilon, self._rho)
  # For a random_type other than PSEUDO, the sequence of independent random
  # normals should be generated upfront.
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  if random_type != random.RandomType.PSEUDO:
    # Note that at each iteration we need 3 random draws.
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=3, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self.dtype(), skip=skip)
  else:
    normal_draws = None
  cond_fn = lambda i, *args: i < steps_num

  def body_fn(i, written_count, current_vol, current_log_spot, vol_paths,
              log_spot_paths):
    """Simulates the Heston process to the next time point."""
    time_step = dt[i]
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,), mean=tf.zeros([3], dtype=kappa.dtype), seed=seed)
    else:
      normals = normal_draws[i]

    def _next_vol_fn():
      return _update_variance(kappa[i], theta[i], epsilon[i], rho[i],
                              current_vol, time_step, normals[..., :2])

    # Do not update the variance if `time_step <= tolerance`.
    next_vol = tf.cond(time_step > tolerance,
                       _next_vol_fn,
                       lambda: current_vol)

    def _next_log_spot_fn():
      return _update_log_spot(kappa[i], theta[i], epsilon[i], rho[i],
                              current_vol, next_vol, current_log_spot,
                              time_step, normals[..., -1])

    # Do not update the state if `time_step <= tolerance`.
    next_log_spot = tf.cond(time_step > tolerance,
                            _next_log_spot_fn,
                            lambda: current_log_spot)
    vol_paths = tf.cond(keep_mask[i + 1],
                        lambda: vol_paths.write(written_count, next_vol),
                        lambda: vol_paths)
    log_spot_paths = tf.cond(
        keep_mask[i + 1],
        lambda: log_spot_paths.write(written_count, next_log_spot),
        lambda: log_spot_paths)
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_vol, next_log_spot,
            vol_paths, log_spot_paths)

  log_spot_paths = tf.TensorArray(dtype=self._dtype, size=times_shape[-1])
  vol_paths = tf.TensorArray(dtype=self._dtype, size=times_shape[-1])
  _, _, _, _, vol_paths, log_spot_paths = tf.while_loop(
      cond_fn, body_fn,
      (0, 0, current_vol, current_log_spot, vol_paths, log_spot_paths),
      maximum_iterations=steps_num)
  # TensorArray.stack() produces tensors of unknown shapes.
  log_spot_paths = log_spot_paths.stack()
  log_spot_paths.set_shape(times_shape + current_log_spot.shape)
  vol_paths = vol_paths.stack()
  vol_paths.set_shape(times_shape + current_vol.shape)
  return tf.stack(
      [tf.transpose(log_spot_paths), tf.transpose(vol_paths)], -1)
def _sample_paths(self, times, num_samples, random_type, skip, seed,
                  normal_draws=None, times_grid=None, validate_args=False):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  num_requested_times = tf.shape(times)[0]
  params = [self._mean_reversion, self._volatility]
  if self._corr_matrix is not None:
    params = params + [self._corr_matrix]
  times, keep_mask = _prepare_grid(times, times_grid, *params)
  # Add zeros as a starting location.
  dt = times[1:] - times[:-1]
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  if normal_draws is None:
    # In order to use a low-discrepancy random_type we need to generate the
    # sequence of independent random normals upfront. We also precompute
    # random numbers for the stateless random type in order to ensure
    # independent samples for multiple function calls with different seeds.
    if random_type in (random.RandomType.SOBOL,
                       random.RandomType.HALTON,
                       random.RandomType.HALTON_RANDOMIZED,
                       random.RandomType.STATELESS,
                       random.RandomType.STATELESS_ANTITHETIC):
      normal_draws = utils.generate_mc_normal_draws(
          num_normal_draws=self._dim, num_time_steps=steps_num,
          num_sample_paths=num_samples, random_type=random_type,
          seed=seed, dtype=self._dtype, skip=skip)
    else:
      normal_draws = None
  else:
    if validate_args:
      draws_times = tf.shape(normal_draws)[0]
      asserts = tf.assert_equal(
          draws_times, tf.shape(times)[0] - 1,  # We have added `0` to `times`
          message='`tf.shape(normal_draws)[1]` should be equal to the '
                  'number of all `times` plus the number of all jumps of '
                  'the piecewise constant parameters.')
      with tf.compat.v1.control_dependencies([asserts]):
        normal_draws = tf.identity(normal_draws)
  # The below is OK because we support exact discretization with piecewise
  # constant mean reversion and volatility.
  mean_reversion = self._mean_reversion(times)
  volatility = self._volatility(times)
  if self._corr_matrix is not None:
    corr_matrix = _get_parameters(
        times + tf.math.reduce_min(dt) / 2, self._corr_matrix)[0]
    corr_matrix_root = tf.linalg.cholesky(corr_matrix)
  else:
    corr_matrix_root = None
  exp_x_t = self._conditional_mean_x(times, mean_reversion, volatility)
  var_x_t = self._conditional_variance_x(times, mean_reversion, volatility)
  if self._dim == 1:
    mean_reversion = tf.expand_dims(mean_reversion, axis=0)
  cond_fn = lambda i, *args: i < tf.size(dt)

  def body_fn(i, written_count, current_x, rate_paths):
    """Simulates the Hull-White process to the next time point."""
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros((self._dim,), dtype=mean_reversion.dtype),
          random_type=random_type, seed=seed)
    else:
      normals = normal_draws[i]
    if corr_matrix_root is not None:
      normals = tf.linalg.matvec(corr_matrix_root[i], normals)
    vol_x_t = tf.math.sqrt(tf.nn.relu(tf.transpose(var_x_t)[i]))
    # If numerically `vol_x_t == 0`, the gradient of `vol_x_t` becomes `NaN`.
    # To prevent this, we explicitly set `vol_x_t` to a zero tensor at zero
    # values so that the gradient is set to zero at these values.
    vol_x_t = tf.where(vol_x_t > 0.0, vol_x_t, 0.0)
    next_x = (tf.math.exp(-tf.transpose(mean_reversion)[i + 1] * dt[i])
              * current_x
              + tf.transpose(exp_x_t)[i]
              + vol_x_t * normals)
    f_0_t = self._instant_forward_rate_fn(times[i + 1])
    # Update `rate_paths`.
    rate_paths = utils.maybe_update_along_axis(
        tensor=rate_paths,
        do_update=keep_mask[i + 1],
        ind=written_count,
        axis=1,
        new_tensor=tf.expand_dims(next_x, axis=1) + f_0_t)
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_x, rate_paths)

  rate_paths = tf.zeros((num_samples, num_requested_times, self._dim),
                        dtype=self._dtype)
  # Include the initial state, if necessary.
  f0_t = self._instant_forward_rate_fn(times[0])
  rate_paths = utils.maybe_update_along_axis(
      tensor=rate_paths,
      do_update=keep_mask[0],
      ind=0,
      axis=1,
      new_tensor=f0_t)
  written_count = tf.cast(keep_mask[0], dtype=tf.int32)
  initial_x = tf.zeros((num_samples, self._dim), dtype=self._dtype)
  # TODO(b/157232803): Use tf.cumsum instead?
  _, _, _, rate_paths = tf.while_loop(
      cond_fn, body_fn, (0, written_count, initial_x, rate_paths))
  return rate_paths
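# The `tf.where` guard above targets a well-known autodiff pitfall: the
# gradient of `tf.sqrt` blows up at zero, and a zero upstream gradient times
# an infinite local gradient yields `NaN`. A minimal illustration of the
# standard "double-where" guard (generic TensorFlow, not the library's exact
# code):
import tensorflow as tf

x = tf.constant([0.0, 4.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.sqrt(x)
# tape.gradient(y, x) is [inf, 0.25]: the gradient diverges at x == 0.

with tf.GradientTape() as tape:
  tape.watch(x)
  # Route the zero inputs through a safe dummy value in both branches.
  safe_x = tf.where(x > 0.0, x, tf.ones_like(x))
  y = tf.where(x > 0.0, tf.sqrt(safe_x), tf.zeros_like(x))
# tape.gradient(y, x) is now [0.0, 0.25].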
def _sample_paths(self, times, num_samples, random_type, skip, seed):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  num_requested_times = times.shape[0]
  params = [self._mean_reversion, self._volatility]
  if self._corr_matrix is not None:
    params = params + [self._corr_matrix]
  times, keep_mask = _prepare_grid(times, params)
  # Add zeros as a starting location.
  dt = times[1:] - times[:-1]
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if random_type in (random.RandomType.SOBOL,
                     random.RandomType.HALTON,
                     random.RandomType.HALTON_RANDOMIZED,
                     random.RandomType.STATELESS,
                     random.RandomType.STATELESS_ANTITHETIC):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=self._dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    normal_draws = None
  # The below is OK because we support exact discretization with piecewise
  # constant mean reversion and volatility.
  mean_reversion = self._mean_reversion(times)
  volatility = self._volatility(times)
  if self._corr_matrix is not None:
    corr_matrix = _get_parameters(
        times + tf.math.reduce_min(dt) / 2, self._corr_matrix)[0]
    corr_matrix_root = tf.linalg.cholesky(corr_matrix)
  else:
    corr_matrix_root = None
  exp_x_t = self._conditional_mean_x(times, mean_reversion, volatility)
  var_x_t = self._conditional_variance_x(times, mean_reversion, volatility)
  if self._dim == 1:
    mean_reversion = tf.expand_dims(mean_reversion, axis=0)
  cond_fn = lambda i, *args: i < tf.size(dt)

  def body_fn(i, written_count, current_x, rate_paths):
    """Simulates the Hull-White process to the next time point."""
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros((self._dim,), dtype=mean_reversion.dtype),
          random_type=random_type, seed=seed)
    else:
      normals = normal_draws[i]
    if corr_matrix_root is not None:
      normals = tf.linalg.matvec(corr_matrix_root[i], normals)
    next_x = (tf.math.exp(-mean_reversion[:, i + 1] * dt[i]) * current_x
              + exp_x_t[:, i]
              + tf.math.sqrt(var_x_t[:, i]) * normals)
    f_0_t = self._instant_forward_rate_fn(times[i + 1])
    # Update `rate_paths`.
    rate_paths = utils.maybe_update_along_axis(
        tensor=rate_paths,
        do_update=keep_mask[i + 1],
        ind=written_count,
        axis=1,
        new_tensor=tf.expand_dims(next_x, axis=1) + f_0_t)
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_x, rate_paths)

  rate_paths = tf.zeros((num_samples, num_requested_times, self._dim),
                        dtype=self._dtype)
  initial_x = tf.zeros((num_samples, self._dim), dtype=self._dtype)
  # TODO(b/157232803): Use tf.cumsum instead?
  _, _, _, rate_paths = tf.while_loop(
      cond_fn, body_fn, (0, 0, initial_x, rate_paths))
  return rate_paths
def _sabr_sample_paths(self, initial_forward, initial_volatility, times,
                       time_step, num_samples, random_type, seed, name,
                       precompute_normal_draws):
  """Returns a sample of paths from the process."""
  cond_fn = lambda index, *args: index < tf.size(times)
  # In order to use low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for stateless random type in order to ensure independent samples
  # for multiple function calls with different seeds.
  if precompute_normal_draws or random_type in (
      random.RandomType.SOBOL,
      random.RandomType.HALTON,
      random.RandomType.HALTON_RANDOMIZED,
      random.RandomType.STATELESS,
      random.RandomType.STATELESS_ANTITHETIC):
    num_time_steps = tf.math.ceil(
        tf.math.divide(times[-1], time_step)) + times.shape[0]
    # We need a [3] + initial_forward.shape tensor of random draws.
    # This will be accessed by normal_draws_index.
    num_normal_draws = 3 * tf.size(initial_forward, out_type=tf.float64)
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=num_normal_draws, num_time_steps=num_time_steps,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype)
  else:
    normal_draws = None

  def body_fn(index, current_time, forward, vol, forward_paths, vol_paths,
              normal_draws_index):
    """Simulate Sabr process to the next time point."""
    forward, vol, normal_draws_index = self._propagate_to_time(
        forward, vol, current_time, times[index], time_step, random_type,
        seed, normal_draws, normal_draws_index)
    # Always update paths in the outer loop.
    forward_paths = utils.maybe_update_along_axis(
        tensor=forward_paths,
        do_update=True,
        ind=index,
        axis=1,
        new_tensor=tf.expand_dims(forward, axis=1))
    vol_paths = utils.maybe_update_along_axis(
        tensor=vol_paths,
        do_update=True,
        ind=index,
        axis=1,
        new_tensor=tf.expand_dims(vol, axis=1))
    return (index + 1, times[index], forward, vol, forward_paths, vol_paths,
            normal_draws_index)

  shape = (num_samples, times.shape[0])
  forward_paths = tf.zeros(shape, dtype=self._dtype)
  vol_paths = tf.zeros(shape, dtype=self._dtype)
  forward = tf.zeros(shape=(num_samples,),
                     dtype=self._dtype) + initial_forward
  vol = tf.zeros(shape=(num_samples,),
                 dtype=self._dtype) + initial_volatility
  start_time = tf.constant(0, dtype=self._dtype)
  _, _, _, _, forward_paths, vol_paths, _ = tf.compat.v1.while_loop(
      cond_fn, body_fn,
      (0, start_time, forward, vol, forward_paths, vol_paths, 0),
      maximum_iterations=tf.size(times))
  return tf.stack([forward_paths, vol_paths], -1)
def _sample_paths(self, times, num_requested_times, current_log_spot,
                  current_vol, num_samples, random_type, keep_mask, seed,
                  skip, tolerance, precompute_normal_draws, normal_draws):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  dt = times[1:] - times[:-1]
  # Compute the parameters at `times`. Here + tf.reduce_min(dt) / 2 ensures
  # that the value is constant between `times`.
  mean_reversion, theta, volvol, rho = _get_parameters(  # pylint: disable=unbalanced-tuple-unpacking
      times + tf.reduce_min(dt) / 2,
      self._mean_reversion, self._theta, self._volvol, self._rho)
  # For a random_type other than PSEUDO, the sequence of independent random
  # normals should be generated upfront.
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  if normal_draws is None:
    if precompute_normal_draws or random_type in (
        random.RandomType.SOBOL,
        random.RandomType.HALTON,
        random.RandomType.HALTON_RANDOMIZED,
        random.RandomType.STATELESS,
        random.RandomType.STATELESS_ANTITHETIC):
      normal_draws = utils.generate_mc_normal_draws(
          num_normal_draws=2, num_time_steps=steps_num,
          num_sample_paths=num_samples, random_type=random_type,
          dtype=self.dtype(), seed=seed, skip=skip)
    else:
      # If pseudo or antithetic sampling is used, proceed with random
      # sampling at each step.
      normal_draws = None
  # Prepare the results format.
  written_count = 0
  if isinstance(num_requested_times, int) and num_requested_times == 1:
    record_samples = False
    log_spot_paths = current_log_spot
    vol_paths = current_vol
  else:
    # If more than one sample has to be recorded, create a TensorArray.
    record_samples = True
    element_shape = current_log_spot.shape
    log_spot_paths = tf.TensorArray(dtype=times.dtype,
                                    size=num_requested_times,
                                    element_shape=element_shape,
                                    clear_after_read=False)
    vol_paths = tf.TensorArray(dtype=times.dtype,
                               size=num_requested_times,
                               element_shape=element_shape,
                               clear_after_read=False)
    # Include the initial state, if necessary.
    log_spot_paths = log_spot_paths.write(written_count, current_log_spot)
    vol_paths = vol_paths.write(written_count, current_vol)
  written_count += tf.cast(keep_mask[0], dtype=tf.int32)

  # Define the sampling while_loop condition and body functions.
  def cond_fn(i, written_count, *args):
    # It can happen that `times_grid[-1] > times[-1]` in which case we have
    # to terminate when `written_count` reaches `num_requested_times`.
    del args
    return tf.math.logical_and(i < steps_num,
                               written_count < num_requested_times)

  def body_fn(i, written_count, current_vol, current_log_spot, vol_paths,
              log_spot_paths):
    """Simulates the Heston process to the next time point."""
    time_step = dt[i]
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros([2], dtype=mean_reversion.dtype), seed=seed)
    else:
      normals = normal_draws[i]

    def _next_vol_fn():
      return _update_variance(mean_reversion[i], theta[i], volvol[i], rho[i],
                              current_vol, time_step, normals[..., 0])

    # Do not update the variance if `time_step <= tolerance`.
    next_vol = tf.cond(time_step > tolerance,
                       _next_vol_fn,
                       lambda: current_vol)

    def _next_log_spot_fn():
      return _update_log_spot(mean_reversion[i], theta[i], volvol[i], rho[i],
                              current_vol, next_vol, current_log_spot,
                              time_step, normals[..., 1])

    # Do not update the state if `time_step <= tolerance`.
    next_log_spot = tf.cond(time_step > tolerance,
                            _next_log_spot_fn,
                            lambda: current_log_spot)
    if record_samples:
      # Update volatility paths.
      vol_paths = vol_paths.write(written_count, next_vol)
      # Update log-spot paths.
      log_spot_paths = log_spot_paths.write(written_count, next_log_spot)
    else:
      vol_paths = next_vol
      log_spot_paths = next_log_spot
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_vol, next_log_spot,
            vol_paths, log_spot_paths)

  # Sample paths.
  _, _, _, _, vol_paths, log_spot_paths = tf.while_loop(
      cond_fn, body_fn,
      (0, 0, current_vol, current_log_spot, vol_paths, log_spot_paths),
      maximum_iterations=steps_num)
  if not record_samples:
    # Shape [num_samples, 1]
    vol_paths = tf.expand_dims(vol_paths, axis=-1)
    log_spot_paths = tf.expand_dims(log_spot_paths, axis=-1)
    # Shape [num_samples, 1, 1]
    return tf.stack([log_spot_paths, vol_paths], -1)
  # Shape [num_time_points] + [num_samples]
  vol_paths = vol_paths.stack()
  log_spot_paths = log_spot_paths.stack()
  # Transpose to shape [num_samples, num_time_points].
  vol_paths = tf.transpose(vol_paths)
  log_spot_paths = tf.transpose(log_spot_paths)
  # Shape [num_samples, num_time_points, 2]
  return tf.stack([log_spot_paths, vol_paths], -1)
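# The `record_samples` branch above trades a preallocated result tensor for a
# `tf.TensorArray` written once per kept time point and then stacked. A
# minimal sketch of that write/stack/transpose pattern (shapes chosen for
# illustration):
import tensorflow as tf

num_samples, num_time_points = 4, 3
paths = tf.TensorArray(dtype=tf.float64, size=num_time_points,
                       element_shape=[num_samples], clear_after_read=False)
for i in range(num_time_points):
  # One write per recorded time point, holding all samples at that time.
  paths = paths.write(i, tf.fill([num_samples], tf.cast(i, tf.float64)))
stacked = paths.stack()         # Shape [num_time_points, num_samples]
result = tf.transpose(stacked)  # Shape [num_samples, num_time_points]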
def _sample(*, dim, drift_fn, volatility_fn, times, time_step, keep_mask,
            num_requested_times, num_samples, initial_state, random_type,
            seed, swap_memory, skip, precompute_normal_draws, dtype):
  """Returns a sample of paths from the process using the Euler method."""
  dt = times[1:] - times[:-1]
  sqrt_dt = tf.sqrt(dt)
  current_state = initial_state + tf.zeros([num_samples, dim],
                                           dtype=initial_state.dtype)
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError('Sobol sequence for Euler sampling is temporarily '
                       'unsupported when `time_step` or `times` have a '
                       'non-constant value')
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if precompute_normal_draws or random_type in (
      random.RandomType.SOBOL,
      random.RandomType.HALTON,
      random.RandomType.HALTON_RANDOMIZED,
      random.RandomType.STATELESS,
      random.RandomType.STATELESS_ANTITHETIC):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        dtype=dtype, seed=seed, skip=skip)
    wiener_mean = None
  else:
    # If pseudo or antithetic sampling is used, proceed with random sampling
    # at each step.
    wiener_mean = tf.zeros((dim,), dtype=dtype, name='wiener_mean')
    normal_draws = None
  cond_fn = lambda i, *args: i < steps_num

  def step_fn(i, written_count, current_state, result):
    return _euler_step(
        i=i,
        written_count=written_count,
        current_state=current_state,
        result=result,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        keep_mask=keep_mask,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws)

  # The maximum number of iterations is passed to the while loop below. It
  # improves performance of the while loop on a GPU and is needed for
  # XLA-compilation compatibility.
  maximum_iterations = (tf.cast(1. / time_step, dtype=tf.int32)
                        + tf.size(times))
  result = tf.zeros((num_samples, num_requested_times, dim), dtype=dtype)
  _, _, _, result = tf.while_loop(
      cond_fn, step_fn, (0, 0, current_state, result),
      maximum_iterations=maximum_iterations,
      swap_memory=swap_memory)
  return result
def _sample_paths(self, times, time_step, num_samples, random_type, skip,
                  seed):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [2].
  times, keep_mask = _prepare_grid(times, time_step)
  # Add zeros as a starting location.
  dt = times[1:] - times[:-1]
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if random_type in (random.RandomType.SOBOL,
                     random.RandomType.HALTON,
                     random.RandomType.HALTON_RANDOMIZED,
                     random.RandomType.STATELESS,
                     random.RandomType.STATELESS_ANTITHETIC):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=self._dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    normal_draws = None
  cond_fn = lambda i, *args: i < tf.size(dt)

  def body_fn(i, written_count, current_x, current_y, x_paths, y_paths):
    """Simulates the qG-HJM process to the next time point."""
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros((self._dim,), dtype=self._dtype),
          random_type=random_type, seed=seed)
    else:
      normals = normal_draws[i]
    if self._sqrt_rho is not None:
      normals = tf.linalg.matvec(self._sqrt_rho, normals)
    vol = self._volatility(times[i + 1], current_x)
    next_x = (current_x
              + (current_y - self._mean_reversion * current_x) * dt[i]
              + vol * normals * tf.math.sqrt(dt[i]))
    next_y = current_y + (vol**2
                          - 2.0 * self._mean_reversion * current_y) * dt[i]
    # Update `x_paths` and `y_paths`.
    x_paths = utils.maybe_update_along_axis(
        tensor=x_paths,
        do_update=True,
        ind=written_count + 1,
        axis=1,
        new_tensor=tf.expand_dims(next_x, axis=1))
    y_paths = utils.maybe_update_along_axis(
        tensor=y_paths,
        do_update=True,
        ind=written_count + 1,
        axis=1,
        new_tensor=tf.expand_dims(next_y, axis=1))
    written_count += 1
    return (i + 1, written_count, next_x, next_y, x_paths, y_paths)

  x_paths = tf.zeros((num_samples, times.shape.as_list()[0], self._factors),
                     dtype=self._dtype)
  y_paths = tf.zeros((num_samples, times.shape.as_list()[0], self._factors),
                     dtype=self._dtype)
  initial_x = tf.zeros((num_samples, self._factors), dtype=self._dtype)
  initial_y = tf.zeros((num_samples, self._factors), dtype=self._dtype)
  _, _, _, _, x_paths, y_paths = tf.while_loop(
      cond_fn, body_fn, (0, 0, initial_x, initial_y, x_paths, y_paths))
  f_0_t = self._instant_forward_rate_fn(times)  # shape=(num_times,)
  rate_paths = tf.math.reduce_sum(
      x_paths, axis=-1) + f_0_t  # shape=(num_samples, num_times)
  discount_factor_paths = tf.math.exp(-rate_paths[:, :-1] * dt)
  discount_factor_paths = tf.concat(
      [tf.ones((num_samples, 1), dtype=self._dtype), discount_factor_paths],
      axis=1)  # shape=(num_samples, num_times)
  discount_factor_paths = utils.cumprod_using_matvec(discount_factor_paths)
  return (tf.boolean_mask(rate_paths, keep_mask, axis=1),
          tf.boolean_mask(discount_factor_paths, keep_mask, axis=1),
          tf.boolean_mask(x_paths, keep_mask, axis=1),
          tf.boolean_mask(y_paths, keep_mask, axis=1))
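# `utils.cumprod_using_matvec` is a library helper; for strictly positive
# inputs such as discount factors, a cumulative product can be computed with
# the same lower-triangular matvec trick in log space. A plausible standalone
# reading (the helper's actual implementation may differ):
import tensorflow as tf

x = tf.constant([0.99, 0.98, 0.97, 0.96], dtype=tf.float64)
n = tf.shape(x)[0]
lower = tf.linalg.band_part(tf.ones([n, n], dtype=x.dtype), -1, 0)
cumprod = tf.math.exp(tf.linalg.matvec(lower, tf.math.log(x)))
# cumprod matches tf.math.cumprod(x).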
def _sample(*, dim, drift_fn, volatility_fn, grad_volatility_fn, times,
            time_step, keep_mask, num_requested_times, num_samples,
            initial_state, random_type, seed, swap_memory, skip,
            precompute_normal_draws, watch_params, time_indices,
            input_gradients, stratonovich_order, dtype):
  """Returns a sample of paths from the process using the Milstein method."""
  dt = times[1:] - times[:-1]
  sqrt_dt = tf.sqrt(dt)
  current_state = initial_state + tf.zeros([num_samples, dim],
                                           dtype=initial_state.dtype)
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if precompute_normal_draws or random_type in (
      random.RandomType.SOBOL,
      random.RandomType.HALTON,
      random.RandomType.HALTON_RANDOMIZED,
      random.RandomType.STATELESS,
      random.RandomType.STATELESS_ANTITHETIC):
    # Process dimension plus auxiliary random variables for the Stratonovich
    # integral computation.
    all_normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=dim + 3 * dim * stratonovich_order,
        num_time_steps=steps_num, num_sample_paths=num_samples,
        random_type=random_type, dtype=dtype, seed=seed, skip=skip)
    normal_draws = all_normal_draws[:, :, :dim]
    wiener_mean = None
    # Auxiliary normal draws for use with the Stratonovich integral
    # approximation.
    aux_normal_draws = []
    start = dim
    for _ in range(3):
      end = start + dim * stratonovich_order
      aux_normal_draws.append(all_normal_draws[:, :, start:end])
      start = end
  else:
    # If pseudo or antithetic sampling is used, proceed with random sampling
    # at each step.
    wiener_mean = tf.zeros((dim,), dtype=dtype, name='wiener_mean')
    normal_draws = None
    aux_normal_draws = None
  if watch_params is None:
    # Use while_loop if `watch_params` is not passed.
    return _while_loop(
        dim=dim,
        steps_num=steps_num,
        current_state=current_state,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        grad_volatility_fn=grad_volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        time_step=time_step,
        keep_mask=keep_mask,
        num_requested_times=num_requested_times,
        swap_memory=swap_memory,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws,
        input_gradients=input_gradients,
        stratonovich_order=stratonovich_order,
        aux_normal_draws=aux_normal_draws,
        dtype=dtype)
  else:
    # Use a custom for_loop if `watch_params` is specified.
    return _for_loop(
        dim=dim,
        steps_num=steps_num,
        current_state=current_state,
        drift_fn=drift_fn,
        volatility_fn=volatility_fn,
        grad_volatility_fn=grad_volatility_fn,
        wiener_mean=wiener_mean,
        num_samples=num_samples,
        times=times,
        dt=dt,
        sqrt_dt=sqrt_dt,
        time_indices=time_indices,
        keep_mask=keep_mask,
        watch_params=watch_params,
        random_type=random_type,
        seed=seed,
        normal_draws=normal_draws,
        input_gradients=input_gradients,
        stratonovich_order=stratonovich_order,
        aux_normal_draws=aux_normal_draws)
def _sample_paths(self, times, num_requested_times, current_rates,
                  current_instant_forward_rates, num_samples, random_type,
                  skip, keep_mask, seed):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  # Add zeros as a starting location.
  dt = times[1:] - times[:-1]
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if random_type in (random.RandomType.SOBOL,
                     random.RandomType.HALTON,
                     random.RandomType.HALTON_RANDOMIZED,
                     random.RandomType.STATELESS,
                     random.RandomType.STATELESS_ANTITHETIC):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=self._dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    normal_draws = None
  mean_reversion, volatility = _get_parameters(  # pylint: disable=unbalanced-tuple-unpacking
      times + tf.math.reduce_min(dt) / 2,
      self._mean_reversion, self._volatility)
  if self._corr_matrix is not None:
    corr_matrix = _get_parameters(
        times + tf.math.reduce_min(dt) / 2, self._corr_matrix)[0]
    corr_matrix_root = tf.linalg.cholesky(corr_matrix)
  else:
    corr_matrix_root = None
  cond_fn = lambda i, *args: i < tf.size(dt)

  def body_fn(i, written_count, current_rates,
              current_instant_forward_rates, rate_paths):
    """Simulates the rate process to the next time point."""
    current_time = times[i]
    next_time = times[i + 1]
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros((self._dim,), dtype=mean_reversion.dtype),
          random_type=random_type, seed=seed)
    else:
      normals = normal_draws[i]
    next_rates, next_instant_forward_rates = _sample_at_next_time(
        i, next_time, current_time, mean_reversion[i], volatility[i],
        self._instant_forward_rate_fn, current_instant_forward_rates,
        current_rates, corr_matrix_root, normals)
    # Update `rate_paths`.
    rate_paths = utils.maybe_update_along_axis(
        tensor=rate_paths,
        do_update=keep_mask[i + 1],
        ind=written_count,
        axis=1,
        new_tensor=tf.expand_dims(next_rates, axis=1))
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_rates, next_instant_forward_rates,
            rate_paths)

  rate_paths = tf.zeros((num_samples, num_requested_times, self._dim),
                        dtype=self._dtype)
  _, _, _, _, rate_paths = tf.while_loop(
      cond_fn, body_fn,
      (0, 0, current_rates, current_instant_forward_rates, rate_paths))
  return rate_paths
def _sabr_sample_paths(self, initial_forward, initial_volatility, times,
                       time_step, num_samples, random_type, seed,
                       precompute_normal_draws, skip):
  """Returns a sample of paths from the process."""
  num_requested_times = tff_utils.get_shape(times)[0]
  # Prepare the results format.
  forward = tf.zeros(shape=(num_samples,),
                     dtype=self._dtype) + initial_forward
  vol = tf.zeros(shape=(num_samples,),
                 dtype=self._dtype) + initial_volatility
  if isinstance(num_requested_times, int) and num_requested_times == 1:
    record_samples = False
    forward_paths = forward
    vol_paths = vol
  else:
    # If more than one sample has to be recorded, create a TensorArray.
    record_samples = True
    element_shape = forward.shape
    forward_paths = tf.TensorArray(dtype=times.dtype,
                                   size=num_requested_times,
                                   element_shape=element_shape,
                                   clear_after_read=False)
    vol_paths = tf.TensorArray(dtype=times.dtype,
                               size=num_requested_times,
                               element_shape=element_shape,
                               clear_after_read=False)
  # Define the sampling while_loop condition function.
  cond_fn = lambda index, *args: index < tf.size(times)
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront. We also precompute random
  # numbers for the stateless random type in order to ensure independent
  # samples for multiple function calls with different seeds.
  if precompute_normal_draws or random_type in (
      random.RandomType.SOBOL,
      random.RandomType.HALTON,
      random.RandomType.HALTON_RANDOMIZED,
      random.RandomType.STATELESS,
      random.RandomType.STATELESS_ANTITHETIC):
    num_time_steps = tf.cast(
        tf.math.ceil(tf.math.divide(times[-1], time_step)),
        dtype=tf.int32) + times.shape[0]
    # We need a [3] + initial_forward.shape tensor of random draws.
    # This will be accessed by normal_draws_index.
    num_normal_draws = 3 * tf.size(initial_forward)
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=num_normal_draws, num_time_steps=num_time_steps,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, skip=skip, dtype=self._dtype)
  else:
    normal_draws = None

  def body_fn(index, current_time, forward, vol, forward_paths, vol_paths,
              normal_draws_index):
    """Simulate Sabr process to the next time point."""
    forward, vol, normal_draws_index = self._propagate_to_time(
        forward, vol, current_time, times[index], time_step, random_type,
        seed, normal_draws, normal_draws_index, num_time_steps)
    # Always update paths in the outer loop.
    if record_samples:
      # Update volatility paths.
      vol_paths = vol_paths.write(index, vol)
      # Update forward paths.
      forward_paths = forward_paths.write(index, forward)
    else:
      vol_paths = vol
      forward_paths = forward
    return (index + 1, times[index], forward, vol, forward_paths, vol_paths,
            normal_draws_index)

  start_time = tf.constant(0, dtype=self._dtype)
  # Sample paths.
  _, _, _, _, forward_paths, vol_paths, _ = tf.while_loop(
      cond_fn, body_fn,
      (0, start_time, forward, vol, forward_paths, vol_paths, 0),
      maximum_iterations=tf.size(times))
  if not record_samples:
    # Shape [num_samples, 1]
    vol_paths = tf.expand_dims(vol_paths, axis=-1)
    forward_paths = tf.expand_dims(forward_paths, axis=-1)
    # Shape [num_samples, 1, 1]
    return tf.stack([forward_paths, vol_paths], -1)
  # Shape [num_time_points] + [num_samples]
  vol_paths = vol_paths.stack()
  forward_paths = forward_paths.stack()
  # Transpose to shape [num_samples, num_time_points].
  vol_paths = tf.transpose(vol_paths)
  forward_paths = tf.transpose(forward_paths)
  # Shape [num_samples, num_time_points, 2]
  return tf.stack([forward_paths, vol_paths], -1)
def _sample_paths(self, times, num_requested_times, current_log_spot,
                  current_vol, num_samples, random_type, keep_mask, seed,
                  skip, tolerance):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  dt = times[1:] - times[:-1]
  # Compute the parameters at `times`. Here + tf.reduce_min(dt) / 2 ensures
  # that the value is constant between `times`.
  mean_reversion, theta, volvol, rho = _get_parameters(  # pylint: disable=unbalanced-tuple-unpacking
      times + tf.reduce_min(dt) / 2,
      self._mean_reversion, self._theta, self._volvol, self._rho)
  # For a random_type other than PSEUDO, the sequence of independent random
  # normals should be generated upfront.
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError(
          'Sobol sequence for Euler sampling is temporarily '
          'unsupported when `time_step` or `times` have a '
          'non-constant value')
  if random_type != random.RandomType.PSEUDO:
    # Note that at each iteration we need 2 random draws.
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=2, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self.dtype(), skip=skip)
  else:
    normal_draws = None
  cond_fn = lambda i, *args: i < steps_num

  def body_fn(i, written_count, current_vol, current_log_spot, vol_paths,
              log_spot_paths):
    """Simulates the Heston process to the next time point."""
    time_step = dt[i]
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros([2], dtype=mean_reversion.dtype), seed=seed)
    else:
      normals = normal_draws[i]

    def _next_vol_fn():
      return _update_variance(mean_reversion[i], theta[i], volvol[i], rho[i],
                              current_vol, time_step, normals[..., 0])

    # Do not update the variance if `time_step <= tolerance`.
    next_vol = tf.cond(time_step > tolerance,
                       _next_vol_fn,
                       lambda: current_vol)

    def _next_log_spot_fn():
      return _update_log_spot(mean_reversion[i], theta[i], volvol[i], rho[i],
                              current_vol, next_vol, current_log_spot,
                              time_step, normals[..., 1])

    # Do not update the state if `time_step <= tolerance`.
    next_log_spot = tf.cond(time_step > tolerance,
                            _next_log_spot_fn,
                            lambda: current_log_spot)
    # Update volatility paths.
    vol_paths = utils.maybe_update_along_axis(
        tensor=vol_paths,
        do_update=keep_mask[i + 1],
        ind=written_count,
        axis=1,
        new_tensor=tf.expand_dims(next_vol, axis=1))
    # Update log-spot paths.
    log_spot_paths = utils.maybe_update_along_axis(
        tensor=log_spot_paths,
        do_update=keep_mask[i + 1],
        ind=written_count,
        axis=1,
        new_tensor=tf.expand_dims(next_log_spot, axis=1))
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_vol, next_log_spot,
            vol_paths, log_spot_paths)

  shape = (num_samples, num_requested_times)
  log_spot_paths = tf.zeros(shape, dtype=self._dtype)
  vol_paths = tf.zeros(shape, dtype=self._dtype)
  _, _, _, _, vol_paths, log_spot_paths = tf.while_loop(
      cond_fn, body_fn,
      (0, 0, current_vol, current_log_spot, vol_paths, log_spot_paths),
      maximum_iterations=steps_num)
  return tf.stack([log_spot_paths, vol_paths], -1)
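# `utils.maybe_update_along_axis` is internal to the library. A hypothetical,
# behavior-compatible sketch for the axis=1 case used above, built from a
# broadcasting `tf.where` (the name and exact semantics here are assumptions):
import tensorflow as tf

def update_along_axis(tensor, do_update, ind, new_tensor):
  """Writes `new_tensor` at index `ind` along axis 1 when `do_update` holds."""
  num_times = tf.shape(tensor)[1]
  position_mask = tf.range(num_times) == ind             # Shape [num_times]
  mask = tf.logical_and(do_update, position_mask)[tf.newaxis, :]
  # Broadcasts over samples; rows keep `tensor` except at column `ind`.
  return tf.where(mask, new_tensor, tensor)

paths = tf.zeros([4, 3])
paths = update_along_axis(paths, tf.constant(True), 1, tf.fill([4, 1], 7.0))
# Column 1 of every row is now 7.0; nothing is written when do_update=False.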
def _sample_paths(self, times, times_shape, current_rates,
                  current_instant_forward_rates, num_samples, random_type,
                  skip, keep_mask, seed):
  """Returns a sample of paths from the process."""
  # Note: all the notations below are the same as in [1].
  # Add zeros as a starting location.
  dt = times[1:] - times[:-1]
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError('Sobol sequence for Euler sampling is temporarily '
                       'unsupported when `time_step` or `times` have a '
                       'non-constant value')
  # In order to use a low-discrepancy random_type we need to generate the
  # sequence of independent random normals upfront.
  if random_type in (random.RandomType.SOBOL,
                     random.RandomType.HALTON,
                     random.RandomType.HALTON_RANDOMIZED):
    normal_draws = utils.generate_mc_normal_draws(
        num_normal_draws=self._dim, num_time_steps=steps_num,
        num_sample_paths=num_samples, random_type=random_type,
        seed=seed, dtype=self._dtype, skip=skip)
  else:
    normal_draws = None
  mean_reversion, volatility = _get_parameters(  # pylint: disable=unbalanced-tuple-unpacking
      times + tf.math.reduce_min(dt) / 2,
      self._mean_reversion, self._volatility)
  if self._corr_matrix is not None:
    corr_matrix = _get_parameters(
        times + tf.math.reduce_min(dt) / 2, self._corr_matrix)[0]
    corr_matrix_root = tf.linalg.cholesky(corr_matrix)
  else:
    corr_matrix_root = None
  cond_fn = lambda i, *args: i < tf.size(dt)

  def body_fn(i, written_count, current_rates,
              current_instant_forward_rates, rate_paths):
    """Simulates the rate process to the next time point."""
    current_time = times[i]
    next_time = times[i + 1]
    if normal_draws is None:
      normals = random.mv_normal_sample(
          (num_samples,),
          mean=tf.zeros((self._dim,), dtype=mean_reversion.dtype),
          random_type=random_type, seed=seed)
    else:
      normals = normal_draws[i]
    next_rates, next_instant_forward_rates = _sample_at_next_time(
        i, next_time, current_time, mean_reversion[i], volatility[i],
        self._instant_forward_rate_fn, current_instant_forward_rates,
        current_rates, corr_matrix_root, normals)
    rate_paths = tf.cond(keep_mask[i + 1],
                         lambda: rate_paths.write(written_count, next_rates),
                         lambda: rate_paths)
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_rates, next_instant_forward_rates,
            rate_paths)

  rate_paths = tf.TensorArray(dtype=self._dtype, size=times_shape[-1])
  _, _, _, _, rate_paths = tf.while_loop(
      cond_fn, body_fn,
      (0, 0, current_rates, current_instant_forward_rates, rate_paths))
  rate_paths = tf.transpose(rate_paths.stack(), (1, 0, 2))
  # Shape of `rate_paths` is dynamic because of `TensorArray`.
  # In order to make the shape static, use the `set_shape` method.
  rate_paths.set_shape(current_rates.shape[:1] + times_shape
                       + current_rates.shape[-1:])
  return rate_paths