Example no. 1
        def body_fn(i, written_count, current_x, rate_paths):
            """Simulate hull-white process to the next time point."""
            if normal_draws is None:
                normals = random.mv_normal_sample(
                    (num_samples, ),
                    mean=tf.zeros((self._dim, ), dtype=mean_reversion.dtype),
                    random_type=random_type,
                    seed=seed)
            else:
                normals = normal_draws[i]

            if corr_matrix_root is not None:
                normals = tf.linalg.matvec(corr_matrix_root[i], normals)

            next_x = (
                tf.math.exp(-mean_reversion[:, i + 1] * dt[i]) * current_x +
                exp_x_t[:, i] + tf.math.sqrt(var_x_t[:, i]) * normals)
            f_0_t = self._instant_forward_rate_fn(times[i + 1])

            # Update `rate_paths`
            rate_paths = utils.maybe_update_along_axis(
                tensor=rate_paths,
                do_update=keep_mask[i + 1],
                ind=written_count,
                axis=1,
                new_tensor=tf.expand_dims(next_x, axis=1) + f_0_t)
            written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
            return (i + 1, written_count, next_x, rate_paths)
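For context, a `body_fn` of this shape is consumed by a `tf.while_loop`. A minimal harness sketch (not from the source; `steps_num`, `num_requested_times` and the zero initial state are assumptions for illustration):

        # Hypothetical driver for the `body_fn` above. `steps_num` is the
        # number of simulation steps and `num_requested_times` the number of
        # times the user asked to record; both are assumptions here.
        initial_x = tf.zeros([num_samples, self._dim],
                             dtype=mean_reversion.dtype)
        rate_paths = tf.zeros([num_samples, num_requested_times, self._dim],
                              dtype=mean_reversion.dtype)
        cond_fn = lambda i, *args: i < steps_num
        _, _, _, rate_paths = tf.while_loop(
            cond_fn, body_fn, (0, 0, initial_x, rate_paths),
            maximum_iterations=steps_num)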
Example no. 2
 def body_fn(i, written_count,
             current_rates,
             current_instant_forward_rates,
             rate_paths):
   """Simulate Heston process to the next time point."""
   current_time = times[i]
   next_time = times[i + 1]
   if normal_draws is None:
     normals = random.mv_normal_sample(
         (num_samples,),
         mean=tf.zeros((self._dim,), dtype=mean_reversion.dtype),
         random_type=random_type, seed=seed)
   else:
     normals = normal_draws[i]
   next_rates, next_instant_forward_rates = _sample_at_next_time(
       i, next_time, current_time,
       mean_reversion[i], volatility[i],
       self._instant_forward_rate_fn,
       current_instant_forward_rates,
       current_rates, corr_matrix_root, normals)
   rate_paths = tf.cond(keep_mask[i + 1],
                        lambda: rate_paths.write(written_count, next_rates),
                        lambda: rate_paths)
   written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
   return (i + 1, written_count,
           next_rates,
           next_instant_forward_rates,
           rate_paths)
Example no. 3
def _euler_step(i, written_count, current_state, result, drift_fn,
                volatility_fn, wiener_mean, num_samples, times, dt, sqrt_dt,
                keep_mask, random_type, seed):
    """Performs one step of Euler scheme."""
    current_time = times[i + 1]
    # In order to use Halton or Sobol random_type we need to set the `skip`
    # argument that ensures new points are sampled at every iteration.
    skip = i * num_samples
    dw = random.mv_normal_sample((num_samples, ),
                                 mean=wiener_mean,
                                 random_type=random_type,
                                 seed=seed,
                                 skip=skip)
    dw = dw * sqrt_dt[i]
    dt_inc = dt[i] * drift_fn(current_time, current_state)  # pylint: disable=not-callable
    dw_inc = tf.squeeze(
        tf.matmul(volatility_fn(current_time, current_state), dw), -1)  # pylint: disable=not-callable
    next_state = current_state + dt_inc + dw_inc

    # Keep only states for the times requested by the user.
    result = tf.cond(keep_mask[i + 1],
                     (lambda: result.write(written_count, next_state)),
                     (lambda: result))
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_state, result)
Example no. 4
        def step_fn(i, written_count, current_state, result):
            """Performs one step of Euler scheme."""
            current_time = times[i + 1]
            dw = random_ops.mv_normal_sample((num_samples, ),
                                             mean=wiener_mean,
                                             random_type=random_type,
                                             seed=seed)
            dw = dw * sqrt_dt[i]
            dt_inc = dt[i] * self.drift_fn()(current_time, current_state)  # pylint: disable=not-callable
            dw_inc = tf.squeeze(
                tf.matmul(self.volatility_fn()(current_time, current_state),
                          dw), -1)  # pylint: disable=not-callable
            next_state = current_state + dt_inc + dw_inc

            def write_next_state_to_result():
                # Replace result[:, written_count, :] with next_state.
                one_hot = tf.one_hot(written_count, depth=num_requested_times)
                mask = tf.expand_dims(one_hot > 0, axis=-1)
                return tf.where(mask, tf.expand_dims(next_state, axis=1),
                                result)

            # Keep only states for the times requested by the user.
            result = tf.cond(keep_mask[i + 1], write_next_state_to_result,
                             lambda: result)
            written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
            return i + 1, written_count, next_state, result
Example no. 5
def _update_log_spot(i,
                     kappa,
                     theta,
                     epsilon,
                     rho,
                     current_var,
                     next_var,
                     current_log_spot,
                     time_step,
                     num_samples,
                     random_type,
                     seed,
                     gamma_1=0.5,
                     gamma_2=0.5):
    """Updates log-spot value."""
    skip = (3 * i + 2) * num_samples
    normals = random_ops.mv_normal_sample(
        (num_samples, ),
        mean=tf.constant([0.0], dtype=current_var.dtype),
        random_type=random_type,
        seed=seed,
        skip=skip)
    k_0 = -rho * kappa * theta / epsilon * time_step
    k_1 = (gamma_1 * time_step * (kappa * rho / epsilon - 0.5) - rho / epsilon)
    k_2 = (gamma_2 * time_step * (kappa * rho / epsilon - 0.5) + rho / epsilon)
    k_3 = gamma_1 * time_step * (1 - rho**2)
    k_4 = gamma_2 * time_step * (1 - rho**2)

    next_log_spot = (
        current_log_spot + k_0 + k_1 * current_var + k_2 * next_var +
        tf.sqrt(k_3 * current_var + k_4 * next_var) * tf.squeeze(normals))
    return next_log_spot
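Written out, the update computes

$$\ln S_{t+\Delta} = \ln S_t + K_0 + K_1 V_t + K_2 V_{t+\Delta} + \sqrt{K_3 V_t + K_4 V_{t+\Delta}}\,Z,$$

with

$$K_0 = -\frac{\rho\kappa\theta}{\epsilon}\Delta,\quad K_1 = \gamma_1\Delta\Big(\frac{\kappa\rho}{\epsilon}-\tfrac12\Big)-\frac{\rho}{\epsilon},\quad K_2 = \gamma_2\Delta\Big(\frac{\kappa\rho}{\epsilon}-\tfrac12\Big)+\frac{\rho}{\epsilon},\quad K_3 = \gamma_1\Delta(1-\rho^2),\quad K_4 = \gamma_2\Delta(1-\rho^2),$$

where $Z$ is a standard normal draw and $\Delta$ is `time_step`. The formula is read directly off the code; identifying it as the log-spot step of Andersen's quadratic-exponential (QE) Heston scheme is an editorial note.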
Example no. 6
 def body_fn(i, written_count, current_rates,
             current_instant_forward_rates, rate_paths):
     """Simulate Heston process to the next time point."""
     current_time = times[i]
     next_time = times[i + 1]
     if normal_draws is None:
         normals = random.mv_normal_sample(
             (num_samples, ),
             mean=tf.zeros((self._dim, ), dtype=mean_reversion.dtype),
             random_type=random_type,
             seed=seed)
     else:
         normals = normal_draws[i]
     next_rates, next_instant_forward_rates = _sample_at_next_time(
         i, next_time, current_time, mean_reversion[i], volatility[i],
         self._instant_forward_rate_fn, current_instant_forward_rates,
         current_rates, corr_matrix_root, normals)
     # Update `rate_paths`
     rate_paths = utils.maybe_update_along_axis(
         tensor=rate_paths,
         do_update=keep_mask[i + 1],
         ind=written_count,
         axis=1,
         new_tensor=tf.expand_dims(next_rates, axis=1))
     written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
     return (i + 1, written_count, next_rates,
             next_instant_forward_rates, rate_paths)
Example no. 7
def _euler_step(*, i, written_count, current_state, result, drift_fn,
                volatility_fn, wiener_mean, num_samples, times, dt, sqrt_dt,
                keep_mask, random_type, seed, normal_draws):
    """Performs one step of Euler scheme."""
    current_time = times[i + 1]
    written_count = tf.cast(written_count, tf.int32)
    if normal_draws is not None:
        dw = normal_draws[i]
    else:
        dw = random.mv_normal_sample((num_samples, ),
                                     mean=wiener_mean,
                                     random_type=random_type,
                                     seed=seed)
    dw = dw * sqrt_dt[i]
    dt_inc = dt[i] * drift_fn(current_time, current_state)  # pylint: disable=not-callable
    dw_inc = tf.linalg.matvec(volatility_fn(current_time, current_state), dw)  # pylint: disable=not-callable
    next_state = current_state + dt_inc + dw_inc
    result = utils.maybe_update_along_axis(tensor=result,
                                           do_update=keep_mask[i + 1],
                                           ind=written_count,
                                           axis=1,
                                           new_tensor=tf.expand_dims(
                                               next_state, axis=1))
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return i + 1, written_count, next_state, result
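Several of these examples write paths with `utils.maybe_update_along_axis` instead of a `TensorArray`. The library's implementation is not shown in this section; the following is a hedged reconstruction inferred from the call sites here and from the inline one-hot/`tf.where` pattern in Example no. 4, not the library's own code:

import tensorflow as tf

def maybe_update_along_axis(*, tensor, new_tensor, axis, ind, do_update):
  """Sketch: replaces the slice of `tensor` at index `ind` along `axis` with
  `new_tensor` when `do_update` is true; otherwise returns `tensor`."""
  size = tf.shape(tensor)[axis]
  # Boolean mask that is True only at position `ind` along `axis`.
  mask = tf.one_hot(ind, depth=size) > 0
  # Reshape so the mask broadcasts against `tensor` along `axis`.
  shape = [1] * tensor.shape.rank
  shape[axis] = -1
  mask = tf.reshape(mask, shape)
  return tf.cond(do_update,
                 lambda: tf.where(mask, new_tensor, tensor),
                 lambda: tensor)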
Example no. 8
    def body_fn(current_time, forward, vol, normal_draws_index):
      """Simulate Sabr process for one time step."""
      if normal_draws is not None:
        random_numbers = normal_draws[normal_draws_index]
        random_numbers = tf.reshape(random_numbers, [3] + forward.shape)
      else:
        random_numbers = random.mv_normal_sample(
            [3] + forward.shape,
            mean=tf.constant([0.0], dtype=self._dtype),
            random_type=random_type,
            seed=seed)
        random_numbers = tf.squeeze(random_numbers, -1)
      dwv = random_numbers[0]
      uniforms = 0.5 * (1 + tf.math.erf(random_numbers[1]))
      z = random_numbers[2]

      time_to_end = end_time - current_time
      dt = tf.compat.v2.where(time_to_end <= time_step, time_to_end, time_step)

      next_vol = self._sample_next_volatilities(vol, dt, dwv)
      iv = self._sample_integrated_variance(vol, next_vol, dt)
      next_forward = self._sample_forwards(forward, vol, next_vol, iv, uniforms,
                                           z)

      return current_time + dt, next_forward, next_vol, normal_draws_index + 1
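Unlike the index-driven bodies above, this `body_fn` is time-driven. A minimal sketch of the consuming loop (not from the source; the zero start time and the initial states `initial_forward` and `initial_vol` are assumptions):

    # Hypothetical driver: advance in steps of at most `time_step` until
    # `end_time` is reached.
    cond_fn = lambda current_time, *args: current_time < end_time
    _, final_forward, final_vol, _ = tf.while_loop(
        cond_fn, body_fn,
        (tf.constant(0.0, dtype=self._dtype), initial_forward, initial_vol,
         0))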
Example no. 9
        def body_fn(i, written_count, current_x, rate_paths):
            """Simulate hull-white process to the next time point."""
            if normal_draws is None:
                normals = random.mv_normal_sample(
                    (num_samples, ),
                    mean=tf.zeros((self._dim, ), dtype=mean_reversion.dtype),
                    random_type=random_type,
                    seed=seed)
            else:
                normals = normal_draws[i]

            if corr_matrix_root is not None:
                normals = tf.linalg.matvec(corr_matrix_root[i], normals)
            vol_x_t = tf.math.sqrt(tf.nn.relu(tf.transpose(var_x_t)[i]))
            # If `vol_x_t` is numerically zero, its gradient becomes `NaN`. To
            # prevent this, we explicitly zero out `vol_x_t` at those entries
            # so that the gradient there is zero as well.
            vol_x_t = tf.where(vol_x_t > 0.0, vol_x_t, 0.0)
            next_x = (
                tf.math.exp(-tf.transpose(mean_reversion)[i + 1] * dt[i]) *
                current_x + tf.transpose(exp_x_t)[i] + vol_x_t * normals)
            f_0_t = self._instant_forward_rate_fn(times[i + 1])

            # Update `rate_paths`
            rate_paths = utils.maybe_update_along_axis(
                tensor=rate_paths,
                do_update=keep_mask[i + 1],
                ind=written_count,
                axis=1,
                new_tensor=tf.expand_dims(next_x, axis=1) + f_0_t)
            written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
            return (i + 1, written_count, next_x, rate_paths)
Example no. 10
def _sample(dim, drift_fn, volatility_fn, times, time_step, keep_mask,
            times_size, num_samples, initial_state, random_type, seed,
            swap_memory, skip, dtype):
  """Returns a sample of paths from the process using Euler method."""
  dt = times[1:] - times[:-1]
  sqrt_dt = tf.sqrt(dt)
  current_state = initial_state + tf.zeros([num_samples, dim],
                                           dtype=initial_state.dtype)
  if dt.shape.is_fully_defined():
    steps_num = dt.shape.as_list()[-1]
  else:
    steps_num = tf.shape(dt)[-1]
    # TODO(b/148133811): Re-enable Sobol test when TF 2.2 is released.
    if random_type == random.RandomType.SOBOL:
      raise ValueError('Sobol sequence for Euler sampling is temporarily '
                       'unsupported when `time_step` or `times` have a '
                       'non-constant value')
  # In order to use low-discrepancy random_type we need to generate the sequence
  # of independent random normals upfront.
  if random_type in (random.RandomType.SOBOL,
                     random.RandomType.HALTON,
                     random.RandomType.HALTON_RANDOMIZED):
    # The number of iterations times the dimensionality of the process is the
    # dimension of the low-discrepancy sequence.
    qmc_dimension = tf.zeros([steps_num * dim], dtype=dtype)
    normal_draws = random.mv_normal_sample(
        [num_samples], mean=qmc_dimension,
        random_type=random_type,
        seed=seed, skip=skip)
    # Reshape and transpose for XLA-compatibility
    normal_draws = tf.reshape(normal_draws, [num_samples, steps_num, dim])
    normal_draws = tf.transpose(normal_draws, [1, 0, 2])
  else:
    normal_draws = None
  # If pseudo or antithetic sampling is used, proceed with random sampling
  # at each step.
  wiener_mean = tf.zeros((dim,), dtype=dtype)

  cond_fn = lambda i, *args: i < steps_num
  # The maximum number of iterations is passed to the while loop below. It
  # improves performance of the while loop on a GPU and is needed for
  # XLA-compilation compatibility.
  def step_fn(i, written_count, current_state, result):
    return _euler_step(i, written_count, current_state, result,
                       drift_fn, volatility_fn, wiener_mean,
                       num_samples, times, dt, sqrt_dt, keep_mask,
                       random_type, seed, normal_draws)
  maximum_iterations = (tf.cast(1. / time_step, dtype=tf.int32)
                        + tf.size(times))
  result = tf.TensorArray(dtype=dtype, size=times_size)
  _, _, _, result = tf.compat.v1.while_loop(
      cond_fn, step_fn, (0, 0, current_state, result),
      maximum_iterations=maximum_iterations,
      swap_memory=swap_memory)

  return tf.transpose(result.stack(), (1, 0, 2))
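`_sample` expects `drift_fn` and `volatility_fn` callables with signature `(time, state) -> Tensor`. A minimal sketch of such callables for a one-dimensional geometric Brownian motion (`mu` and `sigma` are illustrative assumptions, not values from the source):

import tensorflow as tf

# Hypothetical coefficients for a 1-d geometric Brownian motion.
mu, sigma = 0.05, 0.2

def drift_fn(t, x):
  """Drift `mu * x`, shape [num_samples, dim]."""
  del t  # The GBM coefficients are time-independent.
  return mu * x

def volatility_fn(t, x):
  """Volatility matrix `sigma * x`; for dim == 1 this is [num_samples, 1, 1]."""
  del t
  return sigma * tf.expand_dims(x, axis=-1)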
Example no. 11
def _update_variance(i,
                     kappa,
                     theta,
                     epsilon,
                     rho,
                     current_var,
                     time_step,
                     num_samples,
                     random_type,
                     seed,
                     psi_c=1.5):
    """Updates variance value."""
    del rho
    psi_c = tf.convert_to_tensor(psi_c, dtype=kappa.dtype)
    scaled_time = tf.exp(-kappa * time_step)
    epsilon_squared = epsilon**2
    m = theta + (current_var - theta) * scaled_time
    s_squared = (current_var * epsilon_squared * scaled_time / kappa *
                 (1 - scaled_time) + theta * epsilon_squared / 2 / kappa *
                 (1 - scaled_time)**2)
    psi = s_squared / m**2
    skip = 3 * i * num_samples
    normals = random_ops.mv_normal_sample((num_samples, ),
                                          mean=tf.constant([0.0],
                                                           dtype=kappa.dtype),
                                          random_type=random_type,
                                          seed=seed,
                                          skip=skip)
    skip = (3 * i + 1) * num_samples
    uniforms = tf.squeeze(
        random_ops.uniform(dim=1,
                           sample_shape=[num_samples],
                           dtype=kappa.dtype,
                           random_type=random_type,
                           seed=seed,
                           skip=skip))
    cond = psi < psi_c
    # Result where `cond` is true
    psi_inv = 2 / psi
    b_squared = psi_inv - 1 + tf.sqrt(psi_inv * (psi_inv - 1))

    a = m / (1 + b_squared)
    next_var_true = a * (tf.sqrt(b_squared) + tf.squeeze(normals))**2
    # Result where `cond` is false
    p = (psi - 1) / (psi + 1)
    beta = (1 - p) / m
    next_var_false = tf.where(uniforms > p,
                              tf.math.log(1 - p) - tf.math.log(1 - uniforms),
                              tf.zeros_like(uniforms)) / beta
    next_var = tf.where(cond, next_var_true, next_var_false)
    return next_var
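In formulas (read off the code), the step matches the conditional mean and variance of the CIR variance process,

$$m = \theta + (V_t-\theta)e^{-\kappa\Delta},\qquad s^2 = \frac{V_t\,\epsilon^2 e^{-\kappa\Delta}}{\kappa}\big(1-e^{-\kappa\Delta}\big)+\frac{\theta\,\epsilon^2}{2\kappa}\big(1-e^{-\kappa\Delta}\big)^2,\qquad \psi = \frac{s^2}{m^2},$$

and then branches: for $\psi < \psi_c$ it samples $V_{t+\Delta} = a(b+Z)^2$ with $b^2 = 2/\psi - 1 + \sqrt{(2/\psi)(2/\psi-1)}$ and $a = m/(1+b^2)$; otherwise it inverts the mixed CDF $\Psi(v) = p + (1-p)\big(1-e^{-\beta v}\big)$ with $p = (\psi-1)/(\psi+1)$ and $\beta = (1-p)/m$, which is exactly the `tf.where(uniforms > p, ...)` branch. Identifying this with the variance step of Andersen's QE scheme is an editorial note.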
Example no. 12
        def body_fn(i, written_count, current_vol, current_log_spot, vol_paths,
                    log_spot_paths):
            """Simulate Heston process to the next time point."""
            time_step = dt[i]
            if normal_draws is None:
                normals = random.mv_normal_sample(
                    (num_samples, ),
                    mean=tf.zeros([2], dtype=mean_reversion.dtype),
                    seed=seed)
            else:
                normals = normal_draws[i]

            def _next_vol_fn():
                return _update_variance(mean_reversion[i], theta[i], volvol[i],
                                        rho[i], current_vol, time_step,
                                        normals[..., 0])

            # Do not update variance if `time_step <= tolerance`
            next_vol = tf.cond(time_step > tolerance, _next_vol_fn,
                               lambda: current_vol)

            def _next_log_spot_fn():
                return _update_log_spot(mean_reversion[i], theta[i], volvol[i],
                                        rho[i], current_vol, next_vol,
                                        current_log_spot, time_step,
                                        normals[..., 1])

            # Do not update state if `time_step <= tolerance`
            next_log_spot = tf.cond(time_step > tolerance, _next_log_spot_fn,
                                    lambda: current_log_spot)
            # Update volatility paths
            vol_paths = utils.maybe_update_along_axis(
                tensor=vol_paths,
                do_update=keep_mask[i + 1],
                ind=written_count,
                axis=1,
                new_tensor=tf.expand_dims(next_vol, axis=1))
            # Update log-spot paths
            log_spot_paths = utils.maybe_update_along_axis(
                tensor=log_spot_paths,
                do_update=keep_mask[i + 1],
                ind=written_count,
                axis=1,
                new_tensor=tf.expand_dims(next_log_spot, axis=1))
            written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
            return (i + 1, written_count, next_vol, next_log_spot, vol_paths,
                    log_spot_paths)
Example no. 13
        def step_fn(i, written_count, current_state, result):
            """Performs one step of Euler scheme."""
            current_time = times[i + 1]
            dw = random_ops.mv_normal_sample((num_samples, ),
                                             mean=wiener_mean,
                                             random_type=random_type,
                                             seed=seed)
            dw = dw * sqrt_dt[i]
            dt_inc = dt[i] * self.drift_fn()(current_time, current_state)  # pylint: disable=not-callable
            dw_inc = tf.squeeze(
                tf.matmul(self.volatility_fn()(current_time, current_state),
                          dw), -1)  # pylint: disable=not-callable
            next_state = current_state + dt_inc + dw_inc

            # Keep only states for the times requested by the user.
            result = tf.cond(keep_mask[i + 1],
                             (lambda: result.write(written_count, next_state)),
                             (lambda: result))
            written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
            return (i + 1, written_count, next_state, result)
Example no. 14
    def body_fn(i, written_count,
                current_x,
                current_y,
                x_paths,
                y_paths):
      """Simulate qG-HJM process to the next time point."""
      if normal_draws is None:
        normals = random.mv_normal_sample(
            (num_samples,),
            mean=tf.zeros((self._dim,), dtype=self._dtype),
            random_type=random_type, seed=seed)
      else:
        normals = normal_draws[i]

      if self._sqrt_rho is not None:
        normals = tf.linalg.matvec(self._sqrt_rho, normals)

      vol = self._volatility(times[i + 1], current_x)

      next_x = (current_x
                + (current_y - self._mean_reversion * current_x) * dt[i]
                + vol * normals * tf.math.sqrt(dt[i]))
      next_y = current_y + (vol**2 -
                            2.0 * self._mean_reversion * current_y) * dt[i]

      # Update `x_paths` and `y_paths`
      x_paths = utils.maybe_update_along_axis(
          tensor=x_paths,
          do_update=True,
          ind=written_count + 1,
          axis=1,
          new_tensor=tf.expand_dims(next_x, axis=1))
      y_paths = utils.maybe_update_along_axis(
          tensor=y_paths,
          do_update=True,
          ind=written_count + 1,
          axis=1,
          new_tensor=tf.expand_dims(next_y, axis=1))

      written_count += 1
      return (i + 1, written_count, next_x, next_y, x_paths, y_paths)
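The step above (read directly off the code) is the Euler update of the quasi-Gaussian HJM state dynamics

$$x_{t+\Delta} = x_t + (y_t - \kappa x_t)\,\Delta + \sigma\sqrt{\Delta}\,Z,\qquad y_{t+\Delta} = y_t + \big(\sigma^2 - 2\kappa y_t\big)\,\Delta,$$

where $\kappa$ is `self._mean_reversion`, $\sigma$ is `self._volatility(times[i + 1], current_x)`, and $Z$ is the (possibly correlated) normal draw.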
Example no. 15
        def body_fn(i, written_count, current_vol, current_log_spot, vol_paths,
                    log_spot_paths):
            """Simulate Heston process to the next time point."""
            time_step = dt[i]
            if normal_draws is None:
                normals = random.mv_normal_sample(
                    (num_samples, ),
                    mean=tf.zeros([3], dtype=kappa.dtype),
                    seed=seed)
            else:
                normals = normal_draws[i]

            def _next_vol_fn():
                return _update_variance(kappa[i], theta[i], epsilon[i], rho[i],
                                        current_vol, time_step,
                                        normals[..., :2])

            # Do not update variance if `time_step <= tolerance`
            next_vol = tf.cond(time_step > tolerance, _next_vol_fn,
                               lambda: current_vol)

            def _next_log_spot_fn():
                return _update_log_spot(kappa[i], theta[i], epsilon[i], rho[i],
                                        current_vol, next_vol,
                                        current_log_spot, time_step,
                                        normals[..., -1])

            # Do not update state if `time_step <= tolerance`
            next_log_spot = tf.cond(time_step > tolerance, _next_log_spot_fn,
                                    lambda: current_log_spot)
            vol_paths = tf.cond(
                keep_mask[i + 1],
                lambda: vol_paths.write(written_count, next_vol),
                lambda: vol_paths)
            log_spot_paths = tf.cond(
                keep_mask[i + 1],
                lambda: log_spot_paths.write(written_count, next_log_spot),
                lambda: log_spot_paths)
            written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
            return (i + 1, written_count, next_vol, next_log_spot, vol_paths,
                    log_spot_paths)
Example no. 16
def _euler_step(i, written_count, current_state, result, drift_fn,
                volatility_fn, wiener_mean, num_samples, times, dt, sqrt_dt,
                keep_mask, random_type, seed, normal_draws):
    """Performs one step of Euler scheme."""
    current_time = times[i + 1]
    if normal_draws is not None:
        dw = normal_draws[i]
    else:
        dw = random.mv_normal_sample((num_samples, ),
                                     mean=wiener_mean,
                                     random_type=random_type,
                                     seed=seed)
    dw = dw * sqrt_dt[i]
    dt_inc = dt[i] * drift_fn(current_time, current_state)  # pylint: disable=not-callable
    dw_inc = tf.linalg.matvec(volatility_fn(current_time, current_state), dw)  # pylint: disable=not-callable
    next_state = current_state + dt_inc + dw_inc

    # Keep only states for the times requested by the user.
    result = tf.cond(keep_mask[i + 1],
                     (lambda: result.write(written_count, next_state)),
                     (lambda: result))
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
    return (i + 1, written_count, next_state, result)
Example no. 17
def _milstein_step(*, i, written_count, current_state, result, drift_fn,
                   volatility_fn, grad_volatility_fn, wiener_mean, num_samples,
                   times, dt, sqrt_dt, keep_mask, random_type, seed,
                   normal_draws):
  """Performs one step of Milstein scheme."""
  current_time = times[i + 1]
  written_count = tf.cast(written_count, tf.int32)
  if normal_draws is not None:
    dw = normal_draws[i]
  else:
    dw = random.mv_normal_sample((num_samples,),
                                 mean=wiener_mean,
                                 random_type=random_type,
                                 seed=seed)

  dw = dw * sqrt_dt[i]
  dt_inc = dt[i] * drift_fn(current_time, current_state)  # pylint: disable=not-callable
  dw_inc = tf.linalg.matvec(volatility_fn(current_time, current_state), dw)  # pylint: disable=not-callable

  # Higher order terms. For dim 1, the product here is elementwise.
  # Will need to adjust for higher dims.
  hot_vol = tf.squeeze(
      tf.multiply(
          volatility_fn(current_time, current_state),
          grad_volatility_fn(current_time, current_state)), -1)
  hot_dw = dw * dw - dt[i]
  hot_inc = tf.multiply(hot_vol, hot_dw) / 2
  next_state = current_state + dt_inc + dw_inc + hot_inc

  result = utils.maybe_update_along_axis(
      tensor=result,
      do_update=keep_mask[i + 1],
      ind=written_count,
      axis=1,
      new_tensor=tf.expand_dims(next_state, axis=1))
  written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
  return i + 1, written_count, next_state, result
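The `hot_inc` term is the extra summand that distinguishes the one-dimensional Milstein update from the Euler update:

$$X_{n+1} = X_n + a\,\Delta t + b\,\Delta W + \tfrac12\,b\,b'\,\big((\Delta W)^2 - \Delta t\big),$$

with drift $a$, volatility $b$, and its spatial derivative $b'$ (here `grad_volatility_fn`).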
Example no. 18
def generate_mc_normal_draws(num_normal_draws,
                             num_time_steps,
                             num_sample_paths,
                             random_type,
                             batch_shape=None,
                             skip=0,
                             seed=None,
                             dtype=None,
                             name=None):
  """Generates normal random samples to be consumed by a Monte Carlo algorithm.

  Many Monte Carlo (MC) algorithms can be rewritten so that all the necessary
  random (or quasi-random) variables are drawn in advance as a `Tensor` of
  shape `batch_shape + [num_time_steps, num_samples, num_normal_draws]`, where
  `batch_shape` is the shape of the independent batches of the Monte Carlo
  algorithm, `num_time_steps` is the number of time steps the Monte Carlo
  algorithm performs within each batch, `num_sample_paths` is the number of
  sample paths of the Monte Carlo algorithm, and `num_normal_draws` is the
  number of independent normal draws per sample path.
  For example, in order to use quasi-random numbers in a Monte Carlo algorithm,
  the samples have to be drawn in advance.
  The function generates a `Tensor`, say `x`, in a format such that for a
  quasi-random `random_type` `x[i]` corresponds to different dimensions of the
  quasi-random sequence, so that it can be used in a Monte Carlo algorithm.

  Args:
    num_normal_draws: A scalar int32 `Tensor`. The number of independent normal
      draws at each time step for each sample path. Should be a graph
      compilation constant.
    num_time_steps: A scalar int32 `Tensor`. The number of time steps at which
      to draw the independent normal samples. Should be a graph compilation
      constant.
    num_sample_paths: A scalar int32 `Tensor`. The number of trajectories (e.g.,
      Monte Carlo paths) for which to draw the independent normal samples.
      Should be a graph compilation constant.
    random_type: Enum value of `tff.math.random.RandomType`. The type of
      (quasi)-random number generator to use to generate the paths.
    batch_shape: This input can be either of type `tf.TensorShape` or a 1-d
      `Tensor` of type `tf.int32` specifying the dimensions of independent
      batches of normal samples to be drawn.
      Default value: `None` which corresponds to a single batch of shape
      `tf.TensorShape([])`.
    skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
      Halton sequence to skip. Used only when `random_type` is 'SOBOL',
      'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
      Default value: `0`.
    seed: Seed for the random number generator. The seed is only relevant if
      `random_type` is one of
      `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
      STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
      `HALTON_RANDOMIZED` the seed should be a Python integer. For `STATELESS`
      and `STATELESS_ANTITHETIC` it must be supplied as an integer `Tensor` of
      shape `[2]`.
      Default value: `None` which means no seed is set.
    dtype: The `dtype` of the output `Tensor`.
      Default value: `None` which maps to `float32`.
    name: Python string. The name to give this op.
      Default value: `None` which maps to `generate_mc_normal_draws`.

  Returns:
   A `Tensor` of shape
   `[num_time_steps] + batch_shape + [num_sample_paths, num_normal_draws]`.
  """
  if name is None:
    name = 'generate_mc_normal_draws'
  if skip is None:
    skip = 0
  with tf.name_scope(name):
    if dtype is None:
      dtype = tf.float32
    if batch_shape is None:
      batch_shape = tf.TensorShape([])

    # In case of quasi-random draws, the total dimension of the draws should
    # be `num_time_steps * num_normal_draws`.
    total_dimension = tf.zeros(
        [num_time_steps * num_normal_draws], dtype=dtype,
        name='total_dimension')
    if random_type in [random.RandomType.PSEUDO_ANTITHETIC,
                       random.RandomType.STATELESS_ANTITHETIC]:
      # Put `num_sample_paths` to the front for antithetic samplers
      sample_shape = tf.concat([[num_sample_paths], batch_shape], axis=0)
      is_antithetic = True
    else:
      # Note that for QMC sequences `num_sample_paths` should follow
      # `batch_shape`
      sample_shape = tf.concat([batch_shape, [num_sample_paths]], axis=0)
      is_antithetic = False
    normal_draws = random.mv_normal_sample(
        sample_shape,
        mean=total_dimension,
        random_type=random_type,
        seed=seed,
        skip=skip)
    # Reshape and transpose
    normal_draws = tf.reshape(
        normal_draws,
        tf.concat([sample_shape, [num_time_steps, num_normal_draws]], axis=0))
    # Shape [steps_num] + batch_shape + [num_samples, dim]
    normal_draws_rank = normal_draws.shape.rank
    if is_antithetic and normal_draws_rank > 3:
      # Permutation for the case when the batch_shape is present
      perm = [normal_draws_rank-2] + list(
          range(1, normal_draws_rank-2)) + [0, normal_draws_rank-1]
    else:
      perm = [normal_draws_rank-2] + list(
          range(normal_draws_rank-2)) + [normal_draws_rank-1]
    normal_draws = tf.transpose(normal_draws, perm=perm)
    return normal_draws
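A usage sketch (it assumes `generate_mc_normal_draws` and the `random` module used throughout these examples are in scope; the sizes are illustrative):

import tensorflow as tf

# Draw all Sobol-based normals up front: 2 draws per step, 100 time steps,
# 10000 sample paths. With the default `batch_shape`, the result has shape
# [100, 10000, 2], so `normal_draws[i]` feeds time step `i` of a simulation.
normal_draws = generate_mc_normal_draws(
    num_normal_draws=2,
    num_time_steps=100,
    num_sample_paths=10000,
    random_type=random.RandomType.SOBOL,
    dtype=tf.float64)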
Example no. 19
def generate_mc_normal_draws(num_normal_draws,
                             num_time_steps,
                             num_sample_paths,
                             random_type,
                             skip=0,
                             seed=None,
                             dtype=None,
                             name=None):
  """Generates normal random samples to be consumed by a Monte Carlo algorithm.

  Many Monte Carlo (MC) algorithms can be rewritten so that all the necessary
  random (or quasi-random) variables are drawn in advance as a `Tensor` of
  shape `[num_time_steps, num_samples, num_normal_draws]`, where
  `num_time_steps` is the number of time steps the Monte Carlo algorithm
  performs, `num_sample_paths` is the number of sample paths of the Monte
  Carlo algorithm, and `num_normal_draws` is the number of independent normal
  draws per sample path.
  For example, in order to use quasi-random numbers in a Monte Carlo algorithm,
  the samples have to be drawn in advance.
  The function generates a `Tensor`, say `x`, in a format such that for a
  quasi-random `random_type` `x[i]` corresponds to different dimensions of the
  quasi-random sequence, so that it can be used in a Monte Carlo algorithm.

  Args:
    num_normal_draws: A scalar int32 `Tensor`. The number of independent normal
      draws at each time step for each sample path. Should be a graph
      compilation constant.
    num_time_steps: A scalar int32 `Tensor`. The number of time steps at which
      to draw the independent normal samples. Should be a graph compilation
      constant.
    num_sample_paths: A scalar int32 `Tensor`. The number of trajectories (e.g.,
      Monte Carlo paths) for which to draw the independent normal samples.
      Should be a graph compilation constant.
    random_type: Enum value of `tff.math.random.RandomType`. The type of
      (quasi)-random number generator to use to generate the paths.
    skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
      Halton sequence to skip. Used only when `random_type` is 'SOBOL',
      'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
      Default value: `0`.
    seed: Seed for the random number generator. The seed is only relevant if
      `random_type` is one of
      `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
      STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
      `HALTON_RANDOMIZED` the seed should be a Python integer. For `STATELESS`
      and `STATELESS_ANTITHETIC` it must be supplied as an integer `Tensor` of
      shape `[2]`.
      Default value: `None` which means no seed is set.
    dtype: The `dtype` of the output `Tensor`.
      Default value: `None` which maps to `float32`.
    name: Python string. The name to give this op.
      Default value: `None` which maps to `generate_mc_normal_draws`.

  Returns:
   A `Tensor` of shape `[num_time_steps, num_sample_paths, num_normal_draws]`.
  """
  if name is None:
    name = 'generate_mc_normal_draws'
  if skip is None:
    skip = 0
  with tf.name_scope(name):
    if dtype is None:
      dtype = tf.float32
    # In case of quasi-random draws, the total dimension of the draws should
    # be `num_time_steps * num_normal_draws`.
    total_dimension = tf.zeros([num_time_steps * num_normal_draws], dtype=dtype,
                               name='total_dimension')
    normal_draws = random.mv_normal_sample(
        [num_sample_paths], mean=total_dimension,
        random_type=random_type,
        seed=seed,
        skip=skip)
    # Reshape and transpose
    normal_draws = tf.reshape(
        normal_draws, [num_sample_paths, num_time_steps, num_normal_draws])
    # Shape [steps_num, num_samples, dim]
    normal_draws = tf.transpose(normal_draws, [1, 0, 2])
    return normal_draws
Example no. 20
def _milstein_step(*, dim, i, written_count, current_state, result, drift_fn,
                   volatility_fn, grad_volatility_fn, wiener_mean, num_samples,
                   times, dt, sqrt_dt, keep_mask, random_type, seed,
                   normal_draws, input_gradients, stratonovich_order,
                   aux_normal_draws, record_samples):
    """Performs one step of Milstein scheme."""
    current_time = times[i + 1]
    written_count = tf.cast(written_count, tf.int32)
    if normal_draws is not None:
        dw = normal_draws[i]
    else:
        dw = random.mv_normal_sample((num_samples, ),
                                     mean=wiener_mean,
                                     random_type=random_type,
                                     seed=seed)
    if aux_normal_draws is not None:
        stratonovich_draws = []
        for j in range(3):
            stratonovich_draws.append(
                tf.reshape(aux_normal_draws[j][i],
                           [num_samples, dim, stratonovich_order]))
    else:
        stratonovich_draws = []
        # Three sets of normal draws for Stratonovich integrals.
        for j in range(3):
            stratonovich_draws.append(
                random.mv_normal_sample(
                    (num_samples, ),
                    mean=tf.zeros((dim, stratonovich_order),
                                  dtype=current_state.dtype,
                                  name='stratonovich_draws_{}'.format(j)),
                    random_type=random_type,
                    seed=seed))

    if dim == 1:
        drift = drift_fn(current_time, current_state)
        vol = volatility_fn(current_time, current_state)
        grad_vol = grad_volatility_fn(current_time, current_state,
                                      tf.ones_like(current_state))
        next_state = _milstein_1d(dw=dw,
                                  dt=dt[i],
                                  sqrt_dt=sqrt_dt[i],
                                  current_state=current_state,
                                  drift=drift,
                                  vol=vol,
                                  grad_vol=grad_vol)
    else:
        drift = drift_fn(current_time, current_state)
        vol = volatility_fn(current_time, current_state)
        # This is a list of size equal to the dimension of the state space
        # `dim`. It contains tensors of shape [num_samples, dim, wiener_dim]
        # representing the gradient of the volatility function. In our case,
        # the dimension of the Wiener process `wiener_dim` is equal to the
        # state dimension `dim`.
        grad_vol = [
            grad_volatility_fn(current_time, current_state, start)
            for start in input_gradients
        ]
        next_state = _milstein_nd(dim=dim,
                                  num_samples=num_samples,
                                  dw=dw,
                                  dt=dt[i],
                                  sqrt_dt=sqrt_dt[i],
                                  current_state=current_state,
                                  drift=drift,
                                  vol=vol,
                                  grad_vol=grad_vol,
                                  stratonovich_draws=stratonovich_draws,
                                  stratonovich_order=stratonovich_order)
    if record_samples:
        result = result.write(written_count, next_state)
    else:
        result = next_state
    written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)

    return i + 1, written_count, next_state, result