def sample_paths(self,
                     times,
                     num_samples=1,
                     initial_state=None,
                     random_type=None,
                     seed=None,
                     swap_memory=True,
                     name=None,
                     time_step=None):
        """Returns a sample of paths from the process using Euler sampling.

    The default implementation uses the Euler scheme. However, for particular
    types of Ito processes more efficient schemes can be used.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
        Default value: 1.
      initial_state: `Tensor` of shape `[dim]`. The initial state of the
        process.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random number
        generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Python `int`. The random seed to use. If not supplied, no seed is
        set.
      swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for
        this op. See an equivalent flag in `tf.while_loop` documentation for
        more details. Useful when computing a gradient of the op since
        `tf.while_loop` is used to propagate stochastic process in time.
        Default value: True.
      name: Python string. The name to give this op.
        Default value: `None` which maps to the default name `sample_paths`.
      time_step: Real scalar `Tensor`. The maximal distance between time points
        in the time grid used by the Euler scheme.

    Returns:
     A real `Tensor` of shape `[num_samples, k, n]` where `k` is the size of the
     `times`, and `n` is the dimension of the process.
    """
        default_name = self._name + '_sample_path'
        with tf.compat.v1.name_scope(name,
                                     default_name=default_name,
                                     values=[times, initial_state]):
            return euler_sampling.sample(self._dim,
                                         self._drift_fn,
                                         self._volatility_fn,
                                         times,
                                         num_samples=num_samples,
                                         initial_state=initial_state,
                                         random_type=random_type,
                                         time_step=time_step,
                                         seed=seed,
                                         swap_memory=swap_memory,
                                         dtype=self._dtype,
                                         name=name)
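For orientation, here is a minimal usage sketch of the `sample_paths` API documented above. The `GenericItoProcess` constructor and the drift/volatility signatures are assumptions based on common `tf_quant_finance` conventions, not necessarily the class this method belongs to; treat it as an illustrative sketch only.

import tensorflow as tf
import tf_quant_finance as tff

# Toy 1-d Ito process dX = 0.05 dt + 0.2 dW (hypothetical example, not from the
# source above). drift_fn/volatility_fn follow the (t, x) -> Tensor convention
# expected by euler_sampling.sample.
drift_fn = lambda t, x: 0.05 * tf.ones_like(x)
vol_fn = lambda t, x: 0.2 * tf.expand_dims(tf.ones_like(x), axis=-1)

process = tff.models.GenericItoProcess(
    dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=tf.float64)
paths = process.sample_paths(
    times=[0.1, 0.5, 1.0],   # increasing positive times
    num_samples=100,         # number of paths to draw
    time_step=0.01,          # Euler grid spacing
    seed=42)
# paths has shape [100, 3, 1]: [num_samples, len(times), dim].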
    def _sample_paths(self, times, time_step, num_time_steps, num_samples,
                      random_type, skip, seed):
        """Returns a sample of paths from the process."""
        # Initial state should be broadcastable to batch_shape + [num_samples, dim]
        initial_state = tf.zeros(self._batch_shape + [1, self._dim],
                                 dtype=self._dtype)
        # Note that we need a finer simulation grid (determined by `dt`) to compute
        # discount factors accurately. The `times` input might not be granular
        # enough for accurate calculations.
        time_step_internal = time_step
        if num_time_steps is not None:
            num_time_steps = tf.convert_to_tensor(num_time_steps,
                                                  dtype=tf.int32,
                                                  name='num_time_steps')
            time_step_internal = times[-1] / tf.cast(num_time_steps,
                                                     dtype=self._dtype)

        times, _, time_indices = utils.prepare_grid(
            times=times,
            time_step=time_step_internal,
            dtype=self._dtype,
            num_time_steps=num_time_steps)
        # Add zeros as a starting location
        dt = times[1:] - times[:-1]

        # xy_paths.shape = (num_samples, num_times, nfactors+nfactors^2)
        xy_paths = euler_sampling.sample(self._dim,
                                         self._drift_fn,
                                         self._volatility_fn,
                                         times,
                                         num_samples=num_samples,
                                         initial_state=initial_state,
                                         random_type=random_type,
                                         seed=seed,
                                         time_step=time_step,
                                         num_time_steps=num_time_steps,
                                         skip=skip)

        x_paths = xy_paths[..., :self._factors]
        y_paths = xy_paths[..., self._factors:]

        # shape=(batch_shape, num_times)
        f_0_t = self._instant_forward_rate_fn(times)
        # shape=(batch_shape, num_samples, num_times)
        rate_paths = tf.math.reduce_sum(x_paths, axis=-1) + tf.expand_dims(
            f_0_t, axis=-2)

        dt = tf.concat([tf.convert_to_tensor([0.0], dtype=self._dtype), dt],
                       axis=0)
        discount_factor_paths = tf.math.exp(
            -utils.cumsum_using_matvec(rate_paths * dt))
        return (tf.gather(rate_paths, time_indices, axis=-1),
                tf.gather(discount_factor_paths, time_indices, axis=-1),
                tf.gather(x_paths, time_indices, axis=self._batch_rank + 1),
                tf.gather(y_paths, time_indices, axis=self._batch_rank + 1))
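The discount factors above come from `utils.cumsum_using_matvec`. The following is a plausible stand-in for that helper, assuming it expresses a cumulative sum as a lower-triangular matrix-vector product; this is an assumption about the library's internals, shown only to make the discounting step concrete.

import tensorflow as tf

def cumsum_via_matvec(x):
  """Cumulative sum over the last axis expressed as a lower-triangular matvec."""
  n = tf.shape(x)[-1]
  # lower[i, j] = 1 for j <= i, so matvec(lower, x)[i] = sum_{j <= i} x[j].
  lower = tf.linalg.band_part(tf.ones([n, n], dtype=x.dtype), -1, 0)
  return tf.linalg.matvec(lower, x)

# Discounting as in the sample above: D_k = exp(-sum_{j <= k} r_j * dt_j).
rate_times_dt = tf.constant([[0.0, 0.01, 0.02],
                             [0.0, 0.03, 0.01]], dtype=tf.float64)
discount_factors = tf.math.exp(-cumsum_via_matvec(rate_times_dt))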
    def _sample_paths(self, times, time_step, num_samples, random_type, skip,
                      seed):
        """Returns a sample of paths from the process."""
        initial_state = tf.zeros((self._dim, ), dtype=self._dtype)
        # Note that we need a finer simulation grid (determined by `dt`) to compute
        # discount factors accurately. The `times` input might not be granular
        # enough for accurate calculations.
        times, keep_mask, _ = utils.prepare_grid(times=times,
                                                 time_step=time_step,
                                                 dtype=self._dtype)
        # Add zeros as a starting location
        dt = times[1:] - times[:-1]

        # xy_paths.shape = (num_samples, num_times, nfactors+nfactors^2)
        xy_paths = euler_sampling.sample(self._dim,
                                         self._drift_fn,
                                         self._volatility_fn,
                                         times,
                                         num_samples=num_samples,
                                         initial_state=initial_state,
                                         random_type=random_type,
                                         seed=seed,
                                         time_step=time_step,
                                         skip=skip)

        x_paths = xy_paths[..., :self._factors]
        y_paths = xy_paths[..., self._factors:]

        f_0_t = self._instant_forward_rate_fn(times)  # shape=(num_times,)
        rate_paths = tf.math.reduce_sum(
            x_paths, axis=-1) + f_0_t  # shape=(num_samples, num_times)

        discount_factor_paths = tf.math.exp(-rate_paths[:, :-1] * dt)
        discount_factor_paths = tf.concat(
            [
                tf.ones(
                    (num_samples, 1), dtype=self._dtype), discount_factor_paths
            ],
            axis=1)  # shape=(num_samples, num_times)
        discount_factor_paths = utils.cumprod_using_matvec(
            discount_factor_paths)

        return (tf.boolean_mask(rate_paths, keep_mask, axis=1),
                tf.boolean_mask(discount_factor_paths, keep_mask, axis=1),
                tf.boolean_mask(x_paths, keep_mask, axis=1),
                tf.boolean_mask(y_paths, keep_mask, axis=1))
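Here `utils.prepare_grid` supplies both the finer simulation grid and the `keep_mask` used to recover the originally requested `times`. Below is a rough sketch of that behaviour, under the assumption that the grid is the union of a uniform grid of spacing `time_step` with the requested times; this is not the library's actual implementation.

import tensorflow as tf

def prepare_grid_sketch(times, time_step, dtype):
  """Merged time grid plus a boolean mask marking the requested times."""
  times = tf.convert_to_tensor(times, dtype=dtype)
  uniform = tf.range(0.0, times[-1], time_step, dtype=dtype)
  all_times = tf.sort(tf.concat([uniform, times], axis=0))
  all_times = tf.unique(all_times).y  # drop duplicates; order is preserved
  keep_mask = tf.reduce_any(
      tf.equal(all_times[:, None], times[None, :]), axis=-1)
  return all_times, keep_mask

grid, keep_mask = prepare_grid_sketch([0.5, 1.0], time_step=0.25, dtype=tf.float64)
# tf.boolean_mask(grid, keep_mask) recovers the requested [0.5, 1.0], which is
# exactly how `keep_mask` is applied to the sampled paths above.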
  def _sample_paths(self, times, time_step, num_samples, random_type, skip,
                    seed):
    """Returns a sample of paths from the process."""
    initial_state = tf.zeros((self._dim,), dtype=self._dtype)
    # Note that we need a finer simulation grid (determined by `dt`) to compute
    # discount factors accurately. The `times` input might not be granular
    # enough for accurate calculations.
    times, _, time_indices = utils.prepare_grid(
        times=times, time_step=time_step, dtype=self._dtype)
    # Add zeros as a starting location
    dt = times[1:] - times[:-1]

    # Shape = (num_samples, num_times, nfactors)
    paths = euler_sampling.sample(
        self._dim,
        self._drift_fn,
        self._volatility_fn,
        times,
        num_samples=num_samples,
        initial_state=initial_state,
        random_type=random_type,
        seed=seed,
        time_step=time_step,
        skip=skip)
    y_paths = self.state_y(times)  # shape=(dim, dim, num_times)
    y_paths = tf.reshape(
        y_paths, tf.concat([[self._dim**2], tf.shape(times)], axis=0))

    # shape=(num_samples, num_times, dim**2)
    y_paths = tf.repeat(tf.expand_dims(tf.transpose(
        y_paths), axis=0), num_samples, axis=0)

    f_0_t = self._instant_forward_rate_fn(times)  # shape=(num_times,)
    rate_paths = tf.math.reduce_sum(
        paths, axis=-1) + f_0_t  # shape=(num_samples, num_times)

    discount_factor_paths = tf.math.exp(-rate_paths[:, :-1] * dt)
    discount_factor_paths = tf.concat(
        [tf.ones((num_samples, 1), dtype=self._dtype), discount_factor_paths],
        axis=1)  # shape=(num_samples, num_times)
    discount_factor_paths = utils.cumprod_using_matvec(discount_factor_paths)
    return (tf.gather(rate_paths, time_indices, axis=1),
            tf.gather(discount_factor_paths, time_indices, axis=1),
            tf.gather(paths, time_indices, axis=1),
            tf.gather(y_paths, time_indices, axis=1))
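A toy shape walk-through of the `y_paths` handling in the example above (sizes are illustrative and values are placeholders):

import tensorflow as tf

dim, num_times, num_samples = 2, 3, 4
y = tf.zeros([dim, dim, num_times], dtype=tf.float64)  # state_y-style output
y = tf.reshape(y, [dim**2, num_times])                  # -> (dim**2, num_times)
y = tf.transpose(y)                                     # -> (num_times, dim**2)
y = tf.repeat(tf.expand_dims(y, axis=0), num_samples, axis=0)
# y.shape == [4, 3, 4], i.e. (num_samples, num_times, dim**2), matching the
# shape comment in the sampled code.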
  def sample_paths(self,
                   times,
                   num_samples=1,
                   initial_state=None,
                   random_type=None,
                   seed=None,
                   time_step=None,
                   swap_memory=True,
                   skip=0,
                   name=None):
    """Returns a sample of paths from the process using Euler sampling.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
        Default value: 1.
      initial_state: `Tensor` of shape `[self._dim]`. The initial state of the
        process.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random number
        generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Seed for the random number generator. The seed is
        only relevant if `random_type` is one of
        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
          STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be an integer scalar `Tensor`. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer
        `Tensor` of shape `[2]`.
        Default value: `None` which means no seed is set.
      time_step: Real scalar `Tensor`. The maximal distance between time points
        in the time grid used by the Euler scheme.
      swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for
        this op. See an equivalent flag in `tf.while_loop` documentation for
        more details. Useful when computing a gradient of the op since
        `tf.while_loop` is used to propagate stochastic process in time.
        Default value: True.
      skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
        Halton sequence to skip. Used only when `random_type` is 'SOBOL',
        'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
        Default value: `0`.
      name: Python string. The name to give this op.
        Default value: `None` which maps to the default name `sample_paths`.

    Returns:
     A real `Tensor` of shape `[num_samples, k, n]` where `k` is the size of the
     `times`, and `n` is the dimension of the process.

    Raises:
      ValueError: If `time_step` is not supplied.
    """
    if time_step is None:
      raise ValueError("`time_step` has to be supplied for JoinedItoProcess "
                       "`sample_paths` method.")
    name = name or self._name + "_sample_paths"
    with tf.name_scope(name):
      if initial_state is None:
        initial_state = tf.zeros(self._dim, dtype=self.dtype(),
                                 name="initial_state")
      else:
        if isinstance(initial_state, (tuple, list)):
          initial_state = [tf.convert_to_tensor(state, dtype=self.dtype(),
                                                name="initial_state")
                           for state in initial_state]
          initial_state = tf.stack(initial_state)
        else:
          initial_state = tf.convert_to_tensor(initial_state,
                                               dtype=self.dtype(),
                                               name="initial_state")
      samples = euler_sampling.sample(self.dim(),
                                      drift_fn=self.drift_fn(),
                                      volatility_fn=self.volatility_fn(),
                                      times=times,
                                      time_step=time_step,
                                      num_samples=num_samples,
                                      initial_state=initial_state,
                                      random_type=random_type,
                                      seed=seed,
                                      swap_memory=swap_memory,
                                      skip=skip,
                                      dtype=self.dtype())
      return samples
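A small illustration of the `initial_state` handling above: when a list or tuple is passed, each component is converted to a `Tensor` and the pieces are stacked into a single rank-1 state of shape `[dim]` before being handed to `euler_sampling.sample` (values here are arbitrary):

import tensorflow as tf

dtype = tf.float64
initial_state = [0.01, 0.02, 0.0]  # one starting value per component process
states = [tf.convert_to_tensor(s, dtype=dtype, name="initial_state")
          for s in initial_state]
stacked = tf.stack(states)         # shape [3] == [dim], as the sampler expects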
    def sample_paths(self,
                     times,
                     initial_state,
                     num_samples=1,
                     random_type=None,
                     seed=None,
                     skip=0,
                     time_step=None,
                     name=None):
        """Returns a sample of paths from the correlated Hull-White process.

    Uses exact sampling if `self.mean_reversion`, `self.volatility` and
    `self.corr_matrix` are all `Tensor`s or piecewise constant functions, and
    Euler scheme sampling if one of the arguments is a generic callable.

    Args:
      times: Rank 1 `Tensor` of positive real values. The times at which the
        path points are to be evaluated.
      initial_state: A `Tensor` of the same `dtype` as `times` and shape
        broadcastable to `[num_samples, self._dim]`.
      num_samples: Positive scalar `int32` `Tensor`. The number of paths to
        draw.
      random_type: Enum value of `RandomType`. The type of (quasi)-random
        number generator to use to generate the paths.
        Default value: `None` which maps to the standard pseudo-random numbers.
      seed: Seed for the random number generator. The seed is
        only relevant if `random_type` is one of
        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
          STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer
        `Tensor` of shape `[2]`.
        Default value: `None` which means no seed is set.
      skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
        Halton sequence to skip. Used only when `random_type` is 'SOBOL',
        'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
        Default value: `0`.
      time_step: Scalar real `Tensor`. Maximal distance between time grid points
        in the Euler scheme. Used only when the Euler scheme is applied.
        Default value: `None`.
      name: Python string. The name to give this op.
        Default value: `sample_paths`.

    Returns:
      A `Tensor` of shape `[num_samples, k, dim]` where `k` is the size
      of the `times` and `dim` is the dimension of the process.

    Raises:
      ValueError:
        (a) If `times` has rank different from `1`.
        (b) If the Euler scheme is used but `time_step` is not supplied.
    """
        # Note: all the notations below are the same as in [1].
        name = name or self._name + '_sample_path'
        with tf.name_scope(name):
            times = tf.convert_to_tensor(times, self._dtype)
            if len(times.shape) != 1:
                raise ValueError('`times` should be a rank 1 Tensor. '
                                 'Rank is {} instead.'.format(len(
                                     times.shape)))
            if self._sample_with_generic:
                if time_step is None:
                    raise ValueError(
                        '`time_step` can not be `None` when at least one of '
                        'the parameters is a generic callable.')
                return euler_sampling.sample(dim=self._dim,
                                             drift_fn=self._drift_fn,
                                             volatility_fn=self._volatility_fn,
                                             times=times,
                                             time_step=time_step,
                                             num_samples=num_samples,
                                             initial_state=initial_state,
                                             random_type=random_type,
                                             seed=seed,
                                             skip=skip,
                                             dtype=self._dtype)
            current_rates = tf.broadcast_to(
                tf.convert_to_tensor(initial_state, dtype=self._dtype),
                [num_samples, self._dim])
            current_instant_forward_rates = self._instant_forward_rate_fn(
                tf.constant(0, self._dtype))
            num_requested_times = times.shape[0]
            params = [self._mean_reversion, self._volatility]
            if self._corr_matrix is not None:
                params = params + [self._corr_matrix]
            times, keep_mask = _prepare_grid(times, params)
            return self._sample_paths(times, num_requested_times,
                                      current_rates,
                                      current_instant_forward_rates,
                                      num_samples, random_type, skip,
                                      keep_mask, seed)
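As the docstring notes, exact sampling is only available when the Hull-White parameters are `Tensor`s or piecewise constant; a generic callable forces the Euler branch and therefore requires `time_step`. A hedged sketch of the two kinds of parameter follows; the `PiecewiseConstantFunc` helper is referenced as I understand it from `tf_quant_finance.math.piecewise`, so treat the exact import path as an assumption.

import tensorflow as tf
import tf_quant_finance as tff

dtype = tf.float64
# Piecewise constant volatility: compatible with the exact-sampling branch.
piecewise_vol = tff.math.piecewise.PiecewiseConstantFunc(
    jump_locations=[0.5, 1.0], values=[0.01, 0.015, 0.02], dtype=dtype)
# A generic callable: switches the model to `_sample_with_generic`, so a
# `time_step` (or a `times_grid` in the variant below) must be supplied when
# calling `sample_paths`.
generic_vol = lambda t: 0.01 + 0.005 * tf.math.tanh(t)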
    def sample_paths(self,
                     times,
                     num_samples=1,
                     random_type=None,
                     seed=None,
                     skip=0,
                     time_step=None,
                     times_grid=None,
                     normal_draws=None,
                     validate_args=False,
                     name=None):
        """Returns a sample of paths from the correlated Hull-White process.

    Uses exact sampling if `self.mean_reversion` is constant and
    `self.volatility` and `self.corr_matrix` are all `Tensor`s or piecewise
    constant functions, and Euler scheme sampling otherwise.

    The exact sampling implements the algorithm and notations in [1], section
    10.1.6.1.

    Args:
      times: Rank 1 `Tensor` of positive real values. The times at which the
        path points are to be evaluated.
      num_samples: Positive scalar `int32` `Tensor`. The number of paths to
        draw.
      random_type: Enum value of `RandomType`. The type of (quasi)-random
        number generator to use to generate the paths.
        Default value: `None` which maps to the standard pseudo-random numbers.
      seed: Seed for the random number generator. The seed is
        only relevant if `random_type` is one of
        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
          STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer
        `Tensor` of shape `[2]`.
        Default value: `None` which means no seed is set.
      skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
        Halton sequence to skip. Used only when `random_type` is 'SOBOL',
        'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
        Default value: `0`.
      time_step: Scalar real `Tensor`. Maximal distance between time grid points
        in the Euler scheme. Used only when the Euler scheme is applied.
        Default value: `None`.
      times_grid: An optional rank 1 `Tensor` representing time discretization
        grid. If `times` are not on the grid, then the nearest points from the
        grid are used. When supplied, `time_step` and jumps of the piecewise
        constant arguments are ignored.
        Default value: `None`, which means that the times grid is computed using
        `time_step`.  When exact sampling is used, the shape should be equal to
        `[num_time_points + 1]` where `num_time_points` is `tf.shape(times)[0]`
        plus the number of jumps of the Hull-White piecewise constant
        parameters. The grid should include the initial time point which is
        usually set to `0.0`.
      normal_draws: A `Tensor` of shape `[num_samples, num_time_points, dim]`
        and the same `dtype` as `times`. Represents random normal draws to
        compute increments `N(0, t_{n+1}) - N(0, t_n)`. When supplied,
        the `num_samples` argument is ignored and the first dimension of
        `normal_draws` is used instead. When exact sampling is used,
        `num_time_points` should be equal to `tf.shape(times)[0]` plus the
        number of jumps of the Hull-White piecewise constant parameters.
        Default value: `None` which means that the draws are generated by the
        algorithm.
      validate_args: Python `bool`. When `True` and `normal_draws` are supplied,
        checks that `tf.shape(normal_draws)[1]` is equal to the total number of
        time steps performed by the sampler.
        When `False`, an invalid dimension may silently produce incorrect outputs.
        Default value: `False`.
      name: Python string. The name to give this op.
        Default value: `sample_paths`.

    Returns:
      A `Tensor` of shape `[num_samples, k, dim]` where `k` is the size
      of the `times` and `dim` is the dimension of the process.

    Raises:
      ValueError:
        (a) If `times` has rank different from `1`.
        (b) If the Euler scheme is used but `time_step` is not supplied.
        (c) When neither `times_grid` nor `time_step` are supplied and Euler
          scheme is used.
        (d) If `normal_draws` is supplied and `dim` is mismatched.
      tf.errors.InvalidArgumentError: If `normal_draws` is supplied and the
        number of time steps implied by `times_grid` or `times_step` is
        mismatched.
    """
        # Note: all the notations below are the same as in [2].
        name = name or self._name + '_sample_path'
        with tf.name_scope(name):
            times = tf.convert_to_tensor(times, self._dtype, name='times')
            if times_grid is not None:
                times_grid = tf.convert_to_tensor(times_grid,
                                                  self._dtype,
                                                  name='times_grid')
            if len(times.shape) != 1:
                raise ValueError('`times` should be a rank 1 Tensor. '
                                 'Rank is {} instead.'.format(len(
                                     times.shape)))
            if self._sample_with_generic:
                if time_step is None and times_grid is None:
                    raise ValueError(
                        'Either `time_step` or `times_grid` has to be specified when '
                        'at least one of the parameters is a generic callable.'
                    )
                initial_state = self._instant_forward_rate_fn(0.0)
                return euler_sampling.sample(dim=self._dim,
                                             drift_fn=self._drift_fn,
                                             volatility_fn=self._volatility_fn,
                                             times=times,
                                             time_step=time_step,
                                             num_samples=num_samples,
                                             initial_state=initial_state,
                                             random_type=random_type,
                                             seed=seed,
                                             skip=skip,
                                             times_grid=times_grid,
                                             normal_draws=normal_draws,
                                             dtype=self._dtype)
            if normal_draws is not None:
                normal_draws = tf.convert_to_tensor(normal_draws,
                                                    dtype=self._dtype,
                                                    name='normal_draws')
                # Shape [num_time_points, num_samples, dim]
                normal_draws = tf.transpose(normal_draws, [1, 0, 2])
                num_samples = tf.shape(normal_draws)[1]
                draws_dim = normal_draws.shape[2]
                if self._dim != draws_dim:
                    raise ValueError(
                        '`dim` should be equal to `normal_draws.shape[2]` but are '
                        '{0} and {1} respectively'.format(
                            self._dim, draws_dim))
            return self._sample_paths(times=times,
                                      num_samples=num_samples,
                                      random_type=random_type,
                                      normal_draws=normal_draws,
                                      skip=skip,
                                      seed=seed,
                                      validate_args=validate_args,
                                      times_grid=times_grid)
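A sketch of supplying precomputed `normal_draws` to the method above. The shape follows the docstring, `[num_samples, num_time_points, dim]`, where `num_time_points` must agree with the time grid the sampler builds internally; the value used here is only a placeholder assumption.

import tensorflow as tf

dtype = tf.float64
num_samples, dim = 8, 2
num_time_points = 20  # placeholder; must match the sampler's internal grid
normal_draws = tf.random.stateless_normal(
    shape=[num_samples, num_time_points, dim], seed=[4, 2], dtype=dtype)
# Passed as `normal_draws=normal_draws`; `num_samples` is then taken from the
# first dimension, and `validate_args=True` checks the time dimension against
# the number of time steps actually performed.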
  def sample_paths(self,
                   times,
                   num_samples=1,
                   initial_state=None,
                   random_type=None,
                   seed=None,
                   swap_memory=True,
                   name=None,
                   time_step=None,
                   num_time_steps=None,
                   skip=0,
                   precompute_normal_draws=True,
                   times_grid=None,
                   normal_draws=None,
                   watch_params=None,
                   validate_args=False):
    """Returns a sample of paths from the process using Euler sampling.

    The default implementation uses the Euler scheme. However, for particular
    types of Ito processes more efficient schemes can be used.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
        Default value: 1.
      initial_state: `Tensor` of shape broadcastable to
        `batch_shape + [num_samples, dim]`. The initial state of the process.
        `batch_shape` represents the shape of the independent batches of the
        stochastic process as in the `drift_fn` and `volatility_fn` of the
        underlying class. Note that the `batch_shape` is inferred from
        the `initial_state` and hence when sampling is requested for a batch of
        stochastic processes, the shape of `initial_state` should be at least
        `batch_shape + [1, 1]`.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random number
        generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Seed for the random number generator. The seed is
        only relevant if `random_type` is one of
        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
          STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer
        `Tensor` of shape `[2]`.
        Default value: `None` which means no seed is set.
      swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for
        this op. See an equivalent flag in `tf.while_loop` documentation for
        more details. Useful when computing a gradient of the op since
        `tf.while_loop` is used to propagate stochastic process in time.
        Default value: True.
      name: Python string. The name to give this op.
        Default value: `None` which maps to the default name `sample_paths`.
      time_step: An optional scalar real `Tensor` - maximal distance between
        points in the time grid.
        Either this or `num_time_steps` should be supplied.
        Default value: `None`.
      num_time_steps: An optional scalar integer `Tensor` - the total number of
        time steps performed by the algorithm. The maximal distance between
        points in the grid is bounded by
        `times[-1] / (num_time_steps - times.shape[0])`.
        Either this or `time_step` should be supplied.
        Default value: `None`.
      skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
        Halton sequence to skip. Used only when `random_type` is 'SOBOL',
        'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
        Default value: `0`.
      precompute_normal_draws: Python bool. Indicates whether the noise
        increments in Euler scheme are precomputed upfront (see
        `models.euler_sampling.sample`). For `HALTON` and `SOBOL` random types
        the increments are always precomputed. While the resulting graph
        consumes more memory, the performance gains might be significant.
        Default value: `True`.
      times_grid: An optional rank 1 `Tensor` representing time discretization
        grid. If `times` are not on the grid, then the nearest points from the
        grid are used.
        Default value: `None`, which means that times grid is computed using
        `time_step` and `num_time_steps`.
      normal_draws: A `Tensor` of shape
        `batch_shape + [num_samples, num_time_points, dim]`
        and the same `dtype` as `times`. Represents random normal draws to
        compute increments `N(0, t_{n+1}) - N(0, t_n)`. `batch_shape` is the
        shape of the independent batches of the stochastic process. When
        supplied, `num_samples`, `time_step` and `num_time_steps` arguments are
        ignored and the first dimensions of `normal_draws` are used instead.
      watch_params: An optional list of zero-dimensional `Tensor`s of the same
        `dtype` as `initial_state`. If provided, specifies `Tensor`s with
        respect to which the differentiation of the sampling function will
        happen. A more efficient algorithm is used when `watch_params` are
        specified. Note that the function becomes differentiable only with
        respect to these `Tensor`s and the `initial_state`. The gradient with
        respect to any other `Tensor` is set to zero.
      validate_args: Python `bool`. When `True` and `normal_draws` are supplied,
        checks that `tf.shape(normal_draws)[1]` is equal to `num_time_steps`
        that is either supplied as an argument or computed from `time_step`.
        When `False`, an invalid dimension may silently produce incorrect outputs.
        Default value: `False`.

    Returns:
     A real `Tensor` of shape `batch_shape + [num_samples, k, n]` where `k`
     is the size of the `times`, and `n` is the dimension of the process.

    Raises:
      ValueError:
        (a) When `times_grid` is not supplied, and neither `num_time_steps` nor
          `time_step` are supplied or if both are supplied.
        (b) If `normal_draws` is supplied and `dim` is mismatched.
      tf.errors.InvalidArgumentError: If `normal_draws` is supplied and
        `num_time_steps` is mismatched.
    """
    name = name or (self._name + '_sample_path')
    with tf.name_scope(name):
      return euler_sampling.sample(
          self._dim,
          self._drift_fn,
          self._volatility_fn,
          times,
          num_samples=num_samples,
          initial_state=initial_state,
          random_type=random_type,
          time_step=time_step,
          num_time_steps=num_time_steps,
          seed=seed,
          swap_memory=swap_memory,
          skip=skip,
          precompute_normal_draws=precompute_normal_draws,
          times_grid=times_grid,
          normal_draws=normal_draws,
          watch_params=watch_params,
          dtype=self._dtype,
          name=name)
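A shape-only sketch of the batched `initial_state` described above. For a batch of three independent two-dimensional processes, the initial state must broadcast to `batch_shape + [num_samples, dim]`, so the minimal shape is `batch_shape + [1, 1]`:

import tensorflow as tf

dtype = tf.float64
batch_shape, dim = [3], 2
# Minimal broadcastable shape: batch_shape + [1, 1]. The trailing singleton
# axes broadcast over num_samples and dim respectively.
initial_state = tf.zeros(batch_shape + [1, 1], dtype=dtype)
# A per-batch, per-dimension start value would instead have shape [3, 1, 2];
# the returned paths then have shape batch_shape + [num_samples, len(times), dim].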
  def test_pricing_european_option(self,
                                   beta,
                                   volvol,
                                   rho,
                                   time_step,
                                   initial_forward,
                                   strikes,
                                   initial_volatility,
                                   put_option=True):
    """Test that the SABR model computes the same price as the Euler method."""
    dtype = np.float64
    times = [0.5]
    num_samples = 10000
    test_seed = [123, 124]
    beta = tf.convert_to_tensor(beta, dtype=dtype)
    volvol = tf.convert_to_tensor(volvol, dtype=dtype)
    rho = tf.convert_to_tensor(rho, dtype=dtype)

    if put_option:
      option_fn = lambda samples, strike: strike - samples
    else:
      option_fn = lambda samples, strike: samples - strike

    drift_fn = lambda _, x: tf.zeros_like(x)

    def _vol_fn(t, x):
      """The volatility function for the SABR model."""
      del t
      f = x[..., 0]
      v = x[..., 1]
      fb = f**beta
      m11 = v * fb * tf.math.sqrt(1 - tf.square(rho))
      m12 = v * fb * rho
      m21 = tf.zeros_like(m11)
      m22 = volvol * v
      mc1 = tf.concat([tf.expand_dims(m11, -1), tf.expand_dims(m21, -1)], -1)
      mc2 = tf.concat([tf.expand_dims(m12, -1), tf.expand_dims(m22, -1)], -1)
      # Set up absorbing boundary.
      should_be_zero = tf.expand_dims(
          tf.expand_dims((beta != 0) & (f <= 0.), -1), -1)
      vol_matrix = tf.concat([tf.expand_dims(mc1, -1),
                              tf.expand_dims(mc2, -1)], -1)
      return tf.where(should_be_zero, tf.zeros_like(vol_matrix), vol_matrix)

    euler_paths = euler_sampling.sample(
        dim=2,
        drift_fn=drift_fn,
        volatility_fn=_vol_fn,
        times=times,
        time_step=time_step,
        num_samples=num_samples,
        initial_state=[initial_forward, initial_volatility],
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
        seed=test_seed,
        dtype=dtype)

    euler_paths = self.evaluate(euler_paths)
    euler_samples = euler_paths[..., 0]

    process = SabrModel(
        beta=beta,
        volvol=volvol,
        rho=rho,
        dtype=dtype,
        enable_unbiased_sampling=True)
    # Use a 10x grid step to make the test faster.
    paths = process.sample_paths(
        initial_forward=initial_forward,
        initial_volatility=initial_volatility,
        times=times,
        time_step=time_step * 10,
        num_samples=num_samples,
        seed=test_seed,
        validate_args=True,
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC)
    paths = self.evaluate(paths)
    samples = paths[..., 0]

    for strike in strikes:
      euler_mean, euler_price = (np.average(euler_samples),
                                 np.average(
                                     np.maximum(
                                         option_fn(euler_samples, strike), 0)))
      mean, price = (np.average(samples),
                     np.average(np.maximum(option_fn(samples, strike), 0)))
      self.assertAllClose([euler_mean, euler_price], [mean, price],
                          rtol=0.05,
                          atol=0.05)
    def test_relative_error(self):
        """Replicate tests from reference [1] test case 4."""
        dtype = np.float64
        num_samples = 1000
        test_seed = [123, 124]

        initial_forward = tf.constant(0.07, dtype=dtype)
        initial_volatility = tf.constant(0.4, dtype=dtype)
        volvol = tf.constant(0.8, dtype=dtype)
        beta = tf.constant(0.4, dtype=dtype)
        rho = tf.constant(-0.6, dtype=dtype)
        times = [1]
        timesteps = [0.0625, 0.03125]
        strike = 0.4
        process = SabrModel(beta=beta,
                            volvol=volvol,
                            rho=rho,
                            dtype=dtype,
                            enable_unbiased_sampling=True)

        euler_table = []
        process_table = []
        drift_fn = lambda _, x: tf.zeros_like(x)

        def _vol_fn(t, x):
            """The volatility function for the SABR model."""
            del t
            f = x[..., 0]
            v = x[..., 1]
            fb = f**beta
            m11 = v * fb * tf.math.sqrt(1 - tf.square(rho))
            m12 = v * fb * rho
            m21 = tf.zeros_like(m11)
            m22 = volvol * v
            mc1 = tf.concat([tf.expand_dims(m11, -1),
                             tf.expand_dims(m21, -1)], -1)
            mc2 = tf.concat([tf.expand_dims(m12, -1),
                             tf.expand_dims(m22, -1)], -1)
            # Set up absorbing boundary.
            should_be_zero = tf.expand_dims(
                tf.expand_dims((beta != 0) & (f <= 0.), -1), -1)
            vol_matrix = tf.concat(
                [tf.expand_dims(mc1, -1),
                 tf.expand_dims(mc2, -1)], -1)
            return tf.where(should_be_zero, tf.zeros_like(vol_matrix),
                            vol_matrix)

        # We compute the relative error across time steps ts for a fixed expiry T:
        # error = | C(T, ts_i) - C(T, ts_{i+1}) |
        # where ts_i > ts_{i+1} and C(.) is the call option price.
        for ts in timesteps:
            euler_paths = euler_sampling.sample(
                dim=2,
                drift_fn=drift_fn,
                volatility_fn=_vol_fn,
                times=times,
                time_step=ts,
                num_samples=num_samples,
                initial_state=[initial_forward, initial_volatility],
                random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
                seed=test_seed,
                dtype=dtype)

            euler_paths = self.evaluate(euler_paths)
            euler_samples = euler_paths[..., 0]
            euler_price = np.average(np.maximum(euler_samples - strike, 0))
            euler_table.append(euler_price)

            paths = process.sample_paths(
                initial_forward=initial_forward,
                initial_volatility=initial_volatility,
                times=times,
                time_step=ts,
                num_samples=num_samples,
                seed=test_seed,
                validate_args=True,
                random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC)
            paths = self.evaluate(paths)
            samples = paths[..., 0]
            price = np.average(np.maximum(samples - strike, 0))
            process_table.append(price)

        euler_error = 0
        process_error = 0
        for i in range(0, len(timesteps) - 1):
            euler_error += np.abs(euler_table[i] - euler_table[i + 1])
            process_error += np.abs(process_table[i] - process_table[i + 1])
        # Average relative error should be lower.
        self.assertLessEqual(process_error, euler_error)
    def sample_paths(self,
                     times,
                     num_samples=1,
                     initial_state=None,
                     random_type=None,
                     seed=None,
                     swap_memory=True,
                     name=None,
                     time_step=None,
                     skip=0,
                     precompute_normal_draws=True,
                     watch_params=None):
        """Returns a sample of paths from the process using Euler sampling.

    The default implementation uses the Euler scheme. However, for particular
    types of Ito processes more efficient schemes can be used.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
        Default value: 1.
      initial_state: `Tensor` of shape `[dim]`. The initial state of the
        process.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random number
        generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Seed for the random number generator. The seed is
        only relevant if `random_type` is one of
        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
          STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an integer
        `Tensor` of shape `[2]`.
        Default value: `None` which means no seed is set.
      swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for
        this op. See an equivalent flag in `tf.while_loop` documentation for
        more details. Useful when computing a gradient of the op since
        `tf.while_loop` is used to propagate stochastic process in time.
        Default value: True.
      name: Python string. The name to give this op.
        Default value: `None` which maps to the default name `sample_paths`.
      time_step: Real scalar `Tensor`. The maximal distance between time points
        in the time grid used by the Euler scheme.
      skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
        Halton sequence to skip. Used only when `random_type` is 'SOBOL',
        'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
        Default value: `0`.
      precompute_normal_draws: Python bool. Indicates whether the noise
        increments in Euler scheme are precomputed upfront (see
        `models.euler_sampling.sample`). For `HALTON` and `SOBOL` random types
        the increments are always precomputed. While the resulting graph
        consumes more memory, the performance gains might be significant.
        Default value: `True`.
      watch_params: An optional list of zero-dimensional `Tensor`s of the same
        `dtype` as `initial_state`. If provided, specifies `Tensor`s with
        respect to which the differentiation of the sampling function will
        happen. A more efficient algorithm is used when `watch_params` are
        specified. Note that the function becomes differentiable only with
        respect to these `Tensor`s and the `initial_state`. The gradient with
        respect to any other `Tensor` is set to zero.

    Returns:
     A real `Tensor` of shape `[num_samples, k, n]` where `k` is the size of the
     `times`, and `n` is the dimension of the process.

    Raises:
      ValueError: If `time_step` is not supplied.
    """
        if time_step is None:
            raise ValueError('`time_step` can not be `None` when calling '
                             'sample_paths of GenericItoProcess.')
        name = name or (self._name + '_sample_path')
        with tf.name_scope(name):
            return euler_sampling.sample(
                self._dim,
                self._drift_fn,
                self._volatility_fn,
                times,
                num_samples=num_samples,
                initial_state=initial_state,
                random_type=random_type,
                time_step=time_step,
                seed=seed,
                swap_memory=swap_memory,
                skip=skip,
                precompute_normal_draws=precompute_normal_draws,
                watch_params=watch_params,
                dtype=self._dtype,
                name=name)
  def sample_paths(self,
                   times,
                   num_samples=1,
                   initial_state=None,
                   random_type=None,
                   seed=None,
                   swap_memory=True,
                   name=None,
                   time_step=None,
                   precompute_normal_draws=True):
    """Returns a sample of paths from the process using Euler sampling.

    The default implementation uses the Euler scheme. However, for particular
    types of Ito processes more efficient schemes can be used.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
        Default value: 1.
      initial_state: `Tensor` of shape `[dim]`. The initial state of the
        process.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random number
        generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Python `int`. The random seed to use. If not supplied, no seed is
        set.
      swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for
        this op. See an equivalent flag in `tf.while_loop` documentation for
        more details. Useful when computing a gradient of the op since
        `tf.while_loop` is used to propagate stochastic process in time.
        Default value: True.
      name: Python string. The name to give this op.
        Default value: `None` which maps to the default name `sample_paths`.
      time_step: Real scalar `Tensor`. The maximal distance between time points
        in the time grid used by the Euler scheme.
      precompute_normal_draws: Python bool. Indicates whether the noise
        increments in Euler scheme are precomputed upfront (see
        `models.euler_sampling.sample`). For `HALTON` and `SOBOL` random types
        the increments are always precomputed. While the resulting graph
        consumes more memory, the performance gains might be significant.
        Default value: `True`.

    Returns:
     A real `Tensor` of shape `[num_samples, k, n]` where `k` is the size of the
     `times`, and `n` is the dimension of the process.

    Raises:
      ValueError: If `time_step` is not supplied.
    """
    if time_step is None:
      raise ValueError('`time_step` can not be `None` when calling '
                       'sample_paths of GenericItoProcess.')
    name = name or (self._name + '_sample_path')
    with tf.name_scope(name):
      return euler_sampling.sample(
          self._dim,
          self._drift_fn,
          self._volatility_fn,
          times,
          num_samples=num_samples,
          initial_state=initial_state,
          random_type=random_type,
          time_step=time_step,
          seed=seed,
          swap_memory=swap_memory,
          precompute_normal_draws=precompute_normal_draws,
          dtype=self._dtype,
          name=name)