Example 1
def _interpolate_adjacent(times, values, name=None):
    """Interpolates linearly between adjacent values.

  Suppose `times` is an array `[t_1, t_2, ..., t_n]` of length `n` and `values`
  is an array `[f_1, ..., f_n]` of the same length. This function associates
  each value with the midpoint of a time interval, i.e. `f_i` is associated
  with the midpoint of the interval `[t_{i-1}, t_i]`, where the first interval
  is taken to be `[0, t_1]` (i.e. `t_0 = 0`). It then calculates the values at
  the interval boundaries by linearly interpolating between adjacent midpoint
  values. The values at the endpoints (i.e. `result[0]` and `result[n]`) are
  computed as follows:
  `result[0] = values[0] - 0.5 * (result[1] - values[0])` and
  `result[n] = values[n-1] - 0.5 * (result[n-1] - values[n-1])`.
  The rationale for these specific values is discussed in Ref. [1].

  Args:
    times: A rank 1 `Tensor` of real dtype. The times at which the interpolated
      values are to be computed. The values in the array should be positive and
      monotonically increasing.
    values: A rank 1 `Tensor` of the same dtype and shape as `times`. The values
      assigned to the midpoints of the time intervals.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: None which is mapped to the default name
        'interpolate_adjacent'.

  Returns:
    interval_values: The values interpolated from the supplied midpoint values
      as described above. A `Tensor` of the same dtype as `values` but of shape
      `[n+1]`, where `[n]` is the shape of `values`. The `i`th component of the
      result is the value associated with the time point `t_i`, with `t_0 = 0`.
  """
    with tf.compat.v1.name_scope(name,
                                 default_name='interpolate_adjacent',
                                 values=[times, values]):
        dt1 = diff(times, order=1, exclusive=False)
        dt2 = diff(times, order=2, exclusive=False)[1:]
        weight_right = dt1[:-1] / dt2
        weight_left = dt1[1:] / dt2
        interior_values = weight_right * values[1:] + weight_left * values[:-1]
        value_0 = values[0] - 0.5 * (interior_values[0] - values[0])
        value_n = values[-1] - 0.5 * (interior_values[-1] - values[-1])
        return tf.concat([[value_0], interior_values, [value_n]], axis=0)
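
A quick numeric illustration of the scheme above: the following NumPy re-implementation is an editorial sketch (the helper name `interpolate_adjacent_np` is invented here), not the library code, but it follows the same midpoint and endpoint formulas.

```python
import numpy as np


def interpolate_adjacent_np(times, values):
  """Editorial NumPy sketch of the midpoint interpolation described above."""
  # Prepend the implicit left boundary t_0 = 0 (assumes at least two values).
  t = np.concatenate([[0.0], np.asarray(times, dtype=float)])
  f = np.asarray(values, dtype=float)
  # The midpoint of the interval [t_{i-1}, t_i] carries the value f_i.
  mids = 0.5 * (t[:-1] + t[1:])
  # Linearly interpolate between adjacent midpoints at the interior boundaries.
  interior = f[:-1] + (f[1:] - f[:-1]) * (t[1:-1] - mids[:-1]) / (
      mids[1:] - mids[:-1])
  # Endpoint extrapolation matching the docstring formulas.
  left = f[0] - 0.5 * (interior[0] - f[0])
  right = f[-1] - 0.5 * (interior[-1] - f[-1])
  return np.concatenate([[left], interior, [right]])


print(interpolate_adjacent_np([1.0, 2.0, 3.0], [10.0, 20.0, 30.0]))
# Expected: [7.5, 15.0, 25.0, 32.5], which matches a hand calculation of
# `_interpolate_adjacent` on the same input.
```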
Example 2
def realized_volatility(sample_paths,
                        times=None,
                        scaling_factors=None,
                        returns_type=ReturnsType.LOG,
                        path_scale=PathScale.ORIGINAL,
                        axis=-1,
                        dtype=None,
                        name=None):
  r"""Calculates the total realized volatility for each path.

  With `t_i, i=0,...,N-1` being a discrete sequence of times at which a series
  `S_{t_i}, i=0,...,N-1` is observed, the logarithmic return process
  (`ReturnsType.LOG`) is given by:

  ```
  R_k = (log(S_{t_k} / S_{t_{k-1}}))^2
  ```

  Whereas for absolute returns (`ReturnsType.ABS`) it is given by:

  ```
  R_k = |S_{t_k} - S_{t_{k-1}}| / |S_{t_{k-1}}|
  ```

  Letting `dt_k = t_k - t_{k-1}`, the realized volatility is then calculated as:

  ```
  V = c * f( \Sum_{k=1}^{N-1} R_k / dt_k )
  ```

  Where `f` is the square root for logarithmic returns and the identity function
  for absolute returns. If `times` is not supplied then it is assumed that
  `dt_k = 1` everywhere. The arbitrary scaling factor `c` enables various
  flavours of averaging or annualization (for examples of which see [1] or
  section 9.7 of [2]).

  #### Examples

  Calculation of realized logarithmic volatility as in [1]:

  ```python
  import numpy as np
  import tensorflow as tf
  import tf_quant_finance as tff
  dtype=tf.float64
  num_samples = 1000
  num_times = 252
  seed = (1, 2)
  annual_vol = 20
  sigma = annual_vol / (100 * np.sqrt(num_times - 1))
  mu = -0.5*sigma**2

  gbm = tff.models.GeometricBrownianMotion(mu=mu, sigma=sigma, dtype=dtype)
  sample_paths = gbm.sample_paths(
      times=range(num_times),
      num_samples=num_samples,
      seed=seed,
      random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC)

  annualization = 100 * np.sqrt( (num_times / (num_times - 1)) )
  tf.math.reduce_mean(
    realized_volatility(sample_paths,
                        scaling_factors=annualization,
                        path_scale=PathScale.ORIGINAL,
                        axis=1))
  # 20.03408344960287
  ```

  Carrying on with the same paths, the realized absolute volatility (`RV_d2` in
  [3]) is:

  ```python
  scaling = 100 * np.sqrt((np.pi/(2 * (num_times-1))))
  tf.math.reduce_mean(
    realized_volatility(sample_paths,
                        scaling_factors=scaling,
                        returns_type=ReturnsType.ABS,
                        path_scale=PathScale.LOG))
  # 19.811590402553158
  ```

  #### References:
  [1]: CBOE. Summary Product Specifications Chart for S&P 500 Variance Futures.
  2012.
  https://cdn.cboe.com/resources/futures/sp_500_variance_futures_contract.pdf
  [2]: Iain J. Clark. Foreign exchange option pricing - A Practitioner's
  guide. Chapter 5. 2011.
  [3]: Zhu, S.P. and Lian, G.H., 2015. Analytically pricing volatility swaps
  under stochastic volatility. Journal of Computational and Applied Mathematics.

  Args:
    sample_paths: A real `Tensor` of shape
      `batch_shape_0 + [N] + batch_shape_1`.
    times: A real `Tensor` of shape compatible with `batch_shape_0 + [N] +
      batch_shape_1`. The times represented on the axis of interest (the `t_k`).
      Default value: None. Resulting in the assumption of unit time increments.
    scaling_factors: An optional real `Tensor` of shape compatible with
      `batch_shape_0 + batch_shape_1`. Any scaling factors to be applied to the
      result (e.g. for annualization).
      Default value: `None`. Resulting in `c=1` in the above calculation.
    returns_type: Value of ReturnsType. Indicates which definition of returns
      should be used.
      Default value: ReturnsType.LOG, representing logarithmic returns.
    path_scale: Value of PathScale. Indicates which space the supplied
      `sample_paths` are in. If required the paths will then be transformed onto
      the appropriate scale.
      Default value: PathScale.ORIGINAL.
    axis: Python int. The axis along which to calculate the statistic.
      Default value: -1 (the final axis).
    dtype: `tf.DType`. If supplied the dtype for the input and output `Tensor`s.
      Default value: `None` leading to use of the dtype of `sample_paths`.
    name: Python str. The name to give to the ops created by this function.
      Default value: `None` which maps to 'realized_volatility'.

  Returns:
    Tensor of shape equal to `batch_shape_0 + batch_shape_1` (i.e. with axis
      `axis` having been reduced over).
  """
  with tf.name_scope(name or 'realized_volatility'):
    sample_paths = tf.convert_to_tensor(sample_paths, dtype=dtype,
                                        name='sample_paths')
    dtype = dtype or sample_paths.dtype
    if returns_type == ReturnsType.LOG:
      component_transform = lambda t: tf.pow(t, 2)
      result_transform = tf.math.sqrt
      if path_scale == PathScale.ORIGINAL:
        transformed_paths = tf.math.log(sample_paths)
      elif path_scale == PathScale.LOG:
        transformed_paths = sample_paths
    elif returns_type == ReturnsType.ABS:
      component_transform = tf.math.abs
      result_transform = tf.identity
      if path_scale == PathScale.ORIGINAL:
        transformed_paths = sample_paths
      elif path_scale == PathScale.LOG:
        transformed_paths = tf.math.exp(sample_paths)

    diffs = component_transform(
        diff_ops.diff(transformed_paths, order=1, exclusive=True, axis=axis))
    denominators = 1
    if times is not None:
      times = tf.convert_to_tensor(times, dtype=dtype, name='times')
      denominators = diff_ops.diff(times, order=1, exclusive=True, axis=axis)
    if returns_type == ReturnsType.ABS:
      slices = transformed_paths.shape.rank * [slice(None)]
      slices[axis] = slice(None, -1)
      denominators = denominators * component_transform(
          transformed_paths[slices])
    path_statistics = result_transform(
        tf.math.reduce_sum(diffs / denominators, axis=axis))
    if scaling_factors is not None:
      scaling_factors = tf.convert_to_tensor(
          scaling_factors, dtype=dtype, name='scaling_factors')
      return scaling_factors * path_statistics
    return path_statistics
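
The return definitions can be checked independently of TensorFlow. The snippet below is an editorial NumPy cross-check (not part of the library) of both formulas on a toy price series with unit time increments and `c = 1`.

```python
import numpy as np

# Toy price series observed at unit time increments (dt_k = 1).
s = np.array([100.0, 101.0, 99.5, 100.5])

# Logarithmic realized volatility: sqrt of the sum of squared log returns,
# i.e. ReturnsType.LOG with paths supplied on PathScale.ORIGINAL.
log_returns = np.diff(np.log(s))
realized_log_vol = np.sqrt(np.sum(log_returns ** 2))

# Absolute-return realized volatility: sum of |S_k - S_{k-1}| / |S_{k-1}|,
# i.e. ReturnsType.ABS with paths supplied on PathScale.ORIGINAL.
abs_returns = np.abs(np.diff(s)) / np.abs(s[:-1])
realized_abs_vol = np.sum(abs_returns)

print(realized_log_vol)  # approx 0.0206
print(realized_abs_vol)  # approx 0.0349
```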
Example 3
def replicating_weights(ordered_strikes,
                        reference_strikes,
                        expiries,
                        validate_args=False,
                        dtype=None,
                        name=None):
    """Calculates the weights for options to recreate the variance swap payoff.

  This implements the approach in Appendix A of Demeterfi et al. [1] for
  calculating the weights of European options required to replicate the payoff
  of a variance swap given traded strikes. In particular, this function
  calculates the weights for the put option part of the portfolio (when
  `ordered_strikes` is descending) or for the call option part of the portfolio
  (when `ordered_strikes` is ascending). See the fair strike docstring for
  further details on variance swaps.

  #### Example

  ```python
  dtype = tf.float64
  ordered_put_strikes = [100, 95, 90, 85]
  reference_strikes = ordered_put_strikes[0]
  expiries = 0.25
  # Contains weights for put options at ordered_put_strikes[:-1]
  put_weights = replicating_weights(
    ordered_put_strikes, reference_strikes, expiries, dtype=dtype)
  # [0.00206927, 0.00443828, 0.00494591]
  ```

  #### References

  [1] Demeterfi, K., Derman, E., Kamal, M. and Zou, J., 1999. More Than You Ever
    Wanted To Know About Volatility Swaps. Goldman Sachs Quantitative Strategies
    Research Notes.

  Args:
    ordered_strikes: A real `Tensor` of liquidly traded strikes of shape
      `batch_shape + [num_strikes]`. The values must be sorted ascending if the
      strikes are for calls, or descending if the strikes are for puts. The
      final value in `ordered_strikes` will not itself receive a weight in the
      portfolio.
    reference_strikes: A `Tensor` of the same dtype as `ordered_strikes` and of
      shape compatible with `batch_shape`. An arbitrarily chosen strike
      representing an at the money strike price.
    expiries: A `Tensor` of the same dtype as `ordered_strikes` and of shape
      compatible with `batch_shape`. Represents the time to maturity of the
      options.
    validate_args: Python `bool`. When `True`, input `Tensor`s are checked for
      validity. The checks verify that `ordered_strikes` is indeed ordered. When
      `False` invalid inputs may silently render incorrect outputs, yet runtime
      performance may be improved.
      Default value: False.
    dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.
      Default value: None leading to use of `ordered_strikes.dtype`.
    name: Python str. The name to give to the ops created by this function.
      Default value: `None` which maps to 'variance_replicating_weights'.

  Returns:
    A `Tensor` of shape `batch_shape + [num_strikes - 1]` representing the
    weight which should be given to each strike in the replicating portfolio,
    save for the final strike which is not represented.
  """
    with tf.name_scope(name or 'replicating_weights'):
        # Input conversion.
        ordered_strikes = tf.convert_to_tensor(ordered_strikes,
                                               dtype=dtype,
                                               name='ordered_strikes')
        dtype = dtype or ordered_strikes.dtype
        reference_strikes = tf.expand_dims(
            tf.convert_to_tensor(reference_strikes,
                                 dtype=dtype,
                                 name='reference_strikes'), -1)
        expiries = tf.expand_dims(
            tf.convert_to_tensor(expiries, dtype=dtype, name='expiries'), -1)
        # Descending is required for the formulae regardless of use as control dep.
        strike_diff = diff_ops.diff(ordered_strikes, order=1, exclusive=True)
        strikes_descending = tf.math.reduce_all(strike_diff < 0)
        control_dependencies = []
        if validate_args:
            strikes_ascending = tf.math.reduce_all(strike_diff > 0)
            control_dependencies.append(
                tf.compat.v1.debugging.Assert(
                    tf.math.logical_or(strikes_descending, strikes_ascending),
                    [strike_diff]))
        with tf.control_dependencies(control_dependencies):
            # Weights calculation
            term_lin = (ordered_strikes -
                        reference_strikes) / reference_strikes
            term_log = tf.math.log(ordered_strikes) - tf.math.log(
                reference_strikes)
            payoff = (2.0 / expiries) * (term_lin - term_log)
            payoff_diff = diff_ops.diff(payoff, order=1, exclusive=True)
            r_vals = tf.math.divide_no_nan(payoff_diff, strike_diff)
            zero = tf.zeros(r_vals.shape[:-1] + [1], dtype=r_vals.dtype)
            r_vals_diff = diff_ops.diff(tf.concat([zero, r_vals], axis=-1),
                                        order=1,
                                        exclusive=True)
            # If the strikes were for puts we need to flip the sign before returning.
            return tf.where(strikes_descending, -r_vals_diff, r_vals_diff)
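
For intuition, the weight construction above can be reproduced in plain NumPy. The sketch below is an editorial illustration using the docstring's put example (descending strikes), not the library implementation itself.

```python
import numpy as np

strikes = np.array([100.0, 95.0, 90.0, 85.0])  # ordered_put_strikes
k_ref = strikes[0]                             # reference (ATM) strike
expiry = 0.25

# Piecewise-linear target payoff 2/T * ((K - K_ref)/K_ref - log(K / K_ref)),
# evaluated at the traded strikes.
payoff = (2.0 / expiry) * ((strikes - k_ref) / k_ref - np.log(strikes / k_ref))

# Slope of the payoff between successive strikes; the increment of the slope
# at each strike is the weight of the option struck there (bar the last one).
slopes = np.diff(payoff) / np.diff(strikes)
weights = np.diff(np.concatenate([[0.0], slopes]))

# The strikes were supplied in descending (put) order, so flip the sign.
print(-weights)
# Approximately [0.00206927, 0.00443828, 0.00494591], matching the docstring
# example above.
```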
Example 4
def segment_diff(x,
                 segment_ids,
                 order=1,
                 exclusive=False,
                 dtype=None,
                 name=None):
    """Computes difference of successive elements in a segment.

  For a complete description of segment_* ops see documentation of
  `tf.segment_max`. This op extends the `diff` functionality to segmented
  inputs.

  The behaviour of this op is the same as that of the op `diff` within each
  segment. The result is effectively a concatenation of the results of `diff`
  applied to each segment.

  #### Example

  ```python
    x = tf.constant([2, 5, 1, 7, 9] + [32, 10, 12, 3] + [4, 8, 5])
    segments = tf.constant([0, 0, 0, 0, 0] + [1, 1, 1, 1] + [2, 2, 2])
    # First order diff. Expected result: [3, -4, 6, 2, -22, 2, -9, 4, -3]
    dx1 = segment_diff(
        x, segment_ids=segments, order=1, exclusive=True)
    # Non-exclusive, second order diff.
    # Expected result: [2, 5, -1, 2, 8, 32, 10, -20, -7, 4, 8, 1]
    dx2 = segment_diff(
        x, segment_ids=segments, order=2, exclusive=False)
  ```

  Args:
    x: A rank 1 `Tensor` of any dtype for which arithmetic operations are
      permitted.
    segment_ids: A `Tensor`. Must be one of the following types: int32, int64. A
      1-D tensor whose size is equal to the size of `x`. Values should be sorted
      and can be repeated.
    order: Positive Python int. The order of the difference to compute. `order =
      1` corresponds to the difference between successive elements.
      Default value: 1
    exclusive: Python bool. If True, the first `order` elements of each segment
      are omitted from the result; if False, they are kept as-is (matching the
      behaviour of the `diff` op).
      Default value: False
    dtype: Optional `tf.Dtype`. If supplied, the dtype for `x` to use when
      converting to `Tensor`.
      Default value: None which maps to the default dtype inferred by TF.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: None which is mapped to the default name 'segment_diff'.

  Returns:
    diffs: A `Tensor` of the same dtype as `x`. Assuming that each segment is
      of length greater than or equal to order, if `exclusive` is True,
      then the size is `n-order*k` where `n` is the size of x,
      `k` is the number of different segment ids supplied if `segment_ids` is
      not None or 1 if `segment_ids` is None. If any of the segments is of
      length less than the order, then the size is:
      `n-sum(min(order, length(segment_j)), j)` where the sum is over segments.
      If `exclusive` is False, then the size is `n`.
  """
    with tf.compat.v1.name_scope(name, default_name='segment_diff',
                                 values=[x]):
        x = tf.convert_to_tensor(x, dtype=dtype)
        raw_diffs = diff_ops.diff(x, order=order, exclusive=exclusive)
        if segment_ids is None:
            return raw_diffs
        # If segment ids are supplied, raw_diffs are incorrect at locations:
        # p, p+1, ... min(p+order-1, m_p-1) where p is the index of the first
        # element of a segment other than the very first segment (which is
        # already correct). m_p is the segment length.
        # Find positions where the segments begin.
        has_segment_changed = tf.concat(
            [[False],
             tf.not_equal(segment_ids[1:] - segment_ids[:-1], 0)],
            axis=0)
        # Shape [k, 1]
        segment_start_index = tf.cast(tf.where(has_segment_changed),
                                      dtype=tf.int32)
        segment_end_index = tf.concat([
            tf.reshape(segment_start_index, [-1])[1:], [tf.size(segment_ids)]
        ],
                                      axis=0)
        segment_end_index = tf.reshape(segment_end_index, [-1, 1])
        # The indices of locations that need to be adjusted. This needs to be
        # constructed in steps. First we generate p, p+1, ... p+order-1.
        # Shape [num_segments-1, order]
        fix_indices = (segment_start_index +
                       tf.range(order, dtype=segment_start_index.dtype))
        in_bounds = tf.where(fix_indices < segment_end_index)
        # Keep only the ones in bounds.
        fix_indices = tf.reshape(tf.gather_nd(fix_indices, in_bounds), [-1, 1])

        needs_fix = tf.scatter_nd(
            fix_indices,
            # Unfortunately, scatter_nd doesn't support bool on GPUs so we need to
            # do ints here and then convert to bool.
            tf.reshape(tf.ones_like(fix_indices, dtype=tf.int32), [-1]),
            shape=tf.shape(x))
        # If exclusive is False, then needs_fix means we need to replace the values
        # in raw_diffs at those locations with the values in x.
        needs_fix = tf.cast(needs_fix, dtype=tf.bool)
        if not exclusive:
            return tf.where(needs_fix, x, raw_diffs)

        # If exclusive is True, we have to be more careful. The raw_diffs
        # computation has removed the first 'order' elements. After removing the
        # corresponding elements from needs_fix, we use it to remove the elements
        # from raw_diffs.
        return tf.boolean_mask(raw_diffs, tf.logical_not(needs_fix[order:]))
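
The per-segment behaviour can also be spelled out with a short NumPy sketch (an editorial illustration, not the library implementation): apply an ordinary difference of the requested order within each segment and concatenate the results.

```python
import numpy as np


def segment_diff_np(x, segment_ids, order=1, exclusive=False):
  """Editorial NumPy sketch mirroring the per-segment semantics above."""
  x = np.asarray(x)
  segment_ids = np.asarray(segment_ids)
  pieces = []
  for sid in np.unique(segment_ids):  # Segment ids are assumed sorted.
    seg = x[segment_ids == sid]
    d = seg[order:] - seg[:-order]
    # Non-exclusive keeps the first `order` elements of the segment as-is.
    pieces.append(d if exclusive else np.concatenate([seg[:order], d]))
  return np.concatenate(pieces)


x = [2, 5, 1, 7, 9, 32, 10, 12, 3, 4, 8, 5]
segments = [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
print(segment_diff_np(x, segments, order=1, exclusive=True))
# Expected: [3, -4, 6, 2, -22, 2, -9, 4, -3]
print(segment_diff_np(x, segments, order=2, exclusive=False))
# Expected: [2, 5, -1, 2, 8, 32, 10, -20, -7, 4, 8, 1]
```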