Example #1
    def _setup(self, coupon_spec):
        """Setup tensors for efficient computations."""

        cpn_frequency = dates.periods.PeriodTensor.stack(
            [x.coupon_frequency for x in coupon_spec], axis=0)
        cpn_dates = self._generate_schedule(cpn_frequency,
                                            coupon_spec[-1].businessday_rule)
        payment_dates = cpn_dates[:, 1:]
        notional = tf.expand_dims(tf.convert_to_tensor(
            [x.notional for x in coupon_spec], dtype=self._dtype),
                                  axis=-1)
        notional = tf.repeat(notional,
                             payment_dates.shape.as_list()[-1],
                             axis=-1)
        daycount_fractions = rc.get_daycount_fraction(
            cpn_dates[:, :-1],
            cpn_dates[:, 1:],
            coupon_spec[-1].daycount_convention,
            dtype=self._dtype)
        fixed_rate = tf.convert_to_tensor([x.coupon_rate for x in coupon_spec],
                                          dtype=self._dtype)
        coupon_rate = tf.expand_dims(fixed_rate, axis=-1)
        coupon_rate = tf.repeat(coupon_rate,
                                payment_dates.shape.as_list()[-1],
                                axis=-1)
        contract_index = tf.repeat(tf.range(0, len(coupon_spec)),
                                   notional.shape.as_list()[-1])

        self._payment_dates = payment_dates.reshape([-1])
        self._notional = tf.reshape(notional, [-1])
        self._daycount_fractions = tf.reshape(daycount_fractions, [-1])
        self._coupon_rate = tf.reshape(coupon_rate, [-1])
        self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype)
        self._contract_index = contract_index
Example #2
 def _bond_reconstitution(self, times, curve_times, mean_reversion,
                          rate_paths, y_t):
     """Compute discount bond prices using Eq. 10.18 in Ref [2]."""
     num_curve_nodes = curve_times.shape.as_list()[0]  # m
     num_sim_steps = times[1:].shape.as_list()[0]  # k
     t = tf.reshape(
         tf.repeat(tf.expand_dims(times[1:], axis=-1), self._dim, axis=-1),
         (1, 1, num_sim_steps, self._dim))
     curve_times = tf.reshape(curve_times, (1, num_curve_nodes, 1, 1))
     curve_times = tf.repeat(curve_times, self._dim, axis=-1)
     f_0_t = self._instant_forward_rate_fn(t)
     x_t = tf.expand_dims(rate_paths, axis=1) - f_0_t
     p_0_t = tf.math.exp(-self._initial_discount_rate_fn(t) * t)
     p_0_t_tau = tf.math.exp(
         -self._initial_discount_rate_fn(curve_times + t) *
         (curve_times + t)) / p_0_t
     # Transpose so the `dim` is the trailing dimension.
     kappa = tf.transpose(mean_reversion[:, 1:])
     kappa = tf.reshape(kappa, (1, 1, num_sim_steps, self._dim))
     g_t_tau = (1. - tf.math.exp(-kappa * curve_times)) / kappa
     term1 = x_t * g_t_tau
     y_t = tf.reshape(tf.transpose(y_t[:, 1:]),
                      (1, 1, num_sim_steps, self._dim))
     term2 = y_t * g_t_tau**2
     p_t_tau = p_0_t_tau * tf.math.exp(-term1 - 0.5 * term2)
     return p_t_tau
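
For reference, the reconstitution identity the code evaluates (read directly off the terms above, with G standing for `g_t_tau` and kappa for the mean reversion) is:

    P(t, t + tau) = P(0, t + tau) / P(0, t) * exp(-x_t * G - 0.5 * y_t * G**2),
    where G(t, tau) = (1 - exp(-kappa * tau)) / kappa.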
Example #3
  def input_fn():
    # text input
    text = tf.compat.v1.placeholder(tf.string, [batch_size], name="input_text")

    # text tokenize
    tokenizer = tft.SentencepieceTokenizer(
        model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
    if substitute_newline:
      text = tf.strings.regex_replace(text, "\n", substitute_newline)
    ids = tokenizer.tokenize(text)
    ids = ids[:, :max_encoder_length - 2]

    # Add [CLS] and [SEP] special tokens.
    prefix = tf.repeat(tf.constant([[65]]), batch_size, axis=0)
    suffix = tf.repeat(tf.constant([[66]]), batch_size, axis=0)
    ids = tf.concat([prefix, ids, suffix], axis=1)
    if isinstance(ids, tf.RaggedTensor):
      ids = ids.to_tensor(0)

    # text padding: Pad only if necessary and reshape properly
    padded_ids = dynamic_padding(ids, max_encoder_length)
    ids = tf.slice(padded_ids, [0, 0], [batch_size, max_encoder_length])

    receiver_tensors = {"input": text}
    features = {"input_ids": tf.cast(ids, tf.int32, name="input_ids")}

    return tf.estimator.export.ServingInputReceiver(
        features=features, receiver_tensors=receiver_tensors)
Example #4
  def _setup_tensors(self):
    """Sets up tensors for efficient computations."""
    date_schedule = dates.PeriodicSchedule(
        start_date=self._start_date,
        end_date=self._maturity_date,
        tenor=self._reset_frequency).dates()

    # rates reset at the beginning of the coupon period
    reset_dates = date_schedule[:, :-1]
    # payments occur at the end of the coupon period
    payment_dates = date_schedule[:, 1:]
    daycount_fractions = rc.get_daycount_fraction(
        date_schedule[:, :-1],
        date_schedule[:, 1:],
        self._daycount_convention,
        dtype=self._dtype)
    contract_index = tf.repeat(
        tf.range(0, self._batch_size),
        payment_dates.shape.as_list()[-1])

    self._num_caplets = daycount_fractions.shape.as_list()[-1]
    # TODO(b/152164086): Use the functionality from dates library
    self._rate_term = tf.repeat(tf.cast(reset_dates[:, 0].days_until(
        payment_dates[:, 0]), dtype=self._dtype) / 365.0, self._num_caplets)
    self._reset_dates = dates.DateTensor.reshape(reset_dates, [-1])
    self._payment_dates = dates.DateTensor.reshape(payment_dates, [-1])
    self._accrual_start_dates = dates.DateTensor.reshape(reset_dates, [-1])
    self._accrual_end_dates = dates.DateTensor.reshape(payment_dates, [-1])
    self._daycount_fractions = tf.reshape(daycount_fractions, [-1])
    self._contract_index = contract_index
    self._strike = tf.repeat(self._strike, self._num_caplets)
    self._is_cap = tf.repeat(self._is_cap, self._num_caplets)
def _map_payoff_to_sim_times(indices, payoff, num_samples):
  """Maps the swaption payoffs to short rate simulation times.

  Swaption payoffs are calculated at the Bermudan swaption's exercise times.
  However, for the LSM algorithm, we need short rate simulations and swaption
  payoffs at the union of all exercise times in the batch of swaptions. This
  function takes the payoff of each swaption at its respective exercise times
  and maps it to all simulation times, setting the payoff to 0 whenever the
  simulation time is not an exercise time for that swaption.

  Args:
    indices: A `Tensor` of shape `batch_shape + num_exercise_times` containing
      the index of exercise time in the vector of simulation times.
    payoff: A real tensor of shape
      `[num_samples] + batch_shape + num_exercise_times` containing the
      exercise value of the underlying swap on each exercise time.
    num_samples: A scalar `Tensor` specifying the number of samples on which
      swaption payoff is computed.

  Returns:
    A tuple of `Tensor`s. The first is an integer `Tensor` of shape
    `[num_samples] + batch_shape + [num_simulation_times]` that contains `1`
    if the corresponding simulation time is one of the exercise times for the
    swaption. The second is a real `Tensor` of the same shape that contains
    the exercise value of the swaption if the corresponding simulation time is
    an exercise time for the swaption, and 0 otherwise.
  """
  indices = tf.expand_dims(indices, axis=0)
  indices = tf.repeat(indices, num_samples, axis=0)
  index_list = list()
  tensor_shape = np.array(indices.shape.as_list())
  output_shape = indices.shape.as_list()[:-1] + [
      tf.math.reduce_max(indices) + 1
  ]
  num_elements = np.prod(tensor_shape)
  for dim, _ in enumerate(tensor_shape[:-1]):
    idx = tf.range(0, tensor_shape[dim], dtype=indices.dtype)
    idx = tf.tile(
        tf.repeat(idx, np.prod(tensor_shape[dim + 1:])),
        [np.prod(tensor_shape[:dim])])
    index_list.append(idx)

  index_list.append(tf.reshape(indices, [-1]))
  # We need to transform `payoff` from its initial shape of
  # [num_samples, batch_shape, num_exercise_times] to a new `Tensor` with
  # shape = [num_samples, batch_shape, num_simulation_times] such that
  # payoff_new[..., indices] = payoff.
  # We achieve this by first creating `payoff_new` as a SparseTensor with
  # nonzero values at the appropriate indices, and then converting the sparse
  # tensor to a dense tensor.
  sparse_indices = tf.cast(tf.stack(index_list, axis=-1), dtype=np.int64)
  is_exercise_time = tf.sparse.to_dense(
      tf.sparse.SparseTensor(sparse_indices, tf.ones(shape=num_elements),
                             output_shape),
      validate_indices=False)
  payoff = tf.sparse.to_dense(
      tf.sparse.SparseTensor(sparse_indices, tf.reshape(payoff, [-1]),
                             output_shape),
      validate_indices=False)
  return is_exercise_time, payoff
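
A toy check of the scatter above, with hypothetical inputs (one swaption, batch_shape = [1], whose two exercise times sit at simulation indices 1 and 3):

indices = tf.constant([[1, 3]], dtype=tf.int64)  # batch_shape + [num_exercise_times]
payoff = tf.constant([[[0.7, 0.2]]])             # [num_samples] + batch_shape + [...]
is_exercise_time, mapped = _map_payoff_to_sim_times(indices, payoff, num_samples=1)
# is_exercise_time[0, 0] == [0., 1., 0., 1.]; mapped[0, 0] == [0., 0.7, 0., 0.2]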
Example #6
    def _setup(self, coupon_spec):
        """Setup tensors for efficient computations."""

        cpn_frequency = dates.periods.PeriodTensor.stack(
            [x.coupon_frequency for x in coupon_spec], axis=0)
        cpn_dates = dates.PeriodicSchedule(
            start_date=self._start_date,
            end_date=self._end_date,
            tenor=cpn_frequency,
            roll_convention=coupon_spec[-1].businessday_rule).dates()
        accrual_start_dates = cpn_dates[:, :-1]
        ref_term = dates.periods.PeriodTensor.stack(
            [x.reference_rate_term for x in coupon_spec], axis=0)

        accrual_end_dates = cpn_dates[:, :-1] + dates.periods.PeriodTensor.expand_dims(
            ref_term, axis=-1).broadcast_to(accrual_start_dates.shape)
        coupon_start_dates = cpn_dates[:, :-1]
        coupon_end_dates = cpn_dates[:, 1:]
        payment_dates = cpn_dates[:, 1:]

        daycount_fractions = rc.get_daycount_fraction(
            cpn_dates[:, :-1],
            cpn_dates[:, 1:],
            coupon_spec[-1].daycount_convention,
            dtype=self._dtype)

        notional = tf.repeat(
            tf.convert_to_tensor([x.notional for x in coupon_spec],
                                 dtype=self._dtype),
            payment_dates.shape.as_list()[-1])

        coupon_basis = tf.repeat(
            tf.convert_to_tensor([x.coupon_basis for x in coupon_spec],
                                 dtype=self._dtype),
            payment_dates.shape.as_list()[-1])

        coupon_multiplier = tf.repeat(
            tf.convert_to_tensor([x.coupon_multiplier for x in coupon_spec],
                                 dtype=self._dtype),
            payment_dates.shape.as_list()[-1])

        contract_index = tf.repeat(tf.range(0, len(coupon_spec)),
                                   payment_dates.shape.as_list()[-1])

        self._num_cashflows = daycount_fractions.shape.as_list()[-1]
        self._coupon_start_dates = coupon_start_dates.reshape([-1])
        self._coupon_end_dates = coupon_end_dates.reshape([-1])
        self._payment_dates = payment_dates.reshape([-1])
        self._accrual_start_date = accrual_start_dates.reshape([-1])
        self._accrual_end_date = accrual_end_dates.reshape([-1])
        self._notional = notional
        self._daycount_fractions = tf.reshape(daycount_fractions, [-1])
        self._coupon_basis = coupon_basis
        self._coupon_multiplier = coupon_multiplier
        self._contract_index = contract_index
Example #7
def _prepare_indices(idx0, idx1, idx2, idx3):
    """Prepare indices to get relevant slice from discount curve simulations."""
    len0 = idx0.shape.as_list()[0]
    len1 = idx1.shape.as_list()[0]
    len3 = idx3.shape.as_list()[0]
    idx0 = tf.repeat(idx0, len1 * len3)
    idx1 = tf.tile(tf.repeat(idx1, len3), [len0])
    idx2 = tf.tile(tf.repeat(idx2, len3), [len0])
    idx3 = tf.tile(idx3, [len0 * len1])
    return tf.stack([idx0, idx1, idx2, idx3], axis=-1)
def _bond_option_variance(model, option_expiry, bond_maturity, dim):
    """Computes black equivalent variance for bond options.

  Black equivalent variance is defined as the variance to use in the Black
  formula to obtain the model implied price of European bond options.

  Args:
    model: An instance of `VectorHullWhiteModel`.
    option_expiry: A rank 1 `Tensor` of real dtype specifying the time to
      expiry of each option.
    bond_maturity: A rank 1 `Tensor` of real dtype specifying the time to
      maturity of underlying zero coupon bonds.
    dim: Dimensionality of the Hull-White process.

  Returns:
    A rank 1 `Tensor` of same dtype and shape as the inputs with computed
    Black-equivalent variance for the underlying options.
  """
    # pylint: disable=protected-access
    if model._sample_with_generic:
        raise ValueError('The parameterization of `mean_reversion` and/or '
                         '`volatility` does not support analytic computation '
                         'of bond option variance.')
    mean_reversion = model.mean_reversion(option_expiry)
    volatility = model.volatility(option_expiry)

    option_expiry = tf.repeat(tf.expand_dims(option_expiry, axis=0),
                              dim,
                              axis=0)
    bond_maturity = tf.repeat(tf.expand_dims(bond_maturity, axis=0),
                              dim,
                              axis=0)

    var_between_vol_knots = model._variance_int(model._padded_knots,
                                                model._jump_locations,
                                                model._jump_values_vol,
                                                model._jump_values_mr)
    varx_at_vol_knots = tf.concat([
        model._zero_padding,
        vector_hull_white._cumsum_using_matvec(var_between_vol_knots)
    ],
                                  axis=1)

    time_index = tf.searchsorted(model._jump_locations, option_expiry)
    vn = tf.concat([model._zero_padding, model._jump_locations], axis=1)

    var_expiry = model._variance_int(tf.gather(vn, time_index, batch_dims=1),
                                     option_expiry, volatility, mean_reversion)
    var_expiry = var_expiry + tf.gather(
        varx_at_vol_knots, time_index, batch_dims=1)
    var_expiry = var_expiry * (
        tf.math.exp(-mean_reversion * option_expiry) -
        tf.math.exp(-mean_reversion * bond_maturity))**2 / mean_reversion**2
    # pylint: enable=protected-access
    return var_expiry
Example #9
 def likelihood_log_prob_fn(b0, b1, mu_out, sigma_out, weight):
     return tfd.Independent(
         tfd.Mixture(
             tfd.Categorical(probs=tf.stack([
                 tf.repeat(1 - weight[..., tf.newaxis], 20, axis=-1),
                 tf.repeat(weight[..., tf.newaxis], 20, axis=-1)
             ], -1)), [
                 tfd.Normal(loc=b0[..., tf.newaxis] +
                            b1[..., tf.newaxis] * predictors,
                            scale=y_sigma),
                 tfd.Normal(loc=mu_out[..., tf.newaxis],
                            scale=y_sigma + sigma_out[..., tf.newaxis])
             ]), 1).log_prob(obs)
def _prepare_indices(idx0, idx1, idx2, idx3):
  """Prepares indices to get relevant slice from discount curve simulations."""
  # For a 4-D `Tensor` x, creates indices for tf.gather_nd to retrieve
  # x[i, j, j, k].
  len0 = idx0.shape.as_list()[0]
  len1 = idx1.shape.as_list()[0]
  len3 = idx3.shape.as_list()[0]
  idx0 = tf.repeat(idx0, len1 * len3)
  idx1 = tf.tile(tf.repeat(idx1, len3), [len0])
  idx2 = tf.tile(tf.repeat(idx2, len3), [len0])
  idx3 = tf.tile(idx3, [len0 * len1])

  return tf.stack([idx0, idx1, idx2, idx3], axis=-1)
def _prepare_indices_ijjk(idx0, idx1, idx2, idx3):
    """Prepares indices to get x[i, j, j, k]."""
    # For a 4-D `Tensor` x, creates indices for tf.gather_nd to retrieve
    # x[i, j, j, k].
    len0 = tf.shape(idx0)[0]
    len1 = tf.shape(idx1)[0]
    len3 = tf.shape(idx3)[0]
    idx0 = tf.repeat(idx0, len1 * len3)
    idx1 = tf.tile(tf.repeat(idx1, len3), [len0])
    idx2 = tf.tile(tf.repeat(idx2, len3), [len0])
    idx3 = tf.tile(idx3, [len0 * len1])

    return tf.stack([idx0, idx1, idx2, idx3], axis=-1)
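
A minimal usage sketch with hypothetical shapes: paired with tf.gather_nd, these indices pull the x[i, j, j, k] "diagonal" slice out of a 4-D tensor.

x = tf.random.normal([2, 3, 3, 4])
nd_idx = _prepare_indices_ijjk(tf.range(2), tf.range(3), tf.range(3), tf.range(4))
sliced = tf.reshape(tf.gather_nd(x, nd_idx), [2, 3, 4])
# sliced[i, j, k] == x[i, j, j, k]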
Example #12
def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a).data
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  if original_shape is not None and None not in original_shape:
    if not original_shape:
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()

  repeats = asarray(repeats).data
  result = tf.repeat(a, repeats, axis)
  result.set_shape(original_shape)

  return utils.tensor_to_ndarray(result)
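
For reference, the tf.repeat semantics this wrapper preserves:

tf.repeat([1, 2, 3], repeats=2)                      # [1, 1, 2, 2, 3, 3]
tf.repeat([[1, 2], [3, 4]], repeats=[1, 2], axis=0)  # [[1, 2], [3, 4], [3, 4]]
tf.repeat([[1, 2], [3, 4]], repeats=2)               # flattened: [1, 1, 2, 2, 3, 3, 4, 4]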
def _prepare_swaption_indices(tensor_shape):
  """Indices for `gather_nd` for analytic valuation.

  For a `Tensor` x of shape `tensor_shape` = [n] + batch_shape + [n], this
  function returns indices for tf.gather_nd to get `x[i,...,i]`

  Args:
    tensor_shape: A list of length `k` representing shape of the `Tensor`.

  Returns:
    A `Tensor` of shape `(num_elements, k)`, where `num_elements = n * batch_size`,
    of dtype tf.int64.
  """

  tensor_shape = np.array(tensor_shape, dtype=np.int64)
  batch_shape = tensor_shape[1:-1]
  batch_size = np.prod(batch_shape)
  index_list = []
  for i in range(len(tensor_shape)):
    index = np.arange(0, tensor_shape[i], dtype=np.int64)
    if i == 0 or i == len(tensor_shape) - 1:
      index = tf.tile(index, [batch_size])
    else:
      index = tf.tile(
          tf.repeat(index, np.prod(tensor_shape[i+1:])),
          [np.prod(tensor_shape[1:i])])
    index_list.append(index)

  return tf.stack(index_list, axis=-1)
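
A small sketch (hypothetical shapes) of the gather this feeds; note the result comes back ordered batch-first:

x = tf.random.normal([3, 5, 3])  # [n] + batch_shape + [n]
nd_idx = _prepare_swaption_indices(x.shape.as_list())
d = tf.reshape(tf.gather_nd(x, nd_idx), [5, 3])
# d[j, i] == x[i, j, i]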
Example #14
    def _conditional_mean_x(self, t, mr_t, sigma_t):
        """Computes the drift term in [1], Eq. 10.39."""
        t = tf.repeat(tf.expand_dims(t, axis=0), self._dim, axis=0)
        time_index = tf.searchsorted(self._jump_locations, t)
        vn = tf.concat([self._zero_padding, self._jump_locations], axis=1)
        y_between_vol_knots = self._y_integral(self._padded_knots,
                                               self._jump_locations,
                                               self._jump_values_vol,
                                               self._jump_values_mr)

        y_at_vol_knots = tf.concat(
            [self._zero_padding,
             _cumsum_using_matvec(y_between_vol_knots)],
            axis=1)

        ex_between_vol_knots = self._ex_integral(self._padded_knots,
                                                 self._jump_locations,
                                                 self._jump_values_vol,
                                                 self._jump_values_mr,
                                                 y_at_vol_knots[:, :-1])

        ex_at_vol_knots = tf.concat(
            [self._zero_padding,
             _cumsum_using_matvec(ex_between_vol_knots)],
            axis=1)

        c = tf.gather(y_at_vol_knots, time_index, batch_dims=1)
        exp_x_t = self._ex_integral(tf.gather(vn, time_index, batch_dims=1), t,
                                    sigma_t, mr_t, c)
        exp_x_t = exp_x_t + tf.gather(
            ex_at_vol_knots, time_index, batch_dims=1)
        exp_x_t = (exp_x_t[:, 1:] - exp_x_t[:, :-1]) * tf.math.exp(
            -tf.broadcast_to(mr_t, t.shape)[:, 1:] * t[:, 1:])
        return exp_x_t
Example #15
def _repeat_batch(batch_sizes: Sequence[int],
                  ds: tf.data.Dataset,
                  repeat: int = 1) -> tf.data.Dataset:
    """Tiles the inner most batch dimension."""
    if repeat <= 1:
        return ds
    if batch_sizes[-1] % repeat != 0:
        raise ValueError(
            f'The last element of `batch_sizes` ({batch_sizes}) must '
            f'be divisible by `repeat` ({repeat}).')
    # Perform regular batching with reduced number of elements.
    for i, batch_size in enumerate(reversed(batch_sizes)):
        ds = ds.batch(batch_size // repeat if i == 0 else batch_size,
                      drop_remainder=True)
    # Repeat batch.
    fn = lambda x: tf.repeat(x, repeats=repeat, axis=len(batch_sizes) - 1)

    def repeat_inner_batch(example):
        return jax.tree_map(fn, example)

    ds = ds.map(repeat_inner_batch, num_parallel_calls=tf.data.AUTOTUNE)
    # Unbatch.
    for _ in batch_sizes:
        ds = ds.unbatch()
    return ds
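
A toy run, assuming a single-level batch_sizes; each element is repeated in place while batch boundaries are preserved:

ds = _repeat_batch([4], tf.data.Dataset.range(8), repeat=2)
print(list(ds.as_numpy_iterator()))
# [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]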
Example #16
    def _parse_fn(record):
        """Parses a record into a feature_dict."""
        feature_values = tf.io.parse_single_example(
            serialized=record,
            features={
                'i/o':
                tf.io.FixedLenFeature([], tf.string, default_value=''),
                'program_encoding':
                tf.io.FixedLenFeature([], tf.string, default_value=''),
            })

        ios = tf.strings.split(tf.strings.split(feature_values['i/o'],
                                                sep='>'),
                               sep='<')

        inputs, outputs = ios.merge_dims(0, 1)[::2], ios.merge_dims(0, 1)[1::2]
        # Step 1. Parse inputs into tokens.
        inputs = tf.strings.unicode_split(inputs, 'UTF-8').to_tensor()
        inputs = char_table.lookup(inputs)  # Map characters to tokens.

        # Step 2. Parse outputs into tokens.
        split_outputs = tf.strings.unicode_split(
            tf.strings.split(outputs, sep='|'), 'UTF-8')
        outputs = split_outputs.merge_dims(1, 2).to_tensor()
        outputs = char_table.lookup(outputs)
        # Partition output into substrings by partial program.
        split_outputs = tf.map_fn(
            lambda x: partition_ragged_tensor(x, num_partial_programs),
            split_outputs).to_tensor()
        split_outputs = char_table.lookup(split_outputs)

        # Step 3. Parse program into tokens.
        program_encoding = tf.strings.split(
            tf.strings.split(feature_values['program_encoding'], sep='|'),
            sep=' ')
        program_encoding = tf.strings.to_number(program_encoding,
                                                out_type=tf.int32)

        # Partition the rows of program into partial programs.
        program_encoding = partition_ragged_tensor(program_encoding,
                                                   num_partial_programs)

        # Add EOS token to each partial program.
        program_encoding = tf.map_fn(
            lambda x: tf.concat([x, [eos_token]], axis=-1),
            program_encoding).to_tensor()

        n_rows = tf.shape(program_encoding)[0]
        if n_rows < num_partial_programs:
            n_cols = tf.shape(program_encoding)[1]
            pad_sequence = tf.one_hot(0,
                                      n_cols,
                                      on_value=eos_token,
                                      dtype=tf.int32)
            pad_block = tf.repeat([pad_sequence],
                                  [num_partial_programs - n_rows],
                                  axis=0)
            program_encoding = tf.concat([program_encoding, pad_block], axis=0)

        return inputs, outputs, program_encoding, split_outputs
Example #17
def _sample_bates(total_count, low, high, n, seed=None):
  """Vectorized production of `Bates` samples.

  Args:
    total_count: (Batches of) counts of `Uniform`s to take means of.  Should
      have integer dtype and already be broadcasted to the batch shape.
    low: (Batches of) lower bounds of the `Uniform` variables to sample.  Should
      be the same floating dtype as `high` and broadcastable to the batch shape.
    high: (Batches of) upper bounds of the `Uniform` variables to sample. Should
      be the same floating dtype as `low` and broadcastable to the batch shape.
    n: `int32` number of samples to generate.
    seed: Random seed to pass to `Uniform` sampler.

  Returns:
    samples: Samples of (batches of) the `Bates` variable.  Will have same dtype
      as `low` and `high`. If the batch shape is `[B1,..., Bn]`, `samples` has
      shape `[n, B1,..., Bn]`.
  """

  # 1. Sample Uniform(0, 1)s, flattening the batch dimension into axis 0.
  uniform_sample_shape = tf.concat([[tf.reduce_sum(total_count)], [n]], axis=0)
  uniform_samples = samplers.uniform(
      uniform_sample_shape, minval=0., maxval=1., dtype=low.dtype, seed=seed)
  # 2. Produce segment means.
  segment_lengths = tf.reshape(total_count, [-1])
  segment_ids = tf.repeat(tf.range(tf.size(segment_lengths)), segment_lengths)
  flatmeans = tf.math.segment_mean(uniform_samples, segment_ids)
  # 3. Reshape and transpose segment means back to the original shape.
  outshape = tf.concat([tf.shape(total_count), [n]], axis=0)
  tmeans = tf.reshape(flatmeans, outshape)
  axes = tf.range(tf.rank(tmeans))
  means = tf.transpose(tmeans, tf.roll(axes, shift=1, axis=0))
  # 4. Shift/scale from (0, 1) to (low, high).
  return low + (high - low) * means
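
The segment-ids trick from step 2, in isolation:

segment_lengths = tf.constant([2, 3])
segment_ids = tf.repeat(tf.range(tf.size(segment_lengths)), segment_lengths)
# segment_ids == [0, 0, 1, 1, 1]; tf.math.segment_mean then averages
# rows 0-1 into segment 0 and rows 2-4 into segment 1.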
        def step_fn(inputs):
            """Per-Replica StepFn."""
            # Note that we don't use tf.tile for labels here
            images, labels = inputs
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            # get lambdas
            lambdas = log_uniform_mean(lambda_parameters)
            rep_lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)

            # eval on testsets
            logits = model([images, rep_lambdas], training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)

            # per member performance and gibbs performance (average per member perf)
            if dataset_name == 'clean':
                for i in range(FLAGS.ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)

                labels_tile = tf.tile(labels, [FLAGS.ensemble_size])
                metrics['test/gibbs_nll'].update_state(
                    tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels_tile, logits, from_logits=True)))
                metrics['test/gibbs_accuracy'].update_state(labels_tile, probs)

            # ensemble performance
            negative_log_likelihood = ensemble_crossentropy(
                labels, logits, FLAGS.ensemble_size)
            probs = tf.reduce_mean(per_probs, axis=0)
            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

            if dataset_name == 'clean':
                per_probs_stacked = tf.stack(per_probs, axis=0)
                diversity_results = um.average_pairwise_diversity(
                    per_probs_stacked, FLAGS.ensemble_size)
                for k, v in diversity_results.items():
                    metrics['test/' + k].update_state(v)
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            with tf.GradientTape(watch_accessed_variables=False) as tape:
                tape.watch(lambda_parameters)

                # sample lambdas
                if FLAGS.sample_and_tune:
                    lambdas = log_uniform_sample(per_core_batch_size,
                                                 lambda_parameters)
                else:
                    lambdas = log_uniform_mean(lambda_parameters)
                    lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)
                lambdas = tf.reshape(lambdas,
                                     (FLAGS.ensemble_size *
                                      per_core_batch_size, lambdas_config.dim))
                # ensemble CE
                logits = model([images, lambdas], training=False)
                ce = ensemble_crossentropy(labels, logits, FLAGS.ensemble_size)
                # entropy penalty for lambda distribution
                entropy = FLAGS.tau * log_uniform_entropy(lambda_parameters)
                loss = ce - entropy
                scaled_loss = loss / strategy.num_replicas_in_sync

            gradients = tape.gradient(loss, lambda_parameters)
            tuner.apply_gradients(zip(gradients, lambda_parameters))

            metrics['validation/loss_ce'].update_state(
                ce / strategy.num_replicas_in_sync)
            metrics['validation/loss_entropy'].update_state(
                entropy / strategy.num_replicas_in_sync)
            metrics['validation/loss'].update_state(scaled_loss)
Example #20
  def _setup(self, coupon_spec):
    """Setup tensors for efficient computations."""

    if isinstance(coupon_spec, list):
      cpn_frequency = dates.periods.PeriodTensor.stack(
          [x.coupon_frequency for x in coupon_spec], axis=0)
      businessday_rule = coupon_spec[-1].businessday_rule
      notional = tf.convert_to_tensor([x.notional for x in coupon_spec],
                                      dtype=self._dtype)
      fixed_rate = tf.convert_to_tensor([x.coupon_rate for x in coupon_spec],
                                        dtype=self._dtype)
      daycount_convention = coupon_spec[-1].daycount_convention
    else:
      cpn_frequency = coupon_spec.coupon_frequency
      businessday_rule = coupon_spec.businessday_rule
      notional = tf.broadcast_to(
          tf.convert_to_tensor(coupon_spec.notional, dtype=self._dtype),
          self._start_date.shape)
      fixed_rate = tf.broadcast_to(
          tf.convert_to_tensor(coupon_spec.coupon_rate, dtype=self._dtype),
          self._start_date.shape)
      daycount_convention = coupon_spec.daycount_convention

    cpn_dates, _ = self._generate_schedule(cpn_frequency, businessday_rule)
    payment_dates = cpn_dates[:, 1:]

    notional = tf.repeat(notional, payment_dates.shape.as_list()[-1])
    daycount_fractions = rc.get_daycount_fraction(
        cpn_dates[:, :-1],
        cpn_dates[:, 1:],
        daycount_convention,
        dtype=self._dtype)

    coupon_rate = tf.expand_dims(fixed_rate, axis=-1)
    coupon_rate = tf.repeat(coupon_rate, payment_dates.shape.as_list()[-1])
    contract_index = tf.repeat(tf.range(0, self._batch_size),
                               payment_dates.shape.as_list()[-1])

    self._num_cashflows = payment_dates.shape.as_list()[-1]
    self._payment_dates = payment_dates.reshape([-1])
    self._notional = notional
    self._daycount_fractions = tf.reshape(daycount_fractions, [-1])
    self._coupon_rate = coupon_rate
    self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype)
    self._contract_index = contract_index
 def __call__(self, states, inputs, initialize):
     """The batch size is extracted from inputs. Therefore, beware of
 transformations that flatten inputs."""
     new_states = copy.copy(states)
     new_states[self.out_name] = tf.repeat(
         tf.expand_dims(states[self.sname], 0), inputs.shape[0], axis=0)
     return new_states, inputs, {}
    def __call__(self, states, inputs, initialize):
        new_states = copy.copy(states)

        new_states[self.sname] = tf.repeat(
            tf.expand_dims(states[self.sname], self.axis),
            self.repeats,
            axis=self.axis)

        return new_states, inputs, {}
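
The expand-and-repeat pattern both __call__ methods rely on, in isolation (hypothetical sizes):

state = tf.constant([1., 2., 3.])                         # per-state tensor, shape [3]
batched = tf.repeat(tf.expand_dims(state, 0), 4, axis=0)  # shape [4, 3]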
Example #23
    def price(self, valuation_date, market, model=None, name=None):
        """Returns the present value of the stream on the valuation date.

    Args:
      valuation_date: A scalar `DateTensor` specifying the date on which the
        valuation is desired.
      market: A namedtuple of type `InterestRateMarket` which contains the
        necessary information for pricing the cashflow stream.
      model: Reserved for future use.
      name: Python str. The name to give to the ops created by this function.
        Default value: `None` which maps to 'price'.

    Returns:
      A Rank 1 `Tensor` of real type containing the modeled price of each stream
      contract based on the input market data.
    """

        del model
        name = name or (self._name + '_price')
        with tf.name_scope(name):
            discount_curve = market.discount_curve
            reference_curve = market.reference_curve
            libor_rate = rc.get_rate_index(market,
                                           self._start_date,
                                           rc.RateIndexType.LIBOR,
                                           dtype=self._dtype)
            libor_rate = tf.repeat(
                tf.convert_to_tensor(libor_rate, dtype=self._dtype),
                self._num_cashflows)

            discount_factors = discount_curve.get_discount_factor(
                self._payment_dates)
            forward_rates = reference_curve.get_forward_rate(
                self._accrual_start_date, self._accrual_end_date,
                self._daycount_fractions)

            forward_rates = tf.where(self._daycount_fractions > 0.,
                                     forward_rates,
                                     tf.zeros_like(forward_rates))
            # If coupon end date is before the valuation date, the payment is in the
            # past. If valuation date is between coupon start date and coupon end
            # date, then the rate has been fixed but not paid. Otherwise the rate is
            # not fixed and should be read from the curve.
            forward_rates = tf.where(
                self._coupon_end_dates < valuation_date,
                tf.constant(0., dtype=self._dtype),
                tf.where(self._coupon_start_dates < valuation_date, libor_rate,
                         forward_rates))

            coupon_rate = self._coupon_multiplier * (forward_rates +
                                                     self._coupon_basis)

            cashflow_pvs = self._notional * (self._daycount_fractions *
                                             coupon_rate * discount_factors)
            return tf.math.reduce_sum(
                tf.reshape(cashflow_pvs,
                           (self._batch_size, self._num_cashflows)),
                axis=1)
Example #24
  def price(self, valuation_date, market, model=None, pricing_context=None,
            name=None):
    """Returns the present value of the stream on the valuation date.

    Args:
      valuation_date: A scalar `DateTensor` specifying the date on which the
        valuation is desired.
      market: A namedtuple of type `InterestRateMarket` which contains the
        necessary information for pricing the cashflow stream.
      model: An optional input of type `InterestRateModelType` to specify which
        model to use for pricing.
        Default value: `None` in which case `NORMAL_RATE` model is used.
      pricing_context: An optional input to provide additional parameters (such
        as model parameters) relevant for pricing.
      name: Python str. The name to give to the ops created by this function.
        Default value: `None` which maps to 'price'.

    Returns:
      A Rank 1 `Tensor` of real type containing the modeled price of each stream
      contract based on the input market data.
    """

    name = name or (self._name + '_price')
    with tf.name_scope(name):
      valuation_date = dates.convert_to_date_tensor(valuation_date)
      discount_curve = market.discount_curve
      past_fixing = rc.get_rate_index(
          market, self._start_date, rc.RateIndexType.SWAP, dtype=self._dtype)
      past_fixing = tf.repeat(
          tf.convert_to_tensor(past_fixing, dtype=self._dtype),
          self._num_cashflows)

      discount_factors = discount_curve.get_discount_factor(self._payment_dates)
      cms_rates = self._swap.par_rate(valuation_date, market, model)

      cms_rates = tf.where(self._daycount_fractions > 0., cms_rates,
                           tf.zeros_like(cms_rates))
      # If coupon end date is before the valuation date, the payment is in the
      # past. If valuation date is between coupon start date and coupon end
      # date, then the rate has been fixed but not paid. Otherwise the rate is
      # not fixed and should be read from the curve.
      cms_rates = tf.where(
          self._coupon_end_dates < valuation_date,
          tf.constant(0., dtype=self._dtype),
          tf.where(self._coupon_start_dates < valuation_date,
                   past_fixing, cms_rates))
      cms_rates = self._adjust_convexity(
          valuation_date, market, model, pricing_context, cms_rates,
          discount_factors)

      coupon_rate = self._coupon_multiplier * (
          cms_rates + self._coupon_basis)

      cashflow_pvs = self._notional * (
          self._daycount_fractions * coupon_rate * discount_factors)
      return tf.math.segment_sum(cashflow_pvs, self._contract_index)
    def _split(wav, label):
      # wav shape: (audio_samples, )
      chunks = tf.math.floordiv(len(wav), self.expected_waveform_len)
      unused = tf.math.floormod(len(wav), self.expected_waveform_len)
      # Drop unused data
      wav = wav[:len(wav) - unused]
      # Split the audio sample into multiple chunks
      wav = tf.reshape(wav, (chunks, 1, self.expected_waveform_len))

      return wav, tf.repeat(tf.expand_dims(label, 0), len(wav))
    def test_variance_swap_fair_strike_supports_batching(self, validate_args):
        dtype = tf.float64
        batch_call_strikes = tf.repeat(
            tf.expand_dims(tf.range(100, 120, 5, dtype=dtype), 0), 3, axis=0)
        batch_put_strikes = tf.repeat(
            tf.expand_dims(tf.range(100, 80, -5, dtype=dtype), 0), 3, axis=0)
        batch_vols = 0.2 * tf.ones((3, 4), dtype=dtype)
        batch_shape = (3, )
        reference_strikes = 100.0 * tf.ones(batch_shape, dtype=dtype)
        batch_expiries = tf.constant([0.25, 0.5, 1.0], dtype=dtype)
        discount_rates = 0.05 * tf.ones(batch_shape, dtype=dtype)
        batch_variance_price = self.evaluate(
            tff.black_scholes.variance_swap_fair_strike(
                batch_put_strikes,
                batch_vols,
                batch_call_strikes,
                batch_vols,
                batch_expiries,
                discount_rates,
                reference_strikes,
                reference_strikes,
                validate_args=validate_args,
                dtype=dtype))

        self.assertEqual(batch_variance_price.shape, batch_shape)
        for i in range(3):
            row_variance_price = self.evaluate(
                tff.black_scholes.variance_swap_fair_strike(
                    batch_put_strikes[i, :],
                    batch_vols[i, :],
                    batch_call_strikes[i, :],
                    batch_vols[i, :],
                    batch_expiries[i],
                    discount_rates[i],
                    reference_strikes[i],
                    reference_strikes[i],
                    dtype=tf.float64))
            self.assertAllEqual(row_variance_price, batch_variance_price[i])
 def broadcast_info(
         info_traj: types.ReverbReplaySample
 ) -> types.ReverbReplaySample:
     # Assumes that the first element of traj is shaped
     # (sequence_length, ...); and we extract this length.
     info, traj = info_traj
     first_elem = tf.nest.flatten(traj)[0]
     length = first_elem.shape[0] or tf.shape(first_elem)[0]
     info = tf.nest.map_structure(lambda t: tf.repeat(t, [length]),
                                  info)
     return reverb.ReplaySample(info, traj)
Example #28
 def _return_fn(t, spot):
     leverage_fn_interpolator = (
         math.interpolation.interpolation_2d.Interpolation2D(
             x_data=[times],
             y_data=tf.expand_dims(
                 tf.repeat(grid[0] + x_scale, times.shape[0], axis=0),
                 axis=0),
             z_data=tf.expand_dims(leverage_fn_values, axis=0),
             dtype=dtype))
     return leverage_fn_interpolator.interpolate(t, tf.math.log(spot))
Example #29
def _prepare_indices_ijj(idx0, idx1, idx2):
    """Prepares indices to get x[i, j, j]."""
    # For a 3-D `Tensor` x, creates indices for tf.gather_nd to retrieve
    # x[i, j, j].
    len0 = tf.shape(idx0)[0]
    len1 = tf.shape(idx1)[0]
    idx0 = tf.repeat(idx0, len1)
    idx1 = tf.tile(idx1, [len0])
    idx2 = tf.tile(idx2, [len0])

    # shape of return value: (len0 * len1, 3), assuming idx1 and idx2 have
    # equal length
    return tf.stack([idx0, idx1, idx2], axis=-1)
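
A minimal usage sketch with hypothetical shapes:

x = tf.random.normal([2, 3, 3])
nd_idx = _prepare_indices_ijj(tf.range(2), tf.range(3), tf.range(3))
d = tf.reshape(tf.gather_nd(x, nd_idx), [2, 3])
# d[i, j] == x[i, j, j]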
Example #30
def _segmented_range(limits):
  """Equivalent to `tf.ragged.range(limits).flat_values`.

  Ragged Tensors are not supported by numpy.

  Args:
    limits: Integer `Tensor` of sizes of each range.

  Returns:
    segments: 1D `Tensor` of segment ranges.
  """
  return (tf.range(tf.reduce_sum(limits)) -
          tf.repeat(tf.concat([[0], tf.cumsum(limits[:-1])], axis=0), limits))
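
A quick check of the identity in the docstring:

print(_segmented_range(tf.constant([2, 3, 1])))
# tf.Tensor([0 1 0 1 2 0], ...)  == tf.ragged.range([2, 3, 1]).flat_values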