Example No. 1
 def _cdf(self, x):
   x = tf.convert_to_tensor(x, name='x')
   flat_x = tf.reshape(x, shape=[-1])
   upper_bound = tf.searchsorted(self.outcomes, values=flat_x, side='right')
   values_at_ub = tf.gather(
       self.outcomes,
       indices=tf.minimum(upper_bound,
                          dist_util.prefer_static_shape(self.outcomes)[-1] -
                          1))
   should_use_upper_bound = self._is_equal_or_close(flat_x, values_at_ub)
   indices = tf.where(should_use_upper_bound, upper_bound, upper_bound - 1)
   return self._categorical.cdf(
       tf.reshape(indices, shape=dist_util.prefer_static_shape(x)))
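The `_cdf` above hinges on `tf.searchsorted` to locate, for each input value, the index of the largest outcome that is less than or equal to it. Below is a minimal standalone sketch of that lookup pattern; the outcome and query values are made up, and the simple closeness test stands in for `_is_equal_or_close`.

```python
import tensorflow as tf

# Hypothetical sorted support and query points, standing in for
# `self.outcomes` and `x` in the method above.
outcomes = tf.constant([0., 1., 2., 4.])
x = tf.constant([0.5, 1.0, 2.0, 3.0])

# Index of the first outcome strictly greater than each query value.
upper_bound = tf.searchsorted(outcomes, values=x, side='right')

# Clip so the gather below never runs past the last outcome.
values_at_ub = tf.gather(outcomes,
                         tf.minimum(upper_bound, tf.shape(outcomes)[-1] - 1))

# Keep the upper bound when the query coincides with that outcome,
# otherwise step back one index.
indices = tf.where(tf.abs(x - values_at_ub) < 1e-6, upper_bound, upper_bound - 1)
print(indices)  # => [0 1 2 2]
```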
Example No. 2
def _make_empty_queue_for(k, element):
    """Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors.

  For example:

  ```python
    element = tf.constant([[0., 1., 2., 3., 4.],
                           [5., 6., 7., 8., 9.]])

    # A queue capable of holding 3 elements.
    _make_empty_queue_for(3, element)
    # => [[[ 0.,  0.,  0.,  0.,  0.],
    #      [ 0.,  0.,  0.,  0.,  0.]],
    #
    #     [[ 0.,  0.,  0.,  0.,  0.],
    #      [ 0.,  0.,  0.,  0.,  0.]],
    #
    #     [[ 0.,  0.,  0.,  0.,  0.],
    #      [ 0.,  0.,  0.,  0.,  0.]]]
  ```

  Args:
    k: A positive scalar integer, number of elements that each queue will hold.
    element: A `tf.Tensor`, only its shape and dtype information are relevant.

  Returns:
    A zero-filled `tf.Tensor` of shape `(k,) + tf.shape(element)` and same dtype
    as `element`.
  """
    queue_shape = tf.concat(
        [[k], distribution_util.prefer_static_shape(element)], axis=0)
    return tf.zeros(queue_shape, dtype=element.dtype.base_dtype)
Example No. 3
def make_ar_transition_matrix(coefficients):
  """Build transition matrix for an autoregressive StateSpaceModel.

  When applied to a vector of previous values, this matrix computes
  the expected new value (summing the previous states according to the
  autoregressive coefficients) in the top dimension of the state space,
  and moves all previous values down by one dimension, 'forgetting' the
  final (least recent) value. That is, it looks like this:

  ```
  ar_matrix = [ coefs[0], coefs[1], ..., coefs[order-1]
                1.,       0.,       ..., 0.
                0.,       1.,       ..., 0.
                ...
                0.,       0.,  ..., 1.,  0.            ]
  ```

  Args:
    coefficients: float `Tensor` of shape `concat([batch_shape, [order]])`.

  Returns:
    ar_matrix: float `Tensor` with shape `concat([batch_shape,
    [order, order]])`.
  """

  top_row = tf.expand_dims(coefficients, -2)
  coef_shape = dist_util.prefer_static_shape(coefficients)
  batch_shape, order = coef_shape[:-1], coef_shape[-1]
  remaining_rows = tf.concat([
      tf.eye(order - 1, dtype=coefficients.dtype, batch_shape=batch_shape),
      tf.zeros(tf.concat([batch_shape, (order - 1, 1)], axis=0),
               dtype=coefficients.dtype)
  ], axis=-1)
  ar_matrix = tf.concat([top_row, remaining_rows], axis=-2)
  return ar_matrix
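As a quick sanity check of the construction above, here is a hedged sketch that builds the same companion-style transition matrix for a single, non-batched AR(2) process; the coefficient values are made up.

```python
import tensorflow as tf

# Hypothetical AR(2) coefficients, shape [order] with order = 2.
coefficients = tf.constant([0.5, -0.2])
order = coefficients.shape[-1]

top_row = coefficients[tf.newaxis, :]           # the AR coefficients
shift_rows = tf.concat(
    [tf.eye(order - 1, dtype=coefficients.dtype),
     tf.zeros([order - 1, 1], dtype=coefficients.dtype)], axis=-1)
ar_matrix = tf.concat([top_row, shift_rows], axis=0)
print(ar_matrix)
# => [[ 0.5, -0.2],
#     [ 1. ,  0. ]]
```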
Example No. 4
def _make_empty_stack_like(element, k):
    """Creates a `tf.Tensor` suitable to hold k element-shaped vectors.

  For example:

  ```python
    element = tf.constant([1., 2., 3., 4., 5.])

    _make_empty_stack_like(element, 3)
    # => [[0., 0., 0., 0., 0.],
    #     [0., 0., 0., 0., 0.],
    #     [0., 0., 0., 0., 0.]]
  ```

  Args:
    element: A `tf.Tensor`, only its shape and dtype information are relevant.
    k: A positive scalar integer `tf.Tensor`.

  Returns:
    A zero-filled `Tensor` of shape `(k,) + tf.shape(element)` and dtype same
    as `element`.
  """
    stack_shape = tf.concat(
        [[k], distribution_util.prefer_static_shape(element)], axis=0)
    return tf.zeros(stack_shape, dtype=element.dtype.base_dtype)
Example No. 5
    def __init__(self,
                 samples,
                 event_ndims=0,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='Empirical'):
        """Initialize `Empirical` distributions.

    Args:
      samples: Numeric `Tensor` of shape `[B1, ..., Bk, S, E1, ..., En]`,
        `k, n >= 0`. Samples or batches of samples on which the distribution
        is based. The first `k` dimensions index into a batch of independent
        distributions. The length of the `S` dimension determines the number
        of samples in each multiset. The last `n` dimensions represent the
        samples for each distribution; `n` is specified by the `event_ndims`
        argument.
      event_ndims: Python `int32`, default `0`. Number of dimensions for each
        event. When `0` this distribution has scalar samples. When `1` this
        distribution has vector-like samples.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value `NaN` to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if the rank of `samples` is less than `event_ndims + 1`.
    """

        parameters = locals()
        with tf.name_scope(name):
            self._samples = tf.convert_to_tensor(value=samples, name='samples')
            self._event_ndims = event_ndims
            self._samples_axis = ((tensorshape_util.rank(self.samples.shape)
                                   or tf.rank(self.samples)) -
                                  self._event_ndims - 1)
            with tf.control_dependencies([
                    assert_util.assert_rank_at_least(self._samples,
                                                     event_ndims + 1)
            ]):
                samples_shape = distribution_util.prefer_static_shape(
                    self._samples)
                self._num_samples = samples_shape[self._samples_axis]

        super(Empirical, self).__init__(
            dtype=self._samples.dtype,
            reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            parameters=parameters,
            graph_parents=[self._samples],
            name=name)
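For context, a brief usage sketch of the public `tfp.distributions.Empirical` class whose constructor is shown above; the sample values below are made up.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# A batch of two empirical distributions, each defined by four scalar samples.
samples = tf.constant([[0., 1., 1., 2.],
                       [1., 3., 3., 5.]])
dist = tfd.Empirical(samples=samples, event_ndims=0)

print(dist.mean())           # => [1., 3.]
print(dist.prob([1., 3.]))   # => [0.5, 0.5]
```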
Example No. 6
 def _variance(self):
   probs = self._categorical.probs_parameter()
   outcomes = tf.broadcast_to(
       self.outcomes, shape=dist_util.prefer_static_shape(probs))
   if dtype_util.is_integer(outcomes.dtype):
     if self._validate_args:
       outcomes = dist_util.embed_check_integer_casting_closed(
           outcomes, target_dtype=probs.dtype)
     outcomes = tf.cast(outcomes, dtype=probs.dtype)
   square_d = tf.math.squared_difference(
       outcomes, self._mean(probs)[..., tf.newaxis])
   return tf.reduce_sum(probs * square_d, axis=-1)
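The reduction above is just the textbook discrete variance, `sum_i probs[i] * (outcomes[i] - mean)**2`. A hedged NumPy check with made-up outcomes and probabilities:

```python
import numpy as np

# Hypothetical support and probabilities of a finite discrete distribution.
outcomes = np.array([0., 1., 2.])
probs = np.array([0.2, 0.5, 0.3])

mean = np.sum(probs * outcomes)                    # 1.1
variance = np.sum(probs * (outcomes - mean) ** 2)  # 0.49
print(mean, variance)
```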
Example No. 7
    def _log_prob(self, value):
        with tf.control_dependencies(self._runtime_assertions):
            # The argument `value` is a tensor of sequences of observations.
            # `observation_batch_shape` is the shape of that tensor with the
            # sequence part removed.
            # `observation_batch_shape` is then broadcast to the full batch shape
            # to give the `working_shape` that defines the shape of the result.

            observation_tensor_shape = util.prefer_static_shape(value)
            observation_batch_shape = observation_tensor_shape[
                :-1 - self._underlying_event_rank]
            # value :: observation_batch_shape num_steps observation_event_shape
            working_shape = tf.broadcast_dynamic_shape(
                observation_batch_shape, self.batch_shape_tensor())
            log_init = tf.broadcast_to(
                self._log_init,
                tf.concat([working_shape, [self._num_states]], axis=0))
            # log_init :: working_shape num_states
            log_transition = self._log_trans

            # `observation_event_shape` is the shape of each sequence of observations
            # emitted by the model.
            observation_event_shape = observation_tensor_shape[
                -1 - self._underlying_event_rank:]
            working_obs = tf.broadcast_to(
                value,
                tf.concat([working_shape, observation_event_shape], axis=0))
            # working_obs :: working_shape observation_event_shape
            r = self._underlying_event_rank

            # Move index into sequence of observations to front so we can apply
            # tf.foldl
            working_obs = util.move_dimension(working_obs, -1 - r,
                                              0)[..., tf.newaxis]
            # working_obs :: num_steps working_shape underlying_event_shape
            observation_probs = (
                self._observation_distribution.log_prob(working_obs))

            def forward_step(log_prev_step, log_observation):
                return _log_vector_matrix(log_prev_step,
                                          log_transition) + log_observation

            fwd_prob = tf.foldl(forward_step,
                                observation_probs,
                                initializer=log_init)
            # fwd_prob :: working_shape num_states

            log_prob = tf.reduce_logsumexp(fwd_prob, axis=-1)
            # log_prob :: working_shape

            return log_prob
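The `tf.foldl` above implements the forward algorithm in log space. Below is a minimal NumPy sketch of the same recursion in probability space for a two-state chain; all numbers are made up, and the exact placement of the initial observation relative to the first transition may differ from the TFP code.

```python
import numpy as np

# Made-up two-state HMM: initial distribution, transition matrix, and
# per-step observation likelihoods p(obs_t | state).
init = np.array([0.6, 0.4])
transition = np.array([[0.7, 0.3],
                       [0.2, 0.8]])
obs_lik = np.array([[0.9, 0.1],   # t = 0
                    [0.2, 0.8],   # t = 1
                    [0.3, 0.7]])  # t = 2

# Forward recursion: alpha_t[s] = p(obs_0..obs_t, state_t = s).
alpha = init * obs_lik[0]
for t in range(1, len(obs_lik)):
    alpha = (alpha @ transition) * obs_lik[t]

print(np.log(alpha.sum()))   # log p(obs_0..obs_{T-1})
```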
Example No. 8
 def _log_prob(self, x):
   x = tf.convert_to_tensor(value=x, name='x')
   right_indices = tf.minimum(
       tf.size(input=self.outcomes) - 1,
       tf.reshape(
           tf.searchsorted(
               self.outcomes, values=tf.reshape(x, shape=[-1]), side='right'),
           dist_util.prefer_static_shape(x)))
   use_right_indices = self._is_equal_or_close(
       x, tf.gather(self.outcomes, indices=right_indices))
   left_indices = tf.maximum(0, right_indices - 1)
   use_left_indices = self._is_equal_or_close(
       x, tf.gather(self.outcomes, indices=left_indices))
   log_probs = self._categorical.log_prob(
       tf.compat.v1.where(use_left_indices, left_indices, right_indices))
   should_be_neg_inf = tf.broadcast_to(
       tf.logical_not(use_left_indices | use_right_indices),
       shape=dist_util.prefer_static_shape(log_probs))
   return tf.compat.v1.where(
       should_be_neg_inf,
       tf.fill(
           dist_util.prefer_static_shape(should_be_neg_inf),
           dtype_util.as_numpy_dtype(log_probs.dtype)(-np.inf)), log_probs)
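For reference, a hedged usage sketch of `tfp.distributions.FiniteDiscrete`, the kind of outcomes-plus-categorical distribution that methods like `_log_prob` and `_cdf` above implement; the support and probabilities below are made up.

```python
import tensorflow_probability as tfp

tfd = tfp.distributions

dist = tfd.FiniteDiscrete(outcomes=[1., 2., 4.], probs=[0.2, 0.5, 0.3])

print(dist.log_prob(2.))   # => log(0.5)
print(dist.log_prob(3.))   # => -inf, since 3. is not in the support
print(dist.cdf(2.5))       # => 0.7
```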
Example No. 9
def _broadcast(value, target):
    """Broadcast a value to match the batching dimensions of a target.

  If necessary, the value is converted into a tensor. Both value and target
  should be of the same dtype.

  Args:
    value: A value to broadcast.
    target: A `Tensor` of shape `[b1, ..., bn, d]`.

  Returns:
    A `Tensor` of shape `[b1, ..., bn]` and the same dtype as the target.
  """
    return tf.broadcast_to(tf.convert_to_tensor(value, dtype=target.dtype),
                           distribution_util.prefer_static_shape(target)[:-1])
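A minimal sketch of what the helper does, with made-up shapes and a made-up scalar value:

```python
import tensorflow as tf

target = tf.zeros([2, 3, 4])   # batch dims [2, 3], trailing dim d = 4
value = 0.5

broadcast = tf.broadcast_to(tf.convert_to_tensor(value, dtype=target.dtype),
                            tf.shape(target)[:-1])
print(broadcast.shape)         # => (2, 3)
```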
Example No. 10
 def _cdf(self, x):
     x = tf.convert_to_tensor(x, name='x')
     flat_x = tf.reshape(x, shape=[-1])
     upper_bound = tf.searchsorted(self.outcomes,
                                   values=flat_x,
                                   side='right')
     values_at_ub = tf.gather(self.outcomes,
                              indices=tf.minimum(
                                  upper_bound,
                                  ps.shape(self.outcomes)[-1] - 1))
     should_use_upper_bound = self._is_equal_or_close(flat_x, values_at_ub)
     indices = tf.where(should_use_upper_bound, upper_bound,
                        upper_bound - 1)
     indices = tf.reshape(indices, shape=dist_util.prefer_static_shape(x))
     indices_non_negative = tf.where(tf.equal(indices, -1),
                                     tf.zeros([], indices.dtype), indices)
     cdf = self._categorical.cdf(indices_non_negative)
     return tf.where(tf.equal(indices, -1), tf.zeros([], cdf.dtype), cdf)
Example No. 11
def _mul_right(mat, vec):
    """Computes the product of a square matrix with a vector on the right.

  Note this accepts a generalized square matrix `M`, i.e. of shape `s + s`
  with `rank(s) >= 1`, a generalized vector `v` of shape `s`, and computes
  the product `M.v` (also of shape `s`).

  Furthermore, the shapes may be fully dynamic.

  Examples:

    v = tf.constant([0, 1])
    M = tf.constant([[0, 1], [2, 3]])
    _mul_right(M, v)
    # => [1, 3]

    v = tf.reshape(tf.range(6), shape=(2, 3))
    # => [[0, 1, 2],
    #     [3, 4, 5]]
    M = tf.reshape(tf.range(36), shape=(2, 3, 2, 3))
    _mul_right(M, v)
    # => [[ 55, 145, 235],
    #     [325, 415, 505]]

  Args:
    mat: A `tf.Tensor` of shape `s + s`.
    vec: A `tf.Tensor` of shape `s`.

  Returns:
    A tensor with the result of the product (also of shape `s`).
  """
    contraction_axes = tf.range(-distribution_util.prefer_static_rank(vec), 0)
    result = tf.tensordot(mat,
                          vec,
                          axes=tf.stack([contraction_axes, contraction_axes]))
    # This last reshape is needed to help with inference about the shape
    # information, otherwise a partially-known shape would become completely
    # unknown.
    return tf.reshape(result, distribution_util.prefer_static_shape(vec))
Example No. 12
def _mul_right(mat, vec):
  """Computes the product of a square matrix with a vector on the right.

  Note this accepts a generalized square matrix `M`, i.e. of shape `s + s`
  with `rank(s) >= 1`, a generalized vector `v` of shape `s`, and computes
  the product `M.v` (also of shape `s`).

  Furthermore, the shapes may be fully dynamic.

  Examples:

    v = tf.constant([0, 1])
    M = tf.constant([[0, 1], [2, 3]])
    _mul_right(M, v)
    # => [1, 3]

    v = tf.reshape(tf.range(6), shape=(2, 3))
    # => [[0, 1, 2],
    #     [3, 4, 5]]
    M = tf.reshape(tf.range(36), shape=(2, 3, 2, 3))
    _mul_right(M, v)
    # => [[ 55, 145, 235],
    #     [325, 415, 505]]

  Args:
    mat: A `tf.Tensor` of shape `s + s`.
    vec: A `tf.Tensor` of shape `s`.

  Returns:
    A tensor with the result of the product (also of shape `s`).
  """
  contraction_axes = tf.range(-distribution_util.prefer_static_rank(vec), 0)
  result = tf.tensordot(mat, vec, axes=tf.stack([contraction_axes,
                                                 contraction_axes]))
  # This last reshape is needed to help with inference about the shape
  # information, otherwise a partially-known shape would become completely
  # unknown.
  return tf.reshape(result, distribution_util.prefer_static_shape(vec))
Example No. 13
 def testNonEmptyConstantTensor(self):
     x = tf.zeros((2, 3, 4))
     shape = distribution_util.prefer_static_shape(x)
     self.assertIsInstance(shape, np.ndarray)
     self.assertAllEqual([2, 3, 4], shape)
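The behavior under test: when a tensor's shape is fully known at graph-construction time, `prefer_static_shape` can return it as a concrete NumPy value instead of a `tf.shape` op. A hedged sketch of the underlying static-versus-dynamic distinction using plain TensorFlow:

```python
import tensorflow as tf

x = tf.zeros((2, 3, 4))

# Static shape: known without running anything.
print(x.shape)      # => (2, 3, 4)

# Dynamic shape: an int32 Tensor, evaluated at run time.
print(tf.shape(x))  # => tf.Tensor([2 3 4], shape=(3,), dtype=int32)
```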
Example No. 14
def auto_correlation(x,
                     axis=-1,
                     max_lags=None,
                     center=True,
                     normalize=True,
                     name='auto_correlation'):
  """Auto correlation along one axis.

  Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
  `RXX` may be defined as  (with `E` expectation and `Conj` complex conjugate)

  ```
  RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
  W[n]   := (X[n] - MU) / S,
  MU     := E{ X[0] },
  S**2   := E{ (X[0] - MU) Conj(X[0] - MU) }.
  ```

  This function takes the viewpoint that `x` is (along one axis) a finite
  sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
  estimate of `RXX[m]` as follows:

  After extending `x` from length `L` to `inf` by zero padding, the auto
  correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as

  ```
  rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
  w[n]   := (x[n] - mu) / s,
  mu     := L**-1 sum_n x[n],
  s**2   := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
  ```

  The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
  often set `max_lags` small enough so that the entire output is meaningful.

  Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
  `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
  contains a slight bias, which goes to zero as `len(x) - m --> infinity`.

  Args:
    x:  `float32` or `complex64` `Tensor`.
    axis:  Python `int`. The axis number along which to compute correlation.
      Other dimensions index different batch members.
    max_lags:  Positive `int` tensor.  The maximum value of `m` to consider (in
      equation above).  If `max_lags >= x.shape[axis]`, we effectively re-set
      `max_lags` to `x.shape[axis] - 1`.
    center:  Python `bool`.  If `False`, do not subtract the mean estimate `mu`
      from `x[n]` when forming `w[n]`.
    normalize:  Python `bool`.  If `False`, do not divide by the variance
      estimate `s**2` when forming `w[n]`.
    name:  `String` name to prepend to created ops.

  Returns:
    `rxx`: `Tensor` of same `dtype` as `x`.  `rxx.shape[i] = x.shape[i]` for
      `i != axis`, and `rxx.shape[axis] = max_lags + 1`.

  Raises:
    TypeError:  If `x` is not a supported type.
  """
  # Implementation details:
  # Extend length N / 2 1-D array x to length N by zero padding onto the end.
  # Then, set
  #   F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
  # It is not hard to see that
  #   F[x]_k Conj(F[x]_k) = F[R]_k, where
  #   R_m := sum_n x_n Conj(x_{(n - m) mod N}).
  # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].

  # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
  # based version of estimating RXX.
  # Note that this is a special case of the Wiener-Khinchin Theorem.
  with tf.name_scope(name, values=[x]):
    x = tf.convert_to_tensor(x, name='x')

    # Rotate dimensions of x in order to put axis at the rightmost dim.
    # FFT op requires this.
    rank = util.prefer_static_rank(x)
    if axis < 0:
      axis = rank + axis
    shift = rank - 1 - axis
    # Suppose x.shape[axis] = T, so there are T 'time' steps.
    #   ==> x_rotated.shape = B + [T],
    # where B is x_rotated's batch shape.
    x_rotated = util.rotate_transpose(x, shift)

    if center:
      x_rotated -= tf.reduce_mean(x_rotated, axis=-1, keepdims=True)

    # x_len = N / 2 from above explanation.  The length of x along axis.
    # Get a value for x_len that works in all cases.
    x_len = util.prefer_static_shape(x_rotated)[-1]

    # TODO(langmore) Investigate whether this zero padding helps or hurts.  At
    # the moment it is necessary so that all FFT implementations work.
    # Zero pad to the next power of 2 greater than 2 * x_len, which equals
    # 2**(ceil(Log_2(2 * x_len))).  Note: Log_2(X) = Log_e(X) / Log_e(2).
    x_len_float64 = tf.cast(x_len, np.float64)
    target_length = tf.pow(
        np.float64(2.), tf.ceil(tf.log(x_len_float64 * 2) / np.log(2.)))
    pad_length = tf.cast(target_length - x_len_float64, np.int32)

    # We should have:
    # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
    #                     = B + [T + pad_length]
    x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)

    dtype = x.dtype
    if not dtype.is_complex:
      if not dtype.is_floating:
        raise TypeError('Argument x must have either float or complex dtype'
                        ' found: {}'.format(dtype))
      x_rotated_pad = tf.complex(x_rotated_pad,
                                 dtype.real_dtype.as_numpy_dtype(0.))

    # Autocorrelation is IFFT of power-spectral density (up to some scaling).
    fft_x_rotated_pad = tf.fft(x_rotated_pad)
    spectral_density = fft_x_rotated_pad * tf.conj(fft_x_rotated_pad)
    # shifted_product is R[m] from above detailed explanation.
    # It is the inner product sum_n X[n] * Conj(X[n - m]).
    shifted_product = tf.ifft(spectral_density)

    # Cast back to real-valued if x was real to begin with.
    shifted_product = tf.cast(shifted_product, dtype)

    # Figure out if we can deduce the final static shape, and set max_lags.
    # Use x_rotated as a reference, because it has the time dimension in the far
    # right, and was created before we performed all sorts of crazy shape
    # manipulations.
    know_static_shape = True
    if not x_rotated.shape.is_fully_defined():
      know_static_shape = False
    if max_lags is None:
      max_lags = x_len - 1
    else:
      max_lags = tf.convert_to_tensor(max_lags, name='max_lags')
      max_lags_ = tf.contrib.util.constant_value(max_lags)
      if max_lags_ is None or not know_static_shape:
        know_static_shape = False
        max_lags = tf.minimum(x_len - 1, max_lags)
      else:
        max_lags = min(x_len - 1, max_lags_)

    # Chop off the padding.
    # We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
    shifted_product_chopped = shifted_product[..., :max_lags + 1]

    # If possible, set shape.
    if know_static_shape:
      chopped_shape = x_rotated.shape.as_list()
      chopped_shape[-1] = min(x_len, max_lags + 1)
      shifted_product_chopped.set_shape(chopped_shape)

    # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]).  The
    # other terms were zeros arising only due to zero padding.
    # `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
    # E[X[n] Conj(X[n - m])].
    x_len = tf.cast(x_len, dtype.real_dtype)
    max_lags = tf.cast(max_lags, dtype.real_dtype)
    denominator = x_len - tf.range(0., max_lags + 1.)
    denominator = tf.cast(denominator, dtype)
    shifted_product_rotated = shifted_product_chopped / denominator

    if normalize:
      shifted_product_rotated /= shifted_product_rotated[..., :1]

    # Transpose dimensions back to those of x.
    return util.rotate_transpose(shifted_product_rotated, -shift)
Example No. 15
def auto_correlation(x,
                     axis=-1,
                     max_lags=None,
                     center=True,
                     normalize=True,
                     name='auto_correlation'):
  """Auto correlation along one axis.

  Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
  `RXX` may be defined as  (with `E` expectation and `Conj` complex conjugate)

  ```
  RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
  W[n]   := (X[n] - MU) / S,
  MU     := E{ X[0] },
  S**2   := E{ (X[0] - MU) Conj(X[0] - MU) }.
  ```

  This function takes the viewpoint that `x` is (along one axis) a finite
  sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
  estimate of `RXX[m]` as follows:

  After extending `x` from length `L` to `inf` by zero padding, the auto
  correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as

  ```
  rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
  w[n]   := (x[n] - mu) / s,
  mu     := L**-1 sum_n x[n],
  s**2   := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
  ```

  The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
  often set `max_lags` small enough so that the entire output is meaningful.

  Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
  `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
  contains a slight bias, which goes to zero as `len(x) - m --> infinity`.

  Args:
    x:  `float32` or `complex64` `Tensor`.
    axis:  Python `int`. The axis number along which to compute correlation.
      Other dimensions index different batch members.
    max_lags:  Positive `int` tensor.  The maximum value of `m` to consider (in
      equation above).  If `max_lags >= x.shape[axis]`, we effectively re-set
      `max_lags` to `x.shape[axis] - 1`.
    center:  Python `bool`.  If `False`, do not subtract the mean estimate `mu`
      from `x[n]` when forming `w[n]`.
    normalize:  Python `bool`.  If `False`, do not divide by the variance
      estimate `s**2` when forming `w[n]`.
    name:  `String` name to prepend to created ops.

  Returns:
    `rxx`: `Tensor` of same `dtype` as `x`.  `rxx.shape[i] = x.shape[i]` for
      `i != axis`, and `rxx.shape[axis] = max_lags + 1`.

  Raises:
    TypeError:  If `x` is not a supported type.
  """
  # Implementation details:
  # Extend length N / 2 1-D array x to length N by zero padding onto the end.
  # Then, set
  #   F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
  # It is not hard to see that
  #   F[x]_k Conj(F[x]_k) = F[R]_k, where
  #   R_m := sum_n x_n Conj(x_{(n - m) mod N}).
  # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].

  # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
  # based version of estimating RXX.
  # Note that this is a special case of the Wiener-Khinchin Theorem.
  with tf.name_scope(name, values=[x]):
    x = tf.convert_to_tensor(value=x, name='x')

    # Rotate dimensions of x in order to put axis at the rightmost dim.
    # FFT op requires this.
    rank = util.prefer_static_rank(x)
    if axis < 0:
      axis = rank + axis
    shift = rank - 1 - axis
    # Suppose x.shape[axis] = T, so there are T 'time' steps.
    #   ==> x_rotated.shape = B + [T],
    # where B is x_rotated's batch shape.
    x_rotated = util.rotate_transpose(x, shift)

    if center:
      x_rotated -= tf.reduce_mean(
          input_tensor=x_rotated, axis=-1, keepdims=True)

    # x_len = N / 2 from above explanation.  The length of x along axis.
    # Get a value for x_len that works in all cases.
    x_len = util.prefer_static_shape(x_rotated)[-1]

    # TODO(langmore) Investigate whether this zero padding helps or hurts.  At
    # the moment it is necessary so that all FFT implementations work.
    # Zero pad to the next power of 2 greater than 2 * x_len, which equals
    # 2**(ceil(Log_2(2 * x_len))).  Note: Log_2(X) = Log_e(X) / Log_e(2).
    x_len_float64 = tf.cast(x_len, np.float64)
    target_length = tf.pow(
        np.float64(2.), tf.math.ceil(
            tf.math.log(x_len_float64 * 2) / np.log(2.)))
    pad_length = tf.cast(target_length - x_len_float64, np.int32)

    # We should have:
    # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
    #                     = B + [T + pad_length]
    x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)

    dtype = x.dtype
    if not dtype.is_complex:
      if not dtype.is_floating:
        raise TypeError('Argument x must have either float or complex dtype'
                        ' found: {}'.format(dtype))
      x_rotated_pad = tf.complex(x_rotated_pad,
                                 dtype.real_dtype.as_numpy_dtype(0.))

    # Autocorrelation is IFFT of power-spectral density (up to some scaling).
    fft_x_rotated_pad = tf.signal.fft(x_rotated_pad)
    spectral_density = fft_x_rotated_pad * tf.math.conj(fft_x_rotated_pad)
    # shifted_product is R[m] from above detailed explanation.
    # It is the inner product sum_n X[n] * Conj(X[n - m]).
    shifted_product = tf.signal.ifft(spectral_density)

    # Cast back to real-valued if x was real to begin with.
    shifted_product = tf.cast(shifted_product, dtype)

    # Figure out if we can deduce the final static shape, and set max_lags.
    # Use x_rotated as a reference, because it has the time dimension in the far
    # right, and was created before we performed all sorts of crazy shape
    # manipulations.
    know_static_shape = True
    if not x_rotated.shape.is_fully_defined():
      know_static_shape = False
    if max_lags is None:
      max_lags = x_len - 1
    else:
      max_lags = tf.convert_to_tensor(value=max_lags, name='max_lags')
      max_lags_ = tf.get_static_value(max_lags)
      if max_lags_ is None or not know_static_shape:
        know_static_shape = False
        max_lags = tf.minimum(x_len - 1, max_lags)
      else:
        max_lags = min(x_len - 1, max_lags_)

    # Chop off the padding.
    # We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
    shifted_product_chopped = shifted_product[..., :max_lags + 1]

    # If possible, set shape.
    if know_static_shape:
      chopped_shape = x_rotated.shape.as_list()
      chopped_shape[-1] = min(x_len, max_lags + 1)
      shifted_product_chopped.set_shape(chopped_shape)

    # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]).  The
    # other terms were zeros arising only due to zero padding.
    # `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
    # E[X[n] Conj(X[n - m])].
    x_len = tf.cast(x_len, dtype.real_dtype)
    max_lags = tf.cast(max_lags, dtype.real_dtype)
    denominator = x_len - tf.range(0., max_lags + 1.)
    denominator = tf.cast(denominator, dtype)
    shifted_product_rotated = shifted_product_chopped / denominator

    if normalize:
      shifted_product_rotated /= shifted_product_rotated[..., :1]

    # Transpose dimensions back to those of x.
    return util.rotate_transpose(shifted_product_rotated, -shift)
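A hedged usage sketch of the estimator, assuming it is exposed as `tfp.stats.auto_correlation`; the AR(1) series below is simulated with made-up parameters. For an AR(1) process with coefficient `phi = 0.8`, the true autocorrelation at lag `m` is approximately `0.8**m`, which the estimate should roughly recover.

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

# Simulate a made-up AR(1) series: x[n] = 0.8 * x[n-1] + noise.
rng = np.random.RandomState(0)
x = np.zeros(10000, dtype=np.float32)
for n in range(1, len(x)):
    x[n] = 0.8 * x[n - 1] + rng.randn()

rxx = tfp.stats.auto_correlation(tf.constant(x), max_lags=3)
print(rxx)  # roughly [1.0, 0.8, 0.64, 0.51]
```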
Example No. 16
def _queue_push(queue, should_update, new_vecs):
    """Conditionally push new vectors into a batch of first-in-first-out queues.

  The `queue` of shape `[k, ..., n]` can be thought of as a batch of queues,
  each holding `k` n-D vectors; while `new_vecs` of shape `[..., n]` is a
  fresh new batch of n-D vectors. The `should_update` batch of Boolean scalars,
  i.e. shape `[...]`, indicates batch members whose corresponding n-D vector in
  `new_vecs` should be added at the back of its queue, pushing out the
  corresponding n-D vector from the front. Batch members in `new_vecs` for
  which `should_update` is False are ignored.

  Note: the choice of placing `k` at dimension 0 of the queue is
  constrained by the L-BFGS two-loop algorithm above. The algorithm uses
  tf.scan to iterate over the `k` correction pairs simultaneously across all
  batches, and tf.scan itself can only iterate over dimension 0.

  For example:

  ```python
    k, b, n = (3, 2, 5)
    queue = tf.reshape(tf.range(30), (k, b, n))
    # => [[[ 0,  1,  2,  3,  4],
    #      [ 5,  6,  7,  8,  9]],
    #
    #     [[10, 11, 12, 13, 14],
    #      [15, 16, 17, 18, 19]],
    #
    #     [[20, 21, 22, 23, 24],
    #      [25, 26, 27, 28, 29]]]

    element = tf.reshape(tf.range(30, 40), (b, n))
    # => [[30, 31, 32, 33, 34],
    #     [35, 36, 37, 38, 39]]

    should_update = tf.constant([True, False])  # Shape: (b,)

    _queue_push(queue, should_update, element)
    # => [[[10, 11, 12, 13, 14],
    #      [ 5,  6,  7,  8,  9]],
    #
    #     [[20, 21, 22, 23, 24],
    #      [15, 16, 17, 18, 19]],
    #
    #     [[30, 31, 32, 33, 34],
    #      [25, 26, 27, 28, 29]]]
  ```

  Args:
    queue: A `tf.Tensor` of shape `[k, ..., n]`; a batch of queues each with
      `k` n-D vectors.
    should_update: A Boolean `tf.Tensor` of shape `[...]` indicating batch
      members where new vectors should be added to their queues.
    new_vecs: A `tf.Tensor` of shape `[..., n]`; a batch of n-D vectors to add
      at the end of their respective queues, pushing out the first element from
      each.

  Returns:
    A new `tf.Tensor` of shape `[k, ..., n]`.
  """
    new_queue = tf.concat([queue[1:], [new_vecs]], axis=0)
    update_pattern = tf.broadcast_to(
        should_update[tf.newaxis, ..., tf.newaxis],
        distribution_util.prefer_static_shape(queue))
    return tf1.where(update_pattern, new_queue, queue)
Example No. 17
def minimize(value_and_gradients_function,
             initial_position,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             name=None):
  """Applies the BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1].

  ### Usage:

  The following example demonstrates the BFGS optimizer attempting to find the
  minimum for a simple two dimensional quadratic objective function.

  ```python
    minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
    scales = np.array([2.0, 3.0])  # The scales along the two axes.

    # The objective function and the gradient.
    def quadratic(x):
      value = tf.reduce_sum(scales * (x - minimum) ** 2)
      return value, tf.gradients(value, x)[0]

    start = tf.constant([0.6, 0.8])  # Starting point for the search.
    optim_results = tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8)

    with tf.Session() as session:
      results = session.run(optim_results)
      # Check that the search converged
      assert(results.converged)
      # Check that the argmin is close to the actual value.
      np.testing.assert_allclose(results.position, minimum)
      # Print out the total number of function evaluations it took. Should be 6.
      print ("Function evaluations: %d" % results.num_objective_evaluations)
  ```

  ### References:
  [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp 136-140. 2006
    http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function:  A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The first component of the return value should be a
      real scalar `Tensor`. The second component (the gradient) should have the
      same shape as the input value to the function.
    initial_position: `Tensor` of real dtype. The starting point of the search
      procedure. Should be a point at which the function value and the gradient
      norm are finite.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change
      in the objective value between one iteration and the next is smaller
      than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype
      as the components of the output of the `value_and_gradients_function`.
      If specified, the shape should be `initial_position.shape` * 2.
      For example, if the shape of `initial_position` is `[n]`, then the
      acceptable shape of `initial_inverse_hessian_estimate` is as a square
      matrix of shape `[n, n]`.
      If the shape of `initial_position` is `[n, m]`, then the required shape
      is `[n, m, n, m]`.
      For the correctness of the algorithm, it is required that this parameter
      be symmetric and positive definite. Specifies the starting estimate for
      the inverse of the Hessian at the initial point. If not specified,
      the identity matrix is used as the starting estimate for the
      inverse Hessian.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed to
      run in parallel.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: Scalar boolean tensor indicating whether the minimum was
        found within tolerance.
      failed:  Scalar boolean tensor indicating whether a line search
        step failed to find a suitable step size satisfying Wolfe
        conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor containing the last argument value found
        during the search. If the search converged, then
        this value is the argmin of the objective function.
      objective_value: A tensor containing the value of the objective
        function at the `position`. If the search converged, then this is
        the (local) minimum of the objective function.
      objective_gradient: A tensor containing the gradient of the objective
        function at the `position`. If the search converged the
        max-norm of this tensor should be below the tolerance.
      inverse_hessian_estimate: A tensor containing the inverse of the
        estimated Hessian.
  """
  with tf.name_scope(name, 'minimize', [initial_position,
                                        tolerance,
                                        initial_inverse_hessian_estimate]):
    initial_position = tf.convert_to_tensor(initial_position,
                                            name='initial_position')
    dtype = initial_position.dtype.base_dtype
    tolerance = tf.convert_to_tensor(tolerance, dtype=dtype,
                                     name='grad_tolerance')
    f_relative_tolerance = tf.convert_to_tensor(f_relative_tolerance,
                                                dtype=dtype,
                                                name='f_relative_tolerance')
    x_tolerance = tf.convert_to_tensor(x_tolerance,
                                       dtype=dtype,
                                       name='x_tolerance')
    max_iterations = tf.convert_to_tensor(max_iterations, name='max_iterations')

    domain_shape = distribution_util.prefer_static_shape(initial_position)

    if initial_inverse_hessian_estimate is None:
      inv_hessian_shape = tf.concat([domain_shape, domain_shape], 0)
      initial_inv_hessian = tf.eye(tf.size(initial_position), dtype=dtype)
      initial_inv_hessian = tf.reshape(initial_inv_hessian,
                                       inv_hessian_shape,
                                       name='initial_inv_hessian')
    else:
      initial_inv_hessian = tf.convert_to_tensor(
          initial_inverse_hessian_estimate,
          dtype=dtype,
          name='initial_inv_hessian')

    # If an initial inverse Hessian is supplied, ensure that it is positive
    # definite. The easiest way to validate this is to compute the Cholesky
    # decomposition. However, it seems that simply adding a control dependency
    # on the decomposition result is not enough to trigger it. We need to
    # add an assert on the result.
    if initial_inverse_hessian_estimate is not None:
      # The supplied Hessian may not be of rank 2. Reshape it so it is.
      initial_inv_hessian_sqr_mat = tf.reshape(
          initial_inverse_hessian_estimate,
          tf.stack([tf.size(initial_position),
                    tf.size(initial_position)], axis=0))
      # If the matrix is not positive definite, the Cholesky decomposition will
      # fail. Adding an assert on it ensures it will be triggered.
      cholesky_factor = tf.cholesky(initial_inv_hessian_sqr_mat)
      is_positive_definite = tf.reduce_all(tf.is_finite(cholesky_factor))
      asymmetry = tf.norm(initial_inv_hessian_sqr_mat -
                          tf.transpose(initial_inv_hessian_sqr_mat), np.inf)
      is_symmetric = tf.equal(asymmetry, 0)
      with tf.control_dependencies(
          [tf.Assert(is_positive_definite,
                     ['Initial inverse Hessian is not positive definite.',
                      initial_inverse_hessian_estimate]),
           tf.Assert(is_symmetric,
                     ['Initial inverse Hessian is not symmetric',
                      initial_inverse_hessian_estimate])]):
        f0, df0 = value_and_gradients_function(initial_position)
    else:
      f0, df0 = value_and_gradients_function(initial_position)

    initial_convergence = _initial_convergence_test(df0, tolerance)

    # The `state` here is a BfgsOptimizerResults tuple with values for the
    # current state of the algorithm computation.
    def _cond(state):
      """Stopping condition for the algorithm."""
      keep_going = tf.logical_not(state.converged | state.failed |
                                  (state.num_iterations >= max_iterations))
      return keep_going

    def _body(state):
      """Main optimization loop."""

      search_direction = _get_search_direction(state.inverse_hessian_estimate,
                                               state.objective_gradient)
      derivative_at_start_pt = tf.reduce_sum(state.objective_gradient *
                                             search_direction)
      # If the derivative at the start point is not negative, reset the
      # Hessian estimate and recompute the search direction.
      needs_reset = derivative_at_start_pt >= 0
      def _reset_search_dirn():
        search_direction = _get_search_direction(initial_inv_hessian,
                                                 state.objective_gradient)
        return search_direction, initial_inv_hessian

      search_direction, inv_hessian_estimate = tf.contrib.framework.smart_cond(
          needs_reset,
          true_fn=_reset_search_dirn,
          false_fn=lambda: (search_direction, state.inverse_hessian_estimate))
      line_search_value_grad_func = _restrict_along_direction(
          value_and_gradients_function, state.position, search_direction)
      derivative_at_start_pt = tf.reduce_sum(state.objective_gradient *
                                             search_direction)

      ls_result = linesearch.hager_zhang(
          line_search_value_grad_func,
          initial_step_size=tf.convert_to_tensor(1, dtype=dtype),
          objective_at_zero=state.objective_value,
          grad_objective_at_zero=derivative_at_start_pt)

      state_after_ls = _update_state(
          state,
          failed=~ls_result.converged,  # Fail if line search failed.
          num_iterations=state.num_iterations + 1,
          num_objective_evaluations=(
              state.num_objective_evaluations + ls_result.func_evals),
          inverse_hessian_estimate=inv_hessian_estimate)

      def _do_bfgs_update():
        state_updated = _update_position(
            value_and_gradients_function,
            state_after_ls,
            search_direction * ls_result.left_pt,
            tolerance, f_relative_tolerance, x_tolerance)

        # If not converged, update the Hessian.
        return tf.contrib.framework.smart_cond(
            state_updated.converged,
            lambda: state_updated,
            lambda: _update_inv_hessian(state_after_ls, state_updated))

      next_state = tf.contrib.framework.smart_cond(
          state_after_ls.failed,
          true_fn=lambda: state_after_ls,
          false_fn=_do_bfgs_update)
      return [next_state]

    initial_state = BfgsOptimizerResults(
        converged=initial_convergence,
        failed=False,
        num_iterations=tf.convert_to_tensor(0),
        num_objective_evaluations=1,
        position=initial_position,
        objective_value=f0,
        objective_gradient=df0,
        inverse_hessian_estimate=initial_inv_hessian)

    return tf.while_loop(_cond, _body, [initial_state],
                         parallel_iterations=parallel_iterations)[0]
Example No. 18
def minimize(value_and_gradients_function,
             initial_position,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             name=None):
  """Applies the BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1].

  ### Usage:

  The following example demonstrates the BFGS optimizer attempting to find the
  minimum for a simple two dimensional quadratic objective function.

  ```python
    minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
    scales = np.array([2.0, 3.0])  # The scales along the two axes.

    # The objective function and the gradient.
    def quadratic(x):
      value = tf.reduce_sum(scales * (x - minimum) ** 2)
      return value, tf.gradients(value, x)[0]

    start = tf.constant([0.6, 0.8])  # Starting point for the search.
    optim_results = tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8)

    with tf.Session() as session:
      results = session.run(optim_results)
      # Check that the search converged
      assert(results.converged)
      # Check that the argmin is close to the actual value.
      np.testing.assert_allclose(results.position, minimum)
      # Print out the total number of function evaluations it took. Should be 6.
      print ("Function evaluations: %d" % results.num_objective_evaluations)
  ```

  ### References:
  [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp 136-140. 2006
    http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function:  A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The first component of the return value should be a
      real scalar `Tensor`. The second component (the gradient) should have the
      same shape as the input value to the function.
    initial_position: `Tensor` of real dtype. The starting point of the search
      procedure. Should be a point at which the function value and the gradient
      norm are finite.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change
      in the objective value between one iteration and the next is smaller
      than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype
      as the components of the output of the `value_and_gradients_function`.
      If specified, the shape should be `initial_position.shape` * 2.
      For example, if the shape of `initial_position` is `[n]`, then the
      acceptable shape of `initial_inverse_hessian_estimate` is as a square
      matrix of shape `[n, n]`.
      If the shape of `initial_position` is `[n, m]`, then the required shape
      is `[n, m, n, m]`.
      For the correctness of the algorithm, it is required that this parameter
      be symmetric and positive definite. Specifies the starting estimate for
      the inverse of the Hessian at the initial point. If not specified,
      the identity matrix is used as the starting estimate for the
      inverse Hessian.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed to
      run in parallel.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: Scalar boolean tensor indicating whether the minimum was
        found within tolerance.
      failed:  Scalar boolean tensor indicating whether a line search
        step failed to find a suitable step size satisfying Wolfe
        conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor containing the last argument value found
        during the search. If the search converged, then
        this value is the argmin of the objective function.
      objective_value: A tensor containing the value of the objective
        function at the `position`. If the search converged, then this is
        the (local) minimum of the objective function.
      objective_gradient: A tensor containing the gradient of the objective
        function at the `position`. If the search converged the
        max-norm of this tensor should be below the tolerance.
      inverse_hessian_estimate: A tensor containing the inverse of the
        estimated Hessian.
  """
  with tf.name_scope(name, 'minimize', [initial_position,
                                        tolerance,
                                        initial_inverse_hessian_estimate]):
    initial_position = tf.convert_to_tensor(initial_position,
                                            name='initial_position')
    dtype = initial_position.dtype.base_dtype
    tolerance = tf.convert_to_tensor(tolerance, dtype=dtype,
                                     name='grad_tolerance')
    f_relative_tolerance = tf.convert_to_tensor(f_relative_tolerance,
                                                dtype=dtype,
                                                name='f_relative_tolerance')
    x_tolerance = tf.convert_to_tensor(x_tolerance,
                                       dtype=dtype,
                                       name='x_tolerance')
    max_iterations = tf.convert_to_tensor(max_iterations, name='max_iterations')

    domain_shape = distribution_util.prefer_static_shape(initial_position)

    if initial_inverse_hessian_estimate is None:
      inv_hessian_shape = tf.concat([domain_shape, domain_shape], 0)
      initial_inv_hessian = tf.eye(tf.size(initial_position), dtype=dtype)
      initial_inv_hessian = tf.reshape(initial_inv_hessian,
                                       inv_hessian_shape,
                                       name='initial_inv_hessian')
    else:
      initial_inv_hessian = tf.convert_to_tensor(
          initial_inverse_hessian_estimate,
          dtype=dtype,
          name='initial_inv_hessian')

    # If an initial inverse Hessian is supplied, ensure that it is positive
    # definite. The easiest way to validate this is to compute the Cholesky
    # decomposition. However, it seems that simply adding a control dependency
    # on the decomposition result is not enough to trigger it. We need to
    # add an assert on the result.
    if initial_inverse_hessian_estimate is not None:
      # The supplied Hessian may not be of rank 2. Reshape it so it is.
      initial_inv_hessian_sqr_mat = tf.reshape(
          initial_inverse_hessian_estimate,
          tf.stack([tf.size(initial_position),
                    tf.size(initial_position)], axis=0))
      # If the matrix is not positive definite, the Cholesky decomposition will
      # fail. Adding an assert on it ensures it will be triggered.
      cholesky_factor = tf.cholesky(initial_inv_hessian_sqr_mat)
      is_positive_definite = tf.reduce_all(tf.is_finite(cholesky_factor))
      asymmetry = tf.norm(initial_inv_hessian_sqr_mat -
                          tf.transpose(initial_inv_hessian_sqr_mat), np.inf)
      is_symmetric = tf.equal(asymmetry, 0)
      with tf.control_dependencies(
          [tf.Assert(is_positive_definite,
                     ['Initial inverse Hessian is not positive definite.',
                      initial_inverse_hessian_estimate]),
           tf.Assert(is_symmetric,
                     ['Initial inverse Hessian is not symmetric',
                      initial_inverse_hessian_estimate])]):
        f0, df0 = value_and_gradients_function(initial_position)
    else:
      f0, df0 = value_and_gradients_function(initial_position)

    initial_convergence = _initial_convergence_test(df0, tolerance)

    def _cond(converged,
              failed,
              iteration,
              *ignored_args):  # pylint: disable=unused-argument
      """Stopping condition for the algorithm."""
      keep_going = tf.logical_not(converged | failed |
                                  (iteration >= max_iterations))
      return keep_going

    def _body(converged,  # pylint: disable=unused-argument
              stopped,  # pylint: disable=unused-argument
              iteration,
              total_evals,
              position,
              objective_value,
              objective_gradient,
              input_inv_hessian_estimate):
      """Main optimization loop."""

      search_direction = _get_search_direction(input_inv_hessian_estimate,
                                               objective_gradient)
      derivative_at_start_pt = tf.reduce_sum(objective_gradient *
                                             search_direction)
      # If the derivative at the start point is not negative, reset the
      # Hessian estimate and recompute the search direction.
      needs_reset = derivative_at_start_pt >= 0
      def _reset_search_dirn():
        search_direction = _get_search_direction(initial_inv_hessian,
                                                 objective_gradient)
        return search_direction, initial_inv_hessian

      search_direction, inv_hessian_estimate = tf.contrib.framework.smart_cond(
          needs_reset,
          true_fn=_reset_search_dirn,
          false_fn=lambda: (search_direction, input_inv_hessian_estimate))
      line_search_value_grad_func = _restrict_along_direction(
          value_and_gradients_function, position, search_direction)
      derivative_at_start_pt = tf.reduce_sum(objective_gradient *
                                             search_direction)

      ls_result = linesearch.hager_zhang(
          line_search_value_grad_func,
          initial_step_size=tf.convert_to_tensor(1, dtype=dtype),
          objective_at_zero=objective_value,
          grad_objective_at_zero=derivative_at_start_pt)

      # Fail if the objective value is not finite or the line search failed.
      ls_failed = ~ls_result.converged

      # If the line search failed, then quit at this point.
      def _failed_fn():
        """Line search failed action."""
        failed_retval = BfgsOptimizerResults(
            converged=False,
            failed=True,
            num_iterations=iteration + 1,
            num_objective_evaluations=total_evals + ls_result.func_evals,
            position=position,
            objective_value=objective_value,
            objective_gradient=objective_gradient,
            inverse_hessian_estimate=inv_hessian_estimate)
        return failed_retval

      def _success_fn():
        return _bfgs_update(value_and_gradients_function,
                            position,
                            objective_value,
                            objective_gradient,
                            search_direction,
                            inv_hessian_estimate,
                            ls_result.left_pt,
                            iteration,
                            total_evals + ls_result.func_evals,
                            tolerance,
                            f_relative_tolerance,
                            x_tolerance)

      return tf.contrib.framework.smart_cond(
          ls_failed,
          true_fn=_failed_fn,
          false_fn=_success_fn)

    initial_values = BfgsOptimizerResults(
        converged=initial_convergence,
        failed=False,
        num_iterations=tf.convert_to_tensor(0),
        num_objective_evaluations=1,
        position=initial_position,
        objective_value=f0,
        objective_gradient=df0,
        inverse_hessian_estimate=initial_inv_hessian)

    return tf.while_loop(_cond, _body, initial_values,
                         parallel_iterations=parallel_iterations)
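The reset logic in `_body` above hinges on the sign of the directional derivative `g · d`: if it is non-negative, `d` is not a descent direction, so the current inverse-Hessian estimate is discarded in favour of `initial_inv_hessian`. A minimal NumPy sketch of that check, outside the TF graph and with made-up numbers, illustrates the idea:

```python
import numpy as np

gradient = np.array([2.0, -1.0])
inv_hessian = np.array([[1.0, 3.0],
                        [3.0, 1.0]])  # Indefinite, so not a valid BFGS estimate.

direction = -inv_hessian.dot(gradient)            # => [ 1., -5.]
directional_derivative = gradient.dot(direction)  # => 7.0 >= 0: not a descent direction.

# The loop above would then fall back to the initial (identity) estimate:
fallback_direction = -np.eye(2).dot(gradient)     # => [-2., 1.]
assert gradient.dot(fallback_direction) < 0       # A proper descent direction.
```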
Exemplo n.º 19
0
def minimize(value_and_gradients_function,
             initial_position,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             name=None):
    """Applies the BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1].

  ### Usage:

  The following example demonstrates the BFGS optimizer attempting to find the
  minimum for a simple two dimensional quadratic objective function.

  ```python
    minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
    scales = np.array([2.0, 3.0])  # The scales along the two axes.

    # The objective function and the gradient.
    def quadratic(x):
      value = tf.reduce_sum(scales * (x - minimum) ** 2)
      return value, tf.gradients(value, x)[0]

    start = tf.constant([0.6, 0.8])  # Starting point for the search.
    optim_results = tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8)

    with tf.Session() as session:
      results = session.run(optim_results)
      # Check that the search converged
      assert(results.converged)
      # Check that the argmin is close to the actual value.
      np.testing.assert_allclose(results.position, minimum)
      # Print out the total number of function evaluations it took. Should be 6.
      print ("Function evaluations: %d" % results.num_objective_evaluations)
  ```

  ### References:
  [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp 136-140. 2006
    http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function:  A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The first component of the return value should be a
      real scalar `Tensor`. The second component (the gradient) should have the
      same shape as the input value to the function.
    initial_position: `Tensor` of real dtype. The starting point of the search
      procedure. Should be a point at which the function value and the gradient
      norm are finite.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change
      in the objective value between one iteration and the next is smaller
      than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype
      as the components of the output of the `value_and_gradients_function`.
      If specified, the shape should be `initial_position.shape * 2`, i.e. the
      domain shape concatenated with itself. For example, if the shape of
      `initial_position` is `[n]`, then the acceptable shape of
      `initial_inverse_hessian_estimate` is a square matrix of shape `[n, n]`.
      If the shape of `initial_position` is `[n, m]`, then the required shape
      is `[n, m, n, m]`.
      For the correctness of the algorithm, it is required that this parameter
      be symmetric and positive definite. Specifies the starting estimate for
      the inverse of the Hessian at the initial point. If not specified,
      the identity matrix is used as the starting estimate for the
      inverse Hessian.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed to
      run in parallel.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: Scalar boolean tensor indicating whether the minimum was
        found within tolerance.
      failed:  Scalar boolean tensor indicating whether a line search
        step failed to find a suitable step size satisfying Wolfe
        conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor containing the last argument value found
        during the search. If the search converged, then
        this value is the argmin of the objective function.
      objective_value: A tensor containing the value of the objective
        function at the `position`. If the search converged, then this is
        the (local) minimum of the objective function.
      objective_gradient: A tensor containing the gradient of the objective
        function at the `position`. If the search converged the
        max-norm of this tensor should be below the tolerance.
      inverse_hessian_estimate: A tensor containing the inverse of the
        estimated Hessian.
  """
    with tf.name_scope(
            name, 'minimize',
        [initial_position, tolerance, initial_inverse_hessian_estimate]):
        initial_position = tf.convert_to_tensor(initial_position,
                                                name='initial_position')
        dtype = initial_position.dtype.base_dtype
        tolerance = tf.convert_to_tensor(tolerance,
                                         dtype=dtype,
                                         name='grad_tolerance')
        f_relative_tolerance = tf.convert_to_tensor(
            f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')
        x_tolerance = tf.convert_to_tensor(x_tolerance,
                                           dtype=dtype,
                                           name='x_tolerance')
        max_iterations = tf.convert_to_tensor(max_iterations,
                                              name='max_iterations')

        if initial_inverse_hessian_estimate is None:
            # Control inputs are an optional list of tensors to evaluate before
            # the start of the search procedure. These can be used to assert the
            # validity of inputs to the search procedure.
            control_inputs = None
            domain_shape = distribution_util.prefer_static_shape(
                initial_position)
            inv_hessian_shape = tf.concat([domain_shape, domain_shape], 0)
            initial_inv_hessian = tf.eye(tf.size(initial_position),
                                         dtype=dtype)
            initial_inv_hessian = tf.reshape(initial_inv_hessian,
                                             inv_hessian_shape,
                                             name='initial_inv_hessian')
        else:
            # If an initial inverse Hessian is supplied, these control inputs ensure
            # that it is positive definite and symmetric.
            initial_inv_hessian = tf.convert_to_tensor(
                initial_inverse_hessian_estimate,
                dtype=dtype,
                name='initial_inv_hessian')
            control_inputs = _inv_hessian_control_inputs(
                initial_inv_hessian, initial_position)

        # The `state` here is a `BfgsOptimizerResults` tuple with values for the
        # current state of the algorithm computation.
        def _cond(state):
            """Stopping condition for the algorithm."""
            keep_going = tf.logical_not(state.converged | state.failed | (
                state.num_iterations >= max_iterations))
            return keep_going

        def _body(state):
            """Main optimization loop."""

            search_direction = _get_search_direction(
                state.inverse_hessian_estimate, state.objective_gradient)
            derivative_at_start_pt = tf.reduce_sum(state.objective_gradient *
                                                   search_direction)
            # If the derivative at the start point is not negative, reset the
            # Hessian estimate and recompute the search direction.
            needs_reset = derivative_at_start_pt >= 0

            def _reset_search_dirn():
                search_direction = _get_search_direction(
                    initial_inv_hessian, state.objective_gradient)
                return search_direction, initial_inv_hessian

            search_direction, inv_hessian_estimate = tf.contrib.framework.smart_cond(
                needs_reset,
                true_fn=_reset_search_dirn,
                false_fn=lambda: (search_direction,
                                  state.inverse_hessian_estimate))

            # Replace the hessian estimate in the state, in case it had to be reset.
            current_state = bfgs_utils.update_fields(
                state, inverse_hessian_estimate=inv_hessian_estimate)

            next_state = bfgs_utils.line_search_step(
                current_state, value_and_gradients_function, search_direction,
                tolerance, f_relative_tolerance, x_tolerance)

            # If not failed or converged, update the Hessian.
            state_after_inv_hessian_update = tf.contrib.framework.smart_cond(
                next_state.converged | next_state.failed, lambda: next_state,
                lambda: _update_inv_hessian(current_state, next_state))
            return [state_after_inv_hessian_update]

        kwargs = bfgs_utils.get_initial_state_args(
            value_and_gradients_function, initial_position, tolerance,
            control_inputs)
        kwargs['inverse_hessian_estimate'] = initial_inv_hessian
        initial_state = BfgsOptimizerResults(**kwargs)
        return tf.while_loop(_cond,
                             _body, [initial_state],
                             parallel_iterations=parallel_iterations)[0]
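`_update_inv_hessian` (not shown in this snippet) applies the usual BFGS rank-two update to the inverse-Hessian estimate. As a reference point, a minimal non-batched NumPy sketch of that textbook formula, `H_{k+1} = (I - rho s yᵀ) H_k (I - rho y sᵀ) + rho s sᵀ` with `rho = 1 / (yᵀ s)`, might look like this (an illustration of the formula, not the batched in-graph TFP implementation):

```python
import numpy as np

def bfgs_inv_hessian_update(inv_hessian, position_delta, gradient_delta):
  """One BFGS update of the inverse Hessian estimate (non-batched sketch)."""
  rho = 1.0 / np.dot(gradient_delta, position_delta)
  n = position_delta.shape[0]
  left = np.eye(n) - rho * np.outer(position_delta, gradient_delta)
  right = np.eye(n) - rho * np.outer(gradient_delta, position_delta)
  return left @ inv_hessian @ right + rho * np.outer(position_delta, position_delta)
```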
Exemplo n.º 20
0
def minimize(value_and_gradients_function,
             initial_position,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             stopping_condition=None,
             name=None):
  """Applies the BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1].

  ### Usage:

  The following example demonstrates the BFGS optimizer attempting to find the
  minimum for a simple two dimensional quadratic objective function.

  ```python
    minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
    scales = np.array([2.0, 3.0])  # The scales along the two axes.

    # The objective function and the gradient.
    def quadratic(x):
      value = tf.reduce_sum(scales * (x - minimum) ** 2)
      return value, tf.gradients(value, x)[0]

    start = tf.constant([0.6, 0.8])  # Starting point for the search.
    optim_results = tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8)

    with tf.Session() as session:
      results = session.run(optim_results)
      # Check that the search converged
      assert(results.converged)
      # Check that the argmin is close to the actual value.
      np.testing.assert_allclose(results.position, minimum)
      # Print out the total number of function evaluations it took. Should be 6.
      print ("Function evaluations: %d" % results.num_objective_evaluations)
  ```

  ### References:
  [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp 136-140. 2006
    http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function:  A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The input should be of shape `[..., n]`, where `n` is
      the size of the domain of input points, and all others are batching
      dimensions. The first component of the return value should be a real
      `Tensor` of matching shape `[...]`. The second component (the gradient)
      should also be of shape `[..., n]` like the input value to the function.
    initial_position: real `Tensor` of shape `[..., n]`. The starting point, or
      points when using batching dimensions, of the search procedure. At these
      points the function value and the gradient norm should be finite.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change
      in the objective value between one iteration and the next is smaller
      than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype
      as the components of the output of the `value_and_gradients_function`.
      If specified, the shape should be broadcastable to `[..., n, n]`; e.g.
      if a single `[n, n]` matrix is provided, it will be automatically
      broadcast to all batch members. Alternatively, one can specify a
      different Hessian estimate for each batch member.
      For the correctness of the algorithm, it is required that this parameter
      be symmetric and positive definite. Specifies the starting estimate for
      the inverse of the Hessian at the initial point. If not specified,
      the identity matrix is used as the starting estimate for the
      inverse Hessian.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed to
      run in parallel.
    stopping_condition: (Optional) A Python function that takes as input two
      Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor.
      The input tensors are `converged` and `failed`, indicating the current
      status of each respective batch member; the return value states whether
      the algorithm should stop. The default is `tfp.optimizer.converged_all`,
      which only stops when all batch members have either converged or failed.
      An alternative is `tfp.optimizer.converged_any`, which stops as soon as
      one batch member has converged, or when all have failed.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: boolean tensor of shape `[...]` indicating for each batch
        member whether the minimum was found within tolerance.
      failed:  boolean tensor of shape `[...]` indicating for each batch
        member whether a line search step failed to find a suitable step size
        satisfying Wolfe conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor of shape `[..., n]` containing the last argument value
        found during the search from each starting point. If the search
        converged, then this value is the argmin of the objective function.
      objective_value: A tensor of shape `[...]` with the value of the
        objective function at the `position`. If the search converged, then
        this is the (local) minimum of the objective function.
      objective_gradient: A tensor of shape `[..., n]` containing the gradient
        of the objective function at the `position`. If the search converged
        the max-norm of this tensor should be below the tolerance.
      inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the
        inverse of the estimated Hessian.
  """
  with tf.compat.v1.name_scope(
      name, 'minimize',
      [initial_position, tolerance, initial_inverse_hessian_estimate]):
    initial_position = tf.convert_to_tensor(
        value=initial_position, name='initial_position')
    dtype = initial_position.dtype.base_dtype
    tolerance = tf.convert_to_tensor(
        value=tolerance, dtype=dtype, name='grad_tolerance')
    f_relative_tolerance = tf.convert_to_tensor(
        value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')
    x_tolerance = tf.convert_to_tensor(
        value=x_tolerance, dtype=dtype, name='x_tolerance')
    max_iterations = tf.convert_to_tensor(
        value=max_iterations, name='max_iterations')

    input_shape = distribution_util.prefer_static_shape(initial_position)
    batch_shape, domain_size = input_shape[:-1], input_shape[-1]

    if stopping_condition is None:
      stopping_condition = bfgs_utils.converged_all

    # Control inputs are an optional list of tensors to evaluate before
    # the start of the search procedure. These can be used to assert the
    # validity of inputs to the search procedure.
    control_inputs = None

    if initial_inverse_hessian_estimate is None:
      # Create a default initial inverse Hessian.
      initial_inv_hessian = tf.eye(domain_size,
                                   batch_shape=batch_shape,
                                   dtype=dtype,
                                   name='initial_inv_hessian')
    else:
      # If an initial inverse Hessian is supplied, compute some control inputs
      # to ensure that it is positive definite and symmetric.
      initial_inv_hessian = tf.convert_to_tensor(
          value=initial_inverse_hessian_estimate,
          dtype=dtype,
          name='initial_inv_hessian')
      control_inputs = _inv_hessian_control_inputs(initial_inv_hessian)
      hessian_shape = tf.concat([batch_shape, [domain_size, domain_size]], 0)
      initial_inv_hessian = tf.broadcast_to(initial_inv_hessian, hessian_shape)

    # The `state` here is a `BfgsOptimizerResults` tuple with values for the
    # current state of the algorithm computation.
    def _cond(state):
      """Continue if iterations remain and stopping condition is not met."""
      return ((state.num_iterations < max_iterations) &
              tf.logical_not(stopping_condition(state.converged, state.failed)))

    def _body(state):
      """Main optimization loop."""
      search_direction = _get_search_direction(state.inverse_hessian_estimate,
                                               state.objective_gradient)
      derivative_at_start_pt = tf.reduce_sum(
          input_tensor=state.objective_gradient * search_direction, axis=-1)

      # If the derivative at the start point is not negative, recompute the
      # search direction with the initial inverse Hessian.
      needs_reset = (~state.failed & ~state.converged &
                     (derivative_at_start_pt >= 0))

      search_direction_reset = _get_search_direction(
          initial_inv_hessian, state.objective_gradient)

      actual_search_direction = tf.compat.v1.where(needs_reset,
                                                   search_direction_reset,
                                                   search_direction)
      actual_inv_hessian = tf.compat.v1.where(needs_reset, initial_inv_hessian,
                                              state.inverse_hessian_estimate)

      # Replace the hessian estimate in the state, in case it had to be reset.
      current_state = bfgs_utils.update_fields(
          state, inverse_hessian_estimate=actual_inv_hessian)

      next_state = bfgs_utils.line_search_step(
          current_state,
          value_and_gradients_function, actual_search_direction,
          tolerance, f_relative_tolerance, x_tolerance, stopping_condition)

      # Update the inverse Hessian if needed and continue.
      return [_update_inv_hessian(current_state, next_state)]

    kwargs = bfgs_utils.get_initial_state_args(
        value_and_gradients_function,
        initial_position,
        tolerance,
        control_inputs)
    kwargs['inverse_hessian_estimate'] = initial_inv_hessian
    initial_state = BfgsOptimizerResults(**kwargs)
    return tf.while_loop(
        cond=_cond,
        body=_body,
        loop_vars=[initial_state],
        parallel_iterations=parallel_iterations)[0]
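To make the `stopping_condition` protocol concrete: it receives the per-batch-member `converged` and `failed` booleans and must reduce them to a single scalar. A sketch of an "any member converged" criterion, equivalent in spirit to the documented `tfp.optimizer.converged_any` (the helper name below is hypothetical), could look like:

```python
import tensorflow as tf

def converged_any_sketch(converged, failed):
  # Stop once any batch member has converged, or once every member has failed.
  # `converged` and `failed` are boolean tensors of shape `[...]`.
  return tf.reduce_any(converged) | tf.reduce_all(failed)

# Hypothetical usage with the batched minimizer above:
#   results = minimize(quadratic, initial_position=batched_start,
#                      stopping_condition=converged_any_sketch)
```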
Exemplo n.º 21
0
def _get_search_direction(state):
    """Computes the search direction to follow at the current state.

  On the `k`-th iteration of the main L-BFGS algorithm, the state has collected
  the most recent `m` correction pairs in position_deltas and gradient_deltas,
  where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`.

  Assuming these, the code below is an implementation of the L-BFGS two-loop
  recursion algorithm given by [Nocedal and Wright(2006)][1]:

  ```None
    q_direction = objective_gradient
    for i in reversed(range(m)):  # First loop.
      inv_rho[i] = gradient_deltas[i]^T * position_deltas[i]
      alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i]
      q_direction = q_direction - alpha[i] * gradient_deltas[i]

    kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] /
                              gradient_deltas[-1]^T * gradient_deltas[-1])
    r_direction = kth_inv_hessian_factor * I * q_direction

    for i in range(m):  # Second loop.
      beta = gradient_deltas[i]^T * r_direction / inv_rho[i]
      r_direction = r_direction + position_deltas[i] * (alpha[i] - beta)

    return -r_direction  # Approximates - H_k * objective_gradient.
  ```

  Args:
    state: A `LBfgsOptimizerResults` tuple with the current state of the
      search procedure.

  Returns:
    A real `Tensor` of the same shape as the `state.position`. The direction
    along which to perform line search.
  """
    # The number of correction pairs that have been collected so far.
    num_elements = tf.minimum(
        state.num_iterations,
        distribution_util.prefer_static_shape(state.position_deltas)[0])

    def _two_loop_algorithm():
        """L-BFGS two-loop algorithm."""
        # Correction pairs are always appended to the end, so only the latest
        # `num_elements` vectors have valid position/gradient deltas.
        position_deltas = state.position_deltas[-num_elements:]
        gradient_deltas = state.gradient_deltas[-num_elements:]

        # Pre-compute all `inv_rho[i]`s.
        inv_rhos = tf.reduce_sum(gradient_deltas * position_deltas, axis=-1)

        def first_loop(acc, args):
            _, q_direction = acc
            position_delta, gradient_delta, inv_rho = args
            alpha = tf.reduce_sum(position_delta * q_direction,
                                  axis=-1) / inv_rho
            direction_delta = alpha[..., tf.newaxis] * gradient_delta
            return (alpha, q_direction - direction_delta)

        # Run first loop body computing and collecting `alpha[i]`s, while also
        # computing the updated `q_direction` at each step.
        zero = tf.zeros_like(inv_rhos[0])
        alphas, q_directions = tf.scan(
            first_loop, [position_deltas, gradient_deltas, inv_rhos],
            initializer=(zero, state.objective_gradient),
            reverse=True)

        # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse
        # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.
        gamma_k = inv_rhos[-1] / tf.reduce_sum(
            gradient_deltas[-1] * gradient_deltas[-1], axis=-1)
        r_direction = gamma_k[..., tf.newaxis] * q_directions[0]

        def second_loop(r_direction, args):
            alpha, position_delta, gradient_delta, inv_rho = args
            beta = tf.reduce_sum(gradient_delta * r_direction,
                                 axis=-1) / inv_rho
            direction_delta = (alpha - beta)[..., tf.newaxis] * position_delta
            return r_direction + direction_delta

        # Finally, run second loop body computing the updated `r_direction` at each
        # step.
        r_directions = tf.scan(
            second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos],
            initializer=r_direction)
        return -r_directions[-1]

    return prefer_static.cond(tf.equal(num_elements, 0),
                              lambda: -state.objective_gradient,
                              _two_loop_algorithm)
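For readers who prefer to check the two-loop recursion outside the TF graph, here is a minimal non-batched NumPy sketch of the same computation (a standalone illustration assuming at least one correction pair is available, not the batched TFP implementation above):

```python
import numpy as np

def lbfgs_direction(gradient, position_deltas, gradient_deltas):
  """Two-loop recursion; returns an approximation of -H_k @ gradient."""
  m = len(position_deltas)
  inv_rhos = [np.dot(y, s) for s, y in zip(position_deltas, gradient_deltas)]
  alphas = [0.0] * m
  q = gradient.astype(float).copy()
  for i in reversed(range(m)):             # First loop.
    alphas[i] = np.dot(position_deltas[i], q) / inv_rhos[i]
    q -= alphas[i] * gradient_deltas[i]
  # Initial scaling H^0_k = gamma_k * I.
  gamma = inv_rhos[-1] / np.dot(gradient_deltas[-1], gradient_deltas[-1])
  r = gamma * q
  for i in range(m):                       # Second loop.
    beta = np.dot(gradient_deltas[i], r) / inv_rhos[i]
    r += (alphas[i] - beta) * position_deltas[i]
  return -r
```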
Exemplo n.º 22
0
 def testScalarTensor(self):
     x = tf.constant(1.)
     shape = distribution_util.prefer_static_shape(x)
     self.assertIsInstance(shape, np.ndarray)
     self.assertAllEqual([], shape)
Exemplo n.º 23
0
def _prepare_args(target_log_prob_fn,
                  volatility_fn,
                  state,
                  step_size,
                  target_log_prob=None,
                  grads_target_log_prob=None,
                  volatility=None,
                  grads_volatility_fn=None,
                  diffusion_drift=None,
                  parallel_iterations=10):
    """Helper which processes input args to meet list-like assumptions."""
    state_parts = list(state) if mcmc_util.is_list_like(state) else [state]

    [
        target_log_prob,
        grads_target_log_prob,
    ] = mcmc_util.maybe_call_fn_and_grads(target_log_prob_fn, state_parts,
                                          target_log_prob,
                                          grads_target_log_prob)
    [
        volatility_parts,
        grads_volatility,
    ] = _maybe_call_volatility_fn_and_grads(
        volatility_fn, state_parts, volatility, grads_volatility_fn,
        distribution_util.prefer_static_shape(target_log_prob),
        parallel_iterations)

    step_sizes = (list(step_size)
                  if mcmc_util.is_list_like(step_size) else [step_size])
    step_sizes = [
        tf.convert_to_tensor(s, name='step_size', dtype=target_log_prob.dtype)
        for s in step_sizes
    ]
    if len(step_sizes) == 1:
        step_sizes *= len(state_parts)
    if len(state_parts) != len(step_sizes):
        raise ValueError(
            'There should be exactly one `step_size` or it should '
            'have same length as `current_state`.')

    if diffusion_drift is None:
        diffusion_drift_parts = _get_drift(step_sizes, volatility_parts,
                                           grads_volatility,
                                           grads_target_log_prob)
    else:
        diffusion_drift_parts = (list(diffusion_drift)
                                 if mcmc_util.is_list_like(diffusion_drift)
                                 else [diffusion_drift])
        if len(state_parts) != len(diffusion_drift_parts):
            raise ValueError(
                'There should be exactly one `diffusion_drift` or it '
                'should have same length as list-like `current_state`.')

    return [
        state_parts,
        step_sizes,
        target_log_prob,
        grads_target_log_prob,
        volatility_parts,
        grads_volatility,
        diffusion_drift_parts,
    ]
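The step-size handling above follows a common list-like normalization convention: a single step size is wrapped in a list and replicated across all state parts, while an explicit list must match `current_state` in length. A small pure-Python sketch of that convention (the helper name is hypothetical):

```python
def normalize_step_sizes(step_size, state_parts):
  # Wrap a single step size and replicate it to match the number of state parts.
  step_sizes = list(step_size) if isinstance(step_size, (list, tuple)) else [step_size]
  if len(step_sizes) == 1:
    step_sizes = step_sizes * len(state_parts)
  if len(step_sizes) != len(state_parts):
    raise ValueError('There should be exactly one `step_size` or it should '
                     'have same length as `current_state`.')
  return step_sizes

# normalize_step_sizes(0.1, state_parts=[a, b, c])  => [0.1, 0.1, 0.1]
```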
Exemplo n.º 24
0
 def testDynamicShapeEndsUpBeingNonEmpty(self):
     if tf.executing_eagerly(): return
     x = tf1.placeholder_with_default(np.zeros([2, 3], dtype=np.float64),
                                      shape=None)
     shape = distribution_util.prefer_static_shape(x)
     self.assertAllEqual([2, 3], self.evaluate(shape))
Exemplo n.º 25
0
 def _compute_num_samples(self, samples):
     samples_shape = distribution_util.prefer_static_shape(samples)
     return tf.convert_to_tensor(samples_shape[self._samples_axis],
                                 dtype_hint=tf.int32,
                                 name='num_samples')
Exemplo n.º 26
0
 def testDynamicShapeEndsUpBeingScalar(self):
     if tf.executing_eagerly(): return
     x = tf1.placeholder_with_default(np.array(1, dtype=np.int32),
                                      shape=None)
     shape = distribution_util.prefer_static_shape(x)
     self.assertAllEqual([], self.evaluate(shape))