Example #1
            def rmspe(labels, predictions, weights=None):
                if context.executing_eagerly():
                    raise RuntimeError('rmspe is not supported '
                                       'when eager execution is enabled.')

                predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(
                    predictions=predictions, labels=labels, weights=weights)
                # The labels were log1p-transformed, so apply expm1 to recover the original scale.
                labels, predictions = math_ops.expm1(labels), math_ops.expm1(
                    predictions)
                mspe, update_op = metrics_impl.mean(
                    math_ops.square((labels - predictions) / labels), weights)
                rmspe = math_ops.sqrt(mspe)
                rmspe_update_op = math_ops.sqrt(update_op)
                return rmspe, rmspe_update_op
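
A minimal NumPy sketch (not part of the snippet above) of the same RMSPE computation, assuming labels and predictions arrive log1p-transformed so that expm1 restores the original scale:

import numpy as np

def rmspe_np(log1p_labels, log1p_predictions):
    # Undo the log1p transform, then compute root-mean-square percentage error.
    labels = np.expm1(log1p_labels)
    predictions = np.expm1(log1p_predictions)
    return np.sqrt(np.mean(np.square((labels - predictions) / labels)))

# Perfect predictions give an RMSPE of 0.
y = np.log1p(np.array([10.0, 20.0, 30.0]))
print(rmspe_np(y, y))  # 0.0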
Example #2
def pearson(logu, name=None):
  """The Pearson Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Pearson Csiszar-function is:

  ```none
  f(u) = (u - 1)**2
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
      `u = exp(logu)`.
  """

  with ops.name_scope(name, "pearson", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    return math_ops.square(math_ops.expm1(logu))
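
As an illustrative check (assumed here, not part of the original source), the identity used above, `expm1(logu)**2 == (u - 1)**2` with `u = exp(logu)`, can be verified numerically:

import numpy as np

logu = np.linspace(-3.0, 3.0, 7)
u = np.exp(logu)
# Pearson Csiszar-function: f(u) = (u - 1)**2, computed in log-space via expm1.
np.testing.assert_allclose(np.expm1(logu) ** 2, (u - 1.0) ** 2)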
Example #3
def total_variation(logu, name=None):
    """The Total Variation Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Total-Variation Csiszar-function is:

  ```none
  f(u) = 0.5 |u - 1|
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

    with ops.name_scope(name, "total_variation", [logu]):
        logu = ops.convert_to_tensor(logu, name="logu")
        return 0.5 * math_ops.abs(math_ops.expm1(logu))
def pearson(logu, name=None):
    """The Pearson Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Pearson Csiszar-function is:

  ```none
  f(u) = (u - 1)**2
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    pearson_of_u: Floating-type `Tensor` of the Csiszar-function evaluated at
      `u = exp(logu)`.
  """

    with ops.name_scope(name, "pearson", [logu]):
        logu = ops.convert_to_tensor(logu, name="logu")
        return math_ops.square(math_ops.expm1(logu))
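
A small NumPy check (illustrative, assuming `u = exp(logu)`) that `0.5 * abs(expm1(logu))` equals the Total-Variation form `0.5 * |u - 1|`:

import numpy as np

logu = np.linspace(-2.0, 2.0, 9)
u = np.exp(logu)
np.testing.assert_allclose(0.5 * np.abs(np.expm1(logu)), 0.5 * np.abs(u - 1.0))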
Example #5
 def _inverse(self, y):
   y = self._maybe_assert_valid_y(y)
   if self.power == 0.:
     return math_ops.log(y)
   # If large y accuracy is an issue, consider using:
   # (y**self.power - 1.) / self.power when y >> 1.
   return math_ops.expm1(math_ops.log(y) * self.power) / self.power
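
For context, a hedged NumPy version of the same inverse, assuming the forward map is the power transform `y = (1 + power * x) ** (1 / power)`:

import numpy as np

def power_transform_inverse(y, power):
    # x = (y**power - 1) / power, written via expm1 for accuracy near y = 1.
    if power == 0.0:
        return np.log(y)
    return np.expm1(np.log(y) * power) / power

y = np.array([0.5, 1.0, 2.0, 4.0])
power = 0.5
x = power_transform_inverse(y, power)
np.testing.assert_allclose((1.0 + power * x) ** (1.0 / power), y)  # round trip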
Example #6
def total_variation(logu, name=None):
  """The Total Variation Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Total-Variation Csiszar-function is:

  ```none
  f(u) = 0.5 |u - 1|
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    total_variation_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

  with ops.name_scope(name, "total_variation", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    return 0.5 * math_ops.abs(math_ops.expm1(logu))
 def _inverse(self, y):
   y = self._maybe_assert_valid_y(y)
   if self.power == 0.:
     return math_ops.log(y)
   # If large y accuracy is an issue, consider using:
   # (y**self.power - 1.) / self.power when y >> 1.
   return math_ops.expm1(math_ops.log(y) * self.power) / self.power
Example #8
def chi_square(logu, name=None):
  """The chi-Square Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Chi-square Csiszar-function is:

  ```none
  f(u) = u**2 - 1
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    chi_square_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with ops.name_scope(name, "chi_square", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    return math_ops.expm1(2. * logu)
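
A quick NumPy check (illustrative, not from the original source) that `expm1(2 * logu)` matches `u**2 - 1` with `u = exp(logu)`:

import numpy as np

logu = np.linspace(-2.0, 2.0, 9)
u = np.exp(logu)
np.testing.assert_allclose(np.expm1(2.0 * logu), u ** 2 - 1.0)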
Example #9
def chi_square(logu, name=None):
    """The chi-Square Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Chi-square Csiszar-function is:

  ```none
  f(u) = u**2 - 1
  ```

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with ops.name_scope(name, "chi_square", [logu]):
        logu = ops.convert_to_tensor(logu, name="logu")
        return math_ops.expm1(2. * logu)
Example #10
def t_power(logu, t, self_normalized=False, name=None):
  """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """
  with ops.name_scope(name, "t_power", [logu, t]):
    logu = ops.convert_to_tensor(logu, name="logu")
    t = ops.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
    fu = math_ops.expm1(t * logu)
    if self_normalized:
      fu -= t * math_ops.expm1(logu)
    fu *= array_ops.where(math_ops.logical_and(0. < t, t < 1.),
                          -array_ops.ones_like(t),
                          array_ops.ones_like(t))
    return fu
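
An illustrative NumPy check (with an assumed value `t = 0.5`) that the expm1 formulation above matches the stated self-normalized form `s * [u**t - 1 - t*(u - 1)]` with `s = -1` for `0 < t < 1`:

import numpy as np

t = 0.5
logu = np.linspace(-1.5, 1.5, 7)
u = np.exp(logu)
expected = -1.0 * (u ** t - 1.0 - t * (u - 1.0))
actual = -1.0 * (np.expm1(t * logu) - t * np.expm1(logu))
np.testing.assert_allclose(actual, expected)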
Example #11
 def _cdf(self, x):
     if self.validate_args:
         x = distribution_util.embed_check_nonnegative_integer_form(x)
     else:
         # Whether or not x is integer-form, the following is well-defined.
         # However, scipy takes the floor, so we do too.
         x = math_ops.floor(x)
     x *= array_ops.ones_like(self.probs)
     return array_ops.where(
         x < 0., array_ops.zeros_like(x), -math_ops.expm1(
             (1. + x) * math_ops.log1p(-self.probs)))
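
A NumPy sketch (with an assumed success probability `p = 0.3`) showing that `-expm1((1 + x) * log1p(-p))` is the numerically stable form of the geometric CDF `1 - (1 - p)**(floor(x) + 1)`:

import numpy as np

p = 0.3
x = np.array([-1.0, 0.0, 1.0, 2.0, 5.0])
cdf = np.where(x < 0.0, 0.0, -np.expm1((1.0 + np.floor(x)) * np.log1p(-p)))
# Same values as the direct (less stable) form for x >= 0.
np.testing.assert_allclose(cdf[1:], 1.0 - (1.0 - p) ** (np.floor(x[1:]) + 1.0))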
Example #12
 def _cdf(self, counts):
     if self.validate_args:
         # We set `check_integer=False` since the CDF is defined on whole real
         # line.
         counts = math_ops.floor(
             distribution_util.embed_check_nonnegative_discrete(
                 counts, check_integer=False))
     counts *= array_ops.ones_like(self.probs)
     return array_ops.where(
         counts < 0., array_ops.zeros_like(counts), -math_ops.expm1(
             (counts + 1) * math_ops.log1p(-self.probs)))
Example #13
def t_power(logu, t, self_normalized=False, name=None):
    """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """
    with ops.name_scope(name, "t_power", [logu, t]):
        logu = ops.convert_to_tensor(logu, name="logu")
        t = ops.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
        fu = math_ops.expm1(t * logu)
        if self_normalized:
            fu -= t * math_ops.expm1(logu)
        fu *= array_ops.where(math_ops.logical_and(0. < t, t < 1.),
                              -array_ops.ones_like(t), array_ops.ones_like(t))
        return fu
Example #14
 def _cdf(self, x):
   if self.validate_args:
     x = distribution_util.embed_check_nonnegative_integer_form(x)
   else:
     # Whether or not x is integer-form, the following is well-defined.
     # However, scipy takes the floor, so we do too.
     x = math_ops.floor(x)
   x *= array_ops.ones_like(self.probs)
   return array_ops.where(
       x < 0.,
       array_ops.zeros_like(x),
       -math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))
Example #15
def softplus_inverse(x, name=None):
    """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).

  Mathematically this op is equivalent to:

  ```none
  softplus_inverse = log(exp(x) - 1.)
  ```

  Args:
    x: `Tensor`. Non-negative (not enforced), floating-point.
    name: A name for the operation (optional).

  Returns:
    `Tensor`. Has the same type/shape as input `x`.
  """
    with ops.name_scope(name, "softplus_inverse", values=[x]):
        x = ops.convert_to_tensor(x, name="x")
        # We begin by deriving a more numerically stable softplus_inverse:
        # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
        # ==> exp{x} = 1 + exp{y}                                (1)
        # ==> y = Log[exp{x} - 1]                                (2)
        #       = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
        #       = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
        #       = Log[1 - exp{-x}] + x                           (3)
        # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
        # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
        # be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
        #
        # In addition to the numerically stable derivation above, we clamp
        # small/large values to be congruent with the logic in:
        # tensorflow/core/kernels/softplus_op.h
        #
        # Finally, we set the input to one whenever the input is too large or too
        # small. This ensures that no unchosen codepath is +/- inf. This is
        # necessary to ensure the gradient doesn't get NaNs. Recall that the
        # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
        # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
        # to overwrite `x` with ones only when we will never actually use this
        # value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
        threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
        is_too_small = math_ops.less(x, np.exp(threshold))
        is_too_large = math_ops.greater(x, -threshold)
        too_small_value = math_ops.log(x)
        too_large_value = x
        # This `where` will ultimately be a NOP because we won't select this
        # codepath whenever we used the surrogate `ones_like`.
        x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
                            array_ops.ones_like(x), x)
        y = x + math_ops.log(-math_ops.expm1(-x))  # == log(expm1(x))
        return array_ops.where(
            is_too_small, too_small_value,
            array_ops.where(is_too_large, too_large_value, y))
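
A short NumPy check (illustrative only) of the identity behind step (3) above, `log(exp(x) - 1) == x + log(-expm1(-x))`, plus the softplus round trip:

import numpy as np

x = np.array([0.1, 1.0, 5.0, 20.0])
naive = np.log(np.exp(x) - 1.0)     # the "obvious" inverse (2)
stable = x + np.log(-np.expm1(-x))  # the rearranged form (3)
np.testing.assert_allclose(stable, naive)
# Round trip: softplus_inverse(softplus(x)) recovers x.
sp = np.log1p(np.exp(x))
np.testing.assert_allclose(sp + np.log(-np.expm1(-sp)), x)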
Example #16
 def _inverse_log_det_jacobian(self, y):
   # Could also do:
   #   ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
   #                              axis=event_dims)
   # but the following is more numerically stable. Ie,
   # Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
   # ==> dX/dY = exp{Y} / (exp{Y} - 1)
   #           = 1 / (1 - exp{-Y}),
   # which is the most stable for large Y > 0. For small Y, we use
   # 1 - exp{-Y} approx Y.
   return -math_ops.reduce_sum(math_ops.log(-math_ops.expm1(-y)),
                               axis=self._event_dims_tensor(y))
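
A NumPy check (illustrative) that `-log(-expm1(-y))` equals the log-derivative of the softplus inverse `X = log(exp(Y) - 1)`, i.e. `log(exp(Y) / (exp(Y) - 1))`:

import numpy as np

y = np.array([0.5, 1.0, 3.0])
log_jac = -np.log(-np.expm1(-y))
np.testing.assert_allclose(log_jac, np.log(np.exp(y) / (np.exp(y) - 1.0)))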
Example #17
def softplus_inverse(x, name=None):
  """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).

  Mathematically this op is equivalent to:

  ```none
  softplus_inverse = log(exp(x) - 1.)
  ```

  Args:
    x: `Tensor`. Non-negative (not enforced), floating-point.
    name: A name for the operation (optional).

  Returns:
    `Tensor`. Has the same type/shape as input `x`.
  """
  with ops.name_scope(name, "softplus_inverse", values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # We begin by deriving a more numerically stable softplus_inverse:
    # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
    # ==> exp{x} = 1 + exp{y}                                (1)
    # ==> y = Log[exp{x} - 1]                                (2)
    #       = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
    #       = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
    #       = Log[1 - exp{-x}] + x                           (3)
    # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
    # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
    # be zero.  To fix this, we use 1 - exp{-x} approx x for small x > 0.
    #
    # In addition to the numerically stable derivation above, we clamp
    # small/large values to be congruent with the logic in:
    # tensorflow/core/kernels/softplus_op.h
    #
    # Finally, we set the input to one whenever the input is too large or too
    # small. This ensures that no unchosen codepath is +/- inf. This is
    # necessary to ensure the gradient doesn't get NaNs. Recall that the
    # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
    # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
    # to overwrite `x` with ones only when we will never actually use this
    # value.  Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
    threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
    is_too_small = math_ops.less(x, np.exp(threshold))
    is_too_large = math_ops.greater(x, -threshold)
    too_small_value = math_ops.log(x)
    too_large_value = x
    # This `where` will ultimately be a NOP because we won't select this
    # codepath whenever we used the surrogate `ones_like`.
    x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
                        array_ops.ones_like(x), x)
    y = x + math_ops.log(-math_ops.expm1(-x))  # == log(expm1(x))
    return array_ops.where(is_too_small, too_small_value,
                           array_ops.where(is_too_large, too_large_value, y))
Example #18
 def _inverse_and_inverse_log_det_jacobian(self, y):
     y = self._maybe_assert_valid_y(y)
     event_dims = self._event_dims_tensor(y)
     if self.power == 0.:
         x = math_ops.log(y)
         ildj = -math_ops.reduce_sum(x, axis=event_dims)
         return x, ildj
     # TODO(jvdillon): If large y accuracy is an issue, consider using
     # (y**self.power - 1.) / self.power when y >> 1.
     x = math_ops.expm1(math_ops.log(y) * self.power) / self.power
     ildj = (self.power - 1.) * math_ops.reduce_sum(math_ops.log(y),
                                                    axis=event_dims)
     return x, ildj
Example #19
 def _cdf(self, counts):
   if self.validate_args:
     # We set `check_integer=False` since the CDF is defined on whole real
     # line.
     counts = math_ops.floor(
         distribution_util.embed_check_nonnegative_discrete(
             counts, check_integer=False))
   counts *= array_ops.ones_like(self.probs)
   return array_ops.where(
       counts < 0.,
       array_ops.zeros_like(counts),
       -math_ops.expm1(
           (counts + 1) * math_ops.log1p(-self.probs)))
Example #20
def modified_gan(logu, self_normalized=False, name=None):
  """The Modified-GAN Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the modified-GAN (Generative/Adversarial
  Network) Csiszar-function is:

  ```none
  f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
  ```

  When `self_normalized = False` the `0.5 (u - 1)` is omitted.

  The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
  `self_normalized = False`).

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    modified_gan_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with ops.name_scope(name, "modified_gan", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    y = nn_ops.softplus(logu) - logu
    if self_normalized:
      y += 0.5 * math_ops.expm1(logu)
    return y
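
An illustrative NumPy check (not from the original source) that `softplus(logu) - logu + 0.5 * expm1(logu)` equals the stated `log(1 + u) - log(u) + 0.5 * (u - 1)` with `u = exp(logu)`:

import numpy as np

logu = np.linspace(-2.0, 2.0, 9)
u = np.exp(logu)
actual = np.log1p(np.exp(logu)) - logu + 0.5 * np.expm1(logu)
expected = np.log(1.0 + u) - np.log(u) + 0.5 * (u - 1.0)
np.testing.assert_allclose(actual, expected)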
Example #21
def log1p_abs(logu, name=None):
  """The log1p-abs Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Log1p-Abs Csiszar-function is:

  ```none
  f(u) = u**(sign(u-1)) - 1
  ```

  This function is so-named because it was invented from the following recipe.
  Choose a convex function g such that g(0)=0 and solve for f:

  ```none
  log(1 + f(u)) = g(log(u)).
    <=>
  f(u) = exp(g(log(u))) - 1
  ```

  That is, the graph is identically `g` when the y-axis is in `log1p`-domain and
  the x-axis is in `log`-domain.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    log1p_abs_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with ops.name_scope(name, "log1p_abs", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    return math_ops.expm1(math_ops.abs(logu))
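
A small NumPy check (illustrative, assuming `u = exp(logu)`) that `expm1(|logu|)` matches `u**sign(u - 1) - 1`:

import numpy as np

logu = np.linspace(-2.0, 2.0, 9)
u = np.exp(logu)
np.testing.assert_allclose(np.expm1(np.abs(logu)),
                           u ** np.sign(u - 1.0) - 1.0)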
Example #22
def log1p_abs(logu, name=None):
    """The log1p-abs Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Log1p-Abs Csiszar-function is:

  ```none
  f(u) = u**(sign(u-1)) - 1
  ```

  This function is so-named because it was invented from the following recipe.
  Choose a convex function g such that g(0)=0 and solve for f:

  ```none
  log(1 + f(u)) = g(log(u)).
    <=>
  f(u) = exp(g(log(u))) - 1
  ```

  That is, the graph is identically `g` when the y-axis is in `log1p`-domain and
  the x-axis is in `log`-domain.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with ops.name_scope(name, "log1p_abs", [logu]):
        logu = ops.convert_to_tensor(logu, name="logu")
        return math_ops.expm1(math_ops.abs(logu))
Example #23
def modified_gan(logu, self_normalized=False, name=None):
    """The Modified-GAN Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the modified-GAN (Generative/Adversarial
  Network) Csiszar-function is:

  ```none
  f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
  ```

  When `self_normalized = False` the `0.5 (u - 1)` is omitted.

  The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
  `self_normalized = False`).

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    modified_gan_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with ops.name_scope(name, "modified_gan", [logu]):
        logu = ops.convert_to_tensor(logu, name="logu")
        y = nn_ops.softplus(logu) - logu
        if self_normalized:
            y += 0.5 * math_ops.expm1(logu)
        return y
Example #24
def jeffreys(logu, name=None):
  """The Jeffreys Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Jeffreys Csiszar-function is:

  ```none
  f(u) = 0.5 ( u log(u) - log(u) )
       = 0.5 kl_forward + 0.5 kl_reverse
       = symmetrized_csiszar_function(kl_reverse)
       = symmetrized_csiszar_function(kl_forward)
  ```

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jeffreys_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

  with ops.name_scope(name, "jeffreys", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    return 0.5 * math_ops.expm1(logu) * logu
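
An illustrative NumPy check that `0.5 * expm1(logu) * logu` equals the stated `0.5 * (u * log(u) - log(u))` with `u = exp(logu)`:

import numpy as np

logu = np.linspace(-2.0, 2.0, 9)
u = np.exp(logu)
np.testing.assert_allclose(0.5 * np.expm1(logu) * logu,
                           0.5 * (u * np.log(u) - np.log(u)))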
Example #25
def jeffreys(logu, name=None):
    """The Jeffreys Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  The Jeffreys Csiszar-function is:

  ```none
  f(u) = 0.5 ( u log(u) - log(u) )
       = 0.5 kl_forward + 0.5 kl_reverse
       = symmetrized_csiszar_function(kl_reverse)
       = symmetrized_csiszar_function(kl_forward)
  ```

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """

    with ops.name_scope(name, "jeffreys", [logu]):
        logu = ops.convert_to_tensor(logu, name="logu")
        return 0.5 * math_ops.expm1(logu) * logu
Example #26
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
  """The Amari-alpha Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True`, the Amari-alpha Csiszar-function is:

  ```none
  f(u) = { -log(u) + (u - 1),     alpha = 0
         { u log(u) - (u - 1),    alpha = 1
         { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)),    otherwise
  ```

  When `self_normalized = False` the `(u - 1)` terms are omitted.

  Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
  non-log-space calculations and may therefore be numerically unstable for
  `|logu| >> 0`.

  For more information, see:
    A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
    Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
    1532-1568, 2010.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    alpha: Floating-type Python scalar. (See Mathematical Details for meaning.)
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    amari_alpha_of_u: Floating-type `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.

  Raises:
    TypeError: if `alpha` is `None` or a `Tensor`.
    TypeError: if `self_normalized` is `None` or a `Tensor`.
  """
  with ops.name_scope(name, "amari_alpha", [logu]):
    if alpha is None or contrib_framework.is_tensor(alpha):
      raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
    if self_normalized is None or contrib_framework.is_tensor(self_normalized):
      raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")

    logu = ops.convert_to_tensor(logu, name="logu")

    if alpha == 0.:
      f = -logu
    elif alpha == 1.:
      f = math_ops.exp(logu) * logu
    else:
      f = math_ops.expm1(alpha * logu) / (alpha * (alpha - 1.))

    if not self_normalized:
      return f

    if alpha == 0.:
      return f + math_ops.expm1(logu)
    elif alpha == 1.:
      return f - math_ops.expm1(logu)
    else:
      return f - math_ops.expm1(logu) / (alpha - 1.)
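
A NumPy check (with an assumed `alpha = 0.5`) that the self-normalized expm1 formulation above matches `[(u**alpha - 1) - alpha*(u - 1)] / (alpha*(alpha - 1))`:

import numpy as np

alpha = 0.5
logu = np.linspace(-2.0, 2.0, 9)
u = np.exp(logu)
actual = (np.expm1(alpha * logu) / (alpha * (alpha - 1.0))
          - np.expm1(logu) / (alpha - 1.0))
expected = ((u ** alpha - 1.0) - alpha * (u - 1.0)) / (alpha * (alpha - 1.0))
np.testing.assert_allclose(actual, expected)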
Example #27
 def _forward(self, x):
   x = self._maybe_assert_valid_x(x)
   return -math_ops.expm1(-((x / self.scale) ** self.concentration))
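
This forward map has the form of a Weibull CDF. A NumPy sketch (with assumed, illustrative parameters) showing that the expm1 form equals `1 - exp(-(x / scale)**concentration)`:

import numpy as np

scale, concentration = 2.0, 1.5
x = np.array([0.0, 0.5, 1.0, 4.0])
cdf = -np.expm1(-((x / scale) ** concentration))
np.testing.assert_allclose(cdf, 1.0 - np.exp(-((x / scale) ** concentration)))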
Example #28
 def _forward(self, x):
   x = self._maybe_assert_valid_x(x)
   return -math_ops.expm1(-((x / self.scale) ** self.concentration))
Example #29
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
    """The Amari-alpha Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True`, the Amari-alpha Csiszar-function is:

  ```none
  f(u) = { -log(u) + (u - 1),     alpha = 0
         { u log(u) - (u - 1),    alpha = 1
         { [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)),    otherwise
  ```

  When `self_normalized = False` the `(u - 1)` terms are omitted.

  Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
  non-log-space calculations and may therefore be numerically unstable for
  `|logu| >> 0`.

  For more information, see:
    A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
    Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
    1532-1568, 2010.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.

  Raises:
    TypeError: if `alpha` is `None` or a `Tensor`.
    TypeError: if `self_normalized` is `None` or a `Tensor`.
  """
    with ops.name_scope(name, "amari_alpha", [logu]):
        if alpha is None or contrib_framework.is_tensor(alpha):
            raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
        if self_normalized is None or contrib_framework.is_tensor(
                self_normalized):
            raise TypeError(
                "`self_normalized` cannot be `None` or `Tensor` type.")

        logu = ops.convert_to_tensor(logu, name="logu")

        if alpha == 0.:
            f = -logu
        elif alpha == 1.:
            f = math_ops.exp(logu) * logu
        else:
            f = math_ops.expm1(alpha * logu) / (alpha * (alpha - 1.))

        if not self_normalized:
            return f

        if alpha == 0.:
            return f + math_ops.expm1(logu)
        elif alpha == 1.:
            return f - math_ops.expm1(logu)
        else:
            return f - math_ops.expm1(logu) / (alpha - 1.)