Example #1
  def _quantile(self, p, loc=None, scale=None, low=None, high=None):
    loc, scale, low, high = self._loc_scale_low_high(loc, scale, low, high)
    std_low, std_high = self._standardized_low_and_high(
        low=low, high=high, loc=loc, scale=scale)
    # Use the sum of tangents formula.
    # First, the quantile of the Cauchy distribution is tan(pi * (p - 0.5)),
    # and the cdf of the Cauchy distribution is 0.5 + arctan(x) / pi.
    # WLOG, we will assume loc = 0, scale = 1 (these can be taken into account
    # by rescaling and shifting low and high, and then scaling the output).
    # We would like to compute quantile(cdf(low) + (cdf(high) - cdf(low)) * p).
    # This is the same as:
    # tan(pi * (cdf(low) + (cdf(high) - cdf(low)) * p - 0.5))
    # Let a = pi * (cdf(low) - 0.5), b = pi * (cdf(high) - cdf(low)) * p.
    # By using the formula for the cdf we have:
    # a = arctan(low), b = arctan_difference(high, low) * p
    # Thus the quantile is tan(a + b).
    # By the sum of tangents formula, and since tan(a) = tan(arctan(low)) = low:
    # tan(a + b) = (tan(a) + tan(b)) / (1 - tan(a) * tan(b))
    #            = (low + tan(b)) / (1 - low * tan(b))
    # Thus for a 'standard' truncated Cauchy the quantile is:
    # quantile(p) = (low + tan(b)) / (1 - low * tan(b)), where
    # b = arctan_difference(high, low) * p.

    tanb = tf.math.tan(tfp_math.atan_difference(std_high, std_low) * p)
    x = (std_low + tanb) / (1 - std_low * tanb)
    # Clip the answer to prevent it from falling numerically outside
    # the support.
    return numeric.clip_by_value_preserve_gradient(
        x * scale + loc, clip_value_min=low, clip_value_max=high)
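
The identity derived in the comments above can be sanity-checked numerically. Here is a minimal NumPy sketch; the values of low, high, and p are arbitrary illustrations, not taken from the library:

import numpy as np

low, high, p = -2.0, 3.0, 0.25

# Direct route: invert the standard Cauchy cdf at the renormalized probability.
cdf = lambda x: 0.5 + np.arctan(x) / np.pi
direct = np.tan(np.pi * (cdf(low) + (cdf(high) - cdf(low)) * p - 0.5))

# Sum-of-tangents route, as used in the code above.
b = (np.arctan(high) - np.arctan(low)) * p
via_identity = (low + np.tan(b)) / (1 - low * np.tan(b))

assert np.isclose(direct, via_identity)  # both give the truncated quantile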
Example #2
  def test_clip_by_value_preserve_grad(self, x, lo, hi, expected_y):
    # The gradient of the clipped value should be 1 everywhere, even for
    # inputs that fall outside [lo, hi].
    expected_dydx = np.ones_like(x)
    x = tf.convert_to_tensor(value=x, dtype=self.dtype)
    y, dydx = tfp_math_gradient.value_and_gradient(
        lambda x_: numeric.clip_by_value_preserve_gradient(x_, lo, hi), x)
    y_, dydx_ = self.evaluate([y, dydx])
    self.assertAllClose(expected_y, y_)
    self.assertAllClose(expected_dydx, dydx_)
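
Outside a test harness, the same behavior can be observed directly in eager mode. A short sketch, assuming the public tfp.math counterparts of the modules used in the test above:

import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([-2.0, 0.5, 3.0])
y, dydx = tfp.math.value_and_gradient(
    lambda x_: tfp.math.clip_by_value_preserve_gradient(x_, 0., 1.), x)
print(y.numpy())     # [0.  0.5 1. ] -- values are clipped into [0, 1]
print(dydx.numpy())  # [1.  1.  1. ] -- gradient is 1 even outside the bounds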
Example #3
    def test_clip_by_value_preserve_grad(self, x, lo, hi, expected_y):
        expected_dy_dx = np.ones_like(x)
        x = tf.convert_to_tensor(x, dtype=self.dtype)
        with tf.GradientTape() as tape:
            # `x` is a plain tensor, not a variable, so the tape has to be
            # told to watch it explicitly.
            tape.watch(x)
            y = numeric.clip_by_value_preserve_gradient(x, lo, hi)
        dy_dx = tape.gradient(y, x)

        y, dy_dx = self.evaluate([y, dy_dx])

        self.assertAllClose(expected_y, y)
        self.assertAllClose(expected_dy_dx, dy_dx)
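
Both tests expect an all-ones gradient because the op can be expressed with tf.stop_gradient: forward it behaves like tf.clip_by_value, while the backward pass sees only the identity term. A minimal sketch of that construction follows; the library's actual implementation may differ in details:

import tensorflow as tf

def clip_preserve_gradient_sketch(x, lo, hi):
  # Forward value equals tf.clip_by_value(x, lo, hi); the stop_gradient
  # term is a constant for autodiff, so dy/dx = 1 everywhere.
  return x + tf.stop_gradient(tf.clip_by_value(x, lo, hi) - x)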
Example #4
  def _forward(self, x):
    # Clip into the configured interval; the gradient-preserving variant
    # keeps dy/dx = 1, so inputs pushed to the boundary still get gradients.
    return clip_by_value_preserve_gradient(x, self._clip_value_min,
                                           self._clip_value_max)
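
A minimal hypothetical context for the method above: a bijector-style wrapper whose forward pass clips into a fixed interval while keeping the identity gradient. The class name and attributes here are illustrative assumptions, not the actual library class:

import tensorflow as tf
import tensorflow_probability as tfp

class ClipToInterval:  # hypothetical wrapper, for illustration only
  def __init__(self, clip_value_min, clip_value_max):
    self._clip_value_min = clip_value_min
    self._clip_value_max = clip_value_max

  def _forward(self, x):
    return tfp.math.clip_by_value_preserve_gradient(
        x, self._clip_value_min, self._clip_value_max)

clip = ClipToInterval(-1., 1.)
print(clip._forward(tf.constant([-3., 0.2, 5.])).numpy())  # [-1.   0.2  1. ]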