Example #1
import tensorflow as tf
from tensorflow_probability.python.math import numeric


# In the original suite this method sits in a `tf.test.TestCase` subclass
# and a parameterized-test decorator supplies the three arguments; the
# class wrapper here is illustrative.
class Log1pSquareTest(tf.test.TestCase):

  def test_log1psquare(self, x, expected_y, expected_dy_dx):
    x = tf.convert_to_tensor(x)
    with tf.GradientTape() as tape:
      tape.watch(x)  # Watch `x` so we can differentiate through log1psquare.
      y = numeric.log1psquare(x)
    dy_dx = tape.gradient(y, x)

    # Resolve tensors to numpy values (works in both graph and eager modes).
    y, dy_dx = self.evaluate([y, dy_dx])

    self.assertAllClose(expected_y, y)
    self.assertAllClose(expected_dy_dx, dy_dx)
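
For context, `numeric.log1psquare(x)` is TFP's numerically stable `log(1 + x**2)`. A minimal sketch of the idea (an assumed outline, not TFP's exact implementation): once `x**2` exceeds `1/eps`, the `1 +` term is lost to rounding anyway, so the function can switch to the asymptote `2 * log(|x|)` and avoid overflowing `x**2`.

import numpy as np
import tensorflow as tf


def log1psquare_sketch(x):
  """Stable log(1 + x**2); illustrative sketch, not TFP's implementation."""
  x = tf.convert_to_tensor(x)
  eps = np.finfo(x.dtype.as_numpy_dtype).eps
  # |x| * sqrt(eps) > 1  <=>  x**2 > 1/eps, where 1 + x**2 == x**2
  # in floating point; use the asymptote there.
  is_large = tf.abs(x) * np.sqrt(eps) > 1.
  return tf.where(is_large,
                  2. * tf.math.log(tf.abs(x)),
                  tf.math.log1p(tf.square(x)))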
Example #2
import numpy as np
import tensorflow as tf
from tensorflow_probability import math as tfp_math
from tensorflow_probability.python.math.numeric import log1psquare


def log_prob(x, df, loc, scale):
  """Computes the log probability of a Student's t-distribution.

  Note that `scale` can be negative; only its absolute value matters.

  Args:
    x: Floating-point `Tensor`. Where to compute the log probabilities.
    df: Floating-point `Tensor`. The degrees of freedom of the
      distribution(s). `df` must contain only positive values.
    loc: Floating-point `Tensor`; the location(s) of the distribution(s).
    scale: Floating-point `Tensor`; the scale(s) of the distribution(s).

  Returns:
    A `Tensor` with shape broadcast according to the arguments.
  """
  # Writing `y` this way reduces XLA mem copies.
  y = (x - loc) * (tf.math.rsqrt(df) / scale)
  log_unnormalized_prob = -0.5 * (df + 1.) * log1psquare(y)
  # log_gamma_difference(0.5, 0.5 * df) is lgamma(0.5 * df) -
  # lgamma(0.5 * (df + 1.)), computed stably even for large df.
  log_normalization = (tf.math.log(tf.abs(scale)) + 0.5 * tf.math.log(df) +
                       0.5 * np.log(np.pi) +
                       tfp_math.log_gamma_difference(0.5, 0.5 * df))
  return log_unnormalized_prob - log_normalization
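
A quick usage sketch for the function above (values illustrative). With the imports in place, it can be cross-checked against `tfp.distributions.StudentT`, which implements the same density:

import tensorflow_probability as tfp

x = tf.constant([0., 1., 2.])
lp = log_prob(x, df=3., loc=0., scale=2.)
# Reference: the library distribution (positive-scale case).
ref = tfp.distributions.StudentT(df=3., loc=0., scale=2.).log_prob(x)
# lp and ref should agree to numerical precision.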