Example #1
    def testBetaIncGrads(self):
        err_tolerance = 1e-3
        with self.cached_session():
            # Test gradient
            ga_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
            gb_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
            gx_s = np.random.rand(2, 2)  # in (0, 1)
            tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)
            tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)
            tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
            tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
            err = gradient_checker.compute_gradient_error([tf_gx_s],
                                                          [gx_s.shape],
                                                          tf_gout_t,
                                                          gx_s.shape)
            tf_logging.info("betainc gradient err = %g " % err)
            self.assertLess(err, err_tolerance)

            # Test broadcast gradient
            gx_s = np.random.rand()  # in (0, 1)
            tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
            tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
            err = gradient_checker.compute_gradient_error([tf_gx_s], [()],
                                                          tf_gout_t,
                                                          ga_s.shape)
            tf_logging.info("betainc gradient err = %g " % err)
            self.assertLess(err, err_tolerance)
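Note: the gradient being checked has a closed form, since the derivative of the regularized incomplete beta function with respect to x is the Beta(a, b) density. A minimal sketch (not part of the test, arbitrary sample values) that confirms this against a central finite difference in NumPy/SciPy:

import numpy as np
from scipy import special, stats

a, b, x = 2.5, 4.0, 0.3  # arbitrary points with a, b > 0 and x in (0, 1)
eps = 1e-6

# d/dx betainc(a, b, x) equals the Beta(a, b) pdf evaluated at x.
numeric = (special.betainc(a, b, x + eps) -
           special.betainc(a, b, x - eps)) / (2 * eps)
analytic = stats.beta.pdf(x, a, b)
assert np.isclose(numeric, analytic, rtol=1e-4)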
Example #2
 def _cdf(self, x):
   # We use the same notation here as the Wikipedia article on
   # Student's t-distribution.
   t = (x - self.mu) / self.sigma
   x_t = self.df / (math_ops.square(t) + self.df)
   # The cdf is defined differently for positive and negative t
   positive_cdf = 1. - 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
   negative_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
   return array_ops.where(math_ops.less(t, 0), negative_cdf, positive_cdf)
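This CDF uses the standard identity for Student's t with df degrees of freedom: with x_t = df / (t**2 + df), F(t) = 1 - 0.5 * I(df/2, 1/2, x_t) for t >= 0 and F(t) = 0.5 * I(df/2, 1/2, x_t) for t < 0, where I is the regularized incomplete beta function. A quick sanity check against scipy.stats.t (arbitrary df, not from the original code):

import numpy as np
from scipy import special, stats

df = 5.0
t = np.linspace(-4.0, 4.0, 9)
x_t = df / (t**2 + df)
half_inc = 0.5 * special.betainc(0.5 * df, 0.5, x_t)
cdf = np.where(t < 0, half_inc, 1.0 - half_inc)
assert np.allclose(cdf, stats.t.cdf(t, df))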
Example #3
 def _cdf(self, x):
     # We use the same notation here as the Wikipedia article on
     # Student's t-distribution.
     t = (x - self.mu) / self.sigma
     x_t = self.df / (math_ops.square(t) + self.df)
     # The cdf is defined differently for positive and negative t
     positive_cdf = 1. - 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
     negative_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
     return array_ops.where(math_ops.less(t, 0), negative_cdf, positive_cdf)
Example #4
    def testBetaIncFpropAndBpropAreNeverNAN(self):
        with self.cached_session() as sess:
            space = np.logspace(-8, 5).tolist()
            if platform.machine() == 'aarch64':
                space_x = np.linspace(1e-16, 1 - 1e-7).tolist()
            else:
                space_x = np.linspace(1e-16, 1 - 1e-16).tolist()
            ga_s, gb_s, gx_s = zip(
                *list(itertools.product(space, space, space_x)))
            # Test grads are never nan
            ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)
            gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)
            gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)
            tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)
            tf_gout, grads_x = sess.run([
                tf_gout_t,
                gradients_impl.gradients(tf_gout_t,
                                         [ga_s_t, gb_s_t, gx_s_t])[2]
            ])

            # Equivalent to `assertAllFalse` (if it existed).
            self.assertAllEqual(
                np.zeros_like(grads_x).astype(np.bool_), np.isnan(tf_gout))
            self.assertAllEqual(
                np.zeros_like(grads_x).astype(np.bool_), np.isnan(grads_x))
Example #5
 def _cdf(self, positive_counts):
     if self.validate_args:
         positive_counts = math_ops.floor(
             distribution_util.embed_check_nonnegative_discrete(
                 positive_counts, check_integer=False))
     return math_ops.betainc(self.total_count, positive_counts + 1.,
                             math_ops.sigmoid(-self.logits))
Example #6
 def _cdf(self, positive_counts):
   if self.validate_args:
     positive_counts = math_ops.floor(
         distribution_util.embed_check_nonnegative_discrete(
             positive_counts, check_integer=False))
   return math_ops.betainc(
       self.total_count, positive_counts + 1.,
       math_ops.sigmoid(-self.logits))
Example #7
  def testBetaIncGrads(self):
    err_tolerance = 1e-3
    with self.cached_session():
      # Test gradient
      ga_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
      gb_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
      gx_s = np.random.rand(2, 2)  # in (0, 1)
      tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)
      tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)
      tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
      tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
      err = gradient_checker.compute_gradient_error(
          [tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)
      tf_logging.info("betainc gradient err = %g " % err)
      self.assertLess(err, err_tolerance)

      # Test broadcast gradient
      gx_s = np.random.rand()  # in (0, 1)
      tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
      tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
      err = gradient_checker.compute_gradient_error(
          [tf_gx_s], [()], tf_gout_t, ga_s.shape)
      tf_logging.info("betainc gradient err = %g " % err)
      self.assertLess(err, err_tolerance)
Example #8
  def testBetaIncFpropAndBpropAreNeverNAN(self):
    with self.cached_session() as sess:
      space = np.logspace(-8, 5).tolist()
      space_x = np.linspace(1e-16, 1 - 1e-16).tolist()
      ga_s, gb_s, gx_s = zip(*list(itertools.product(space, space, space_x)))
      # Test grads are never nan
      ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)
      gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)
      gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)
      tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)
      tf_gout, grads_x = sess.run(
          [tf_gout_t,
           gradients_impl.gradients(tf_gout_t, [ga_s_t, gb_s_t, gx_s_t])[2]])

      # Equivalent to `assertAllFalse` (if it existed).
      self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool_),
                          np.isnan(tf_gout))
      self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool_),
                          np.isnan(grads_x))
Example #9
def _bdtr(k, n, p):
  """The binomial cumulative distribution function.

  Args:
    k: floating point `Tensor`.
    n: floating point `Tensor`.
    p: floating point `Tensor`.

  Returns:
    `sum_{j=0}^k (n choose j) p^j (1 - p)^(n - j)`.
  """
  # Trick for getting safe backprop/gradients into n, k when
  #   betainc(a = 0, ..) = nan
  # Write:
  #   where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))
  ones = array_ops.ones_like(n - k)
  k_eq_n = math_ops.equal(k, n)
  safe_dn = array_ops.where(k_eq_n, ones, n - k)
  dk = math_ops.betainc(a=safe_dn, b=k + 1, x=1 - p)
  return array_ops.where(k_eq_n, ones, dk)
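The identity behind _bdtr is bdtr(k, n, p) = I(n - k, k + 1, 1 - p), and the where-trick is needed because the first betainc argument, n - k, hits zero exactly when k == n, where the CDF is 1 by definition. A short SciPy cross-check of the identity (arbitrary values, keeping k < n):

import numpy as np
from scipy import special, stats

n, p = 10.0, 0.3
k = np.arange(0, 10)  # k < n so that a = n - k stays positive
via_betainc = special.betainc(n - k, k + 1.0, 1.0 - p)
assert np.allclose(via_betainc, stats.binom.cdf(k, n, p))
# At k == n, betainc(0, ...) would be nan; _bdtr substitutes 1 there.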
Example #10
 def _cdf(self, x):
   return math_ops.betainc(self.a, self.b, x)
Example #11
 def _cdf(self, x):
     return math_ops.betainc(self.concentration1, self.concentration0, x)
Example #12
 def _cdf(self, x):
   if self.validate_args:
     x = distribution_util.embed_check_nonnegative_integer_form(x)
   return math_ops.betainc(self.total_count, 1. + x,
                           math_ops.sigmoid(-self.logits))
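This negative binomial CDF (also used in Examples #5 and #6) rests on the identity P(X <= x) = I(total_count, x + 1, q) with q = sigmoid(-logits) = 1 - probs. A hedged cross-check using SciPy, whose nbinom takes the same q as its probability parameter under this reading (arbitrary values, not from the original code):

import numpy as np
from scipy import special, stats

total_count = 6.0
q = 0.25  # plays the role of sigmoid(-logits) above
x = np.arange(0, 8)
via_betainc = special.betainc(total_count, x + 1.0, q)
assert np.allclose(via_betainc, stats.nbinom.cdf(x, total_count, q))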
Example #13
    def _testBetaInc(self, a_s, b_s, x_s, dtype):
        try:
            from scipy import special  # pylint: disable=g-import-not-at-top
            np_dt = dtype.as_numpy_dtype

            # Test random values
            a_s = a_s.astype(np_dt)  # in (0, infty)
            b_s = b_s.astype(np_dt)  # in (0, infty)
            x_s = x_s.astype(np_dt)  # in (0, 1)
            tf_a_s = constant_op.constant(a_s, dtype=dtype)
            tf_b_s = constant_op.constant(b_s, dtype=dtype)
            tf_x_s = constant_op.constant(x_s, dtype=dtype)
            tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
            with self.cached_session():
                tf_out = self.evaluate(tf_out_t)
            scipy_out = special.betainc(a_s, b_s, x_s, dtype=np_dt)

            # the scipy version of betainc uses a double-only implementation.
            # TODO(ebrevdo): identify reasons for (sometime) precision loss
            # with doubles
            rtol = 1e-4
            atol = 1e-5
            self.assertAllCloseAccordingToType(scipy_out,
                                               tf_out,
                                               rtol=rtol,
                                               atol=atol)

            # Test out-of-range values (most should return nan output)
            combinations = list(
                itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
            a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)),
                                                dtype=np_dt)
            with self.cached_session():
                tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
            scipy_comb = special.betainc(a_comb, b_comb, x_comb, dtype=np_dt)
            self.assertAllCloseAccordingToType(scipy_comb,
                                               tf_comb,
                                               rtol=rtol,
                                               atol=atol)

            # Test broadcasting between scalars and other shapes
            with self.cached_session():
                self.assertAllCloseAccordingToType(
                    special.betainc(0.1, b_s, x_s, dtype=np_dt),
                    math_ops.betainc(0.1, b_s, x_s).eval(),
                    rtol=rtol,
                    atol=atol)
                self.assertAllCloseAccordingToType(
                    special.betainc(a_s, 0.1, x_s, dtype=np_dt),
                    math_ops.betainc(a_s, 0.1, x_s).eval(),
                    rtol=rtol,
                    atol=atol)
                self.assertAllCloseAccordingToType(
                    special.betainc(a_s, b_s, 0.1, dtype=np_dt),
                    math_ops.betainc(a_s, b_s, 0.1).eval(),
                    rtol=rtol,
                    atol=atol)
                self.assertAllCloseAccordingToType(
                    special.betainc(0.1, b_s, 0.1, dtype=np_dt),
                    math_ops.betainc(0.1, b_s, 0.1).eval(),
                    rtol=rtol,
                    atol=atol)
                self.assertAllCloseAccordingToType(
                    special.betainc(0.1, 0.1, 0.1, dtype=np_dt),
                    math_ops.betainc(0.1, 0.1, 0.1).eval(),
                    rtol=rtol,
                    atol=atol)

            with self.assertRaisesRegex(ValueError, "must be equal"):
                math_ops.betainc(0.5, [0.5], [[0.5]])

            with self.cached_session():
                with self.assertRaisesOpError("Shapes of .* are inconsistent"):
                    a_p = array_ops.placeholder(dtype)
                    b_p = array_ops.placeholder(dtype)
                    x_p = array_ops.placeholder(dtype)
                    math_ops.betainc(a_p, b_p, x_p).eval(feed_dict={
                        a_p: 0.5,
                        b_p: [0.5],
                        x_p: [[0.5]]
                    })

        except ImportError as e:
            tf_logging.warn("Cannot test special functions: %s" % str(e))
Example #14
 def _cdf(self, x):
     # Take Abs(scale) to make subsequent where work correctly.
     y = (x - self.loc) / math_ops.abs(self.scale)
     x_t = self.df / (y**2. + self.df)
     neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
     return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
Example #15
 def _cdf(self, x):
   return math_ops.betainc(self.a, self.b, x)
Example #16
 def _cdf(self, x):
   return math_ops.betainc(self.concentration1, self.concentration0, x)
Example #17
 def _cdf(self, x):
     if self.validate_args:
         x = distribution_util.embed_check_nonnegative_integer_form(x)
     return math_ops.betainc(self.total_count, 1. + x,
                             math_ops.sigmoid(-self.logits))
Example #18
 def _cdf(self, x):
   # Take Abs(scale) to make subsequent where work correctly.
   y = (x - self.loc) / math_ops.abs(self.scale)
   x_t = self.df / (y**2. + self.df)
   neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
   return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
Example #19
  def _testBetaInc(self, a_s, b_s, x_s, dtype):
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      np_dt = dtype.as_numpy_dtype

      # Test random values
      a_s = a_s.astype(np_dt)  # in (0, infty)
      b_s = b_s.astype(np_dt)  # in (0, infty)
      x_s = x_s.astype(np_dt)  # in (0, 1)
      tf_a_s = constant_op.constant(a_s, dtype=dtype)
      tf_b_s = constant_op.constant(b_s, dtype=dtype)
      tf_x_s = constant_op.constant(x_s, dtype=dtype)
      tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
      with self.cached_session():
        tf_out = tf_out_t.eval()
      scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)

      # the scipy version of betainc uses a double-only implementation.
      # TODO(ebrevdo): identify reasons for (sometime) precision loss
      # with doubles
      tol = 1e-4 if dtype == dtypes.float32 else 5e-5
      self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=0)

      # Test out-of-range values (most should return nan output)
      combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
      a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)
      with self.cached_session():
        tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
      scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)
      self.assertAllCloseAccordingToType(scipy_comb, tf_comb)

      # Test broadcasting between scalars and other shapes
      with self.cached_session():
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, b_s, x_s).astype(np_dt),
            math_ops.betainc(0.1, b_s, x_s).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(a_s, 0.1, x_s).astype(np_dt),
            math_ops.betainc(a_s, 0.1, x_s).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(a_s, b_s, 0.1).astype(np_dt),
            math_ops.betainc(a_s, b_s, 0.1).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, b_s, 0.1).astype(np_dt),
            math_ops.betainc(0.1, b_s, 0.1).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, 0.1, 0.1).astype(np_dt),
            math_ops.betainc(0.1, 0.1, 0.1).eval(),
            rtol=tol,
            atol=0)

      with self.assertRaisesRegex(ValueError, "must be equal"):
        math_ops.betainc(0.5, [0.5], [[0.5]])

      with self.cached_session():
        with self.assertRaisesOpError("Shapes of .* are inconsistent"):
          a_p = array_ops.placeholder(dtype)
          b_p = array_ops.placeholder(dtype)
          x_p = array_ops.placeholder(dtype)
          math_ops.betainc(a_p, b_p, x_p).eval(
              feed_dict={a_p: 0.5,
                         b_p: [0.5],
                         x_p: [[0.5]]})

    except ImportError as e:
      tf_logging.warn("Cannot test special functions: %s" % str(e))