def _testSoftplus(self, np_features, use_gpu=False):
  np_features = np.asarray(np_features)
  np_softplus = self._npSoftplus(np_features)
  with self.test_session(use_gpu=use_gpu) as sess:
    softplus = nn_ops.softplus(np_features)
    softplus_inverse = distribution_util.softplus_inverse(softplus)
    [tf_softplus, tf_softplus_inverse] = sess.run([
        softplus, softplus_inverse])
  self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
  rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
      str(np_features.dtype), 1e-6)
  # This will test that we correctly computed the inverse by verifying we
  # recovered the original input.
  self.assertAllCloseAccordingToType(
      np_features, tf_softplus_inverse,
      atol=0., rtol=rtol)
  self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                      tf_softplus > 0)

  self.assertShapeEqual(np_softplus, softplus)
  self.assertShapeEqual(np_softplus, softplus_inverse)

  self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                      np.isfinite(tf_softplus))
  self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
                      np.isfinite(tf_softplus_inverse))
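
# `_npSoftplus` is not shown in this snippet. A minimal NumPy sketch of such a
# reference (an assumption about the helper, not the original code): compute
# softplus(x) = log(1 + exp(x)) via logaddexp so large x does not overflow,
# then mirror the round trip the test above checks against TensorFlow.
import numpy as np

def _np_softplus_reference(np_features):
  np_features = np.asarray(np_features)
  zero = np.asarray(0).astype(np_features.dtype)
  # log(1 + exp(x)) written as logaddexp(0, x) to stay finite for large x.
  return np.logaddexp(zero, np_features)

x = np.array([-5., -0.5, 0.25, 3., 20.])
y = _np_softplus_reference(x)
# One stable form of softplus_inverse(y) = log(exp(y) - 1).
x_back = y + np.log(-np.expm1(-y))
np.testing.assert_allclose(x_back, x, rtol=1e-6)
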
def testInverseSoftplusNeverNan(self):
  with self.test_session():
    # Note that this range contains both zero and inf after the float16 cast.
    x = constant_op.constant((10.**np.arange(-8, 6)).astype(np.float16))
    y = distribution_util.softplus_inverse(x).eval()
    # Equivalent to `assertAllFalse` (if it existed).
    self.assertAllEqual(np.zeros_like(y).astype(np.bool), np.isnan(y))
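
# A minimal NumPy sketch (not the library implementation) of why the inverse
# needs care in low precision: the naive log(exp(x) - 1) overflows to inf for
# moderate x in float16, while the algebraically equivalent rewrite
# x + log(-expm1(-x)) (one standard stable form, assumed here; the library
# routine may differ in details) stays accurate and NaN-free over the same
# range the test above uses.
import numpy as np

x = (10.**np.arange(-8, 6)).astype(np.float16)  # rounds to [0, ..., inf]
with np.errstate(divide="ignore", over="ignore"):
  naive = np.log(np.exp(np.float16(12.)) - 1.)                    # exp(12) overflows: inf
  stable = np.float16(12.) + np.log(-np.expm1(np.float16(-12.)))  # ~12.0
  y = x + np.log(-np.expm1(-x))
assert np.isinf(naive) and np.isclose(stable, 12., atol=0.01)
assert not np.any(np.isnan(y))  # mirrors the assertion in the test above
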
def testInverseSoftplusGradientNeverNan(self):
  with self.test_session():
    # Note that this range contains both zero and inf.
    x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))
    y = distribution_util.softplus_inverse(x)
    grads = gradients_impl.gradients(y, x)[0].eval()
    # Equivalent to `assertAllFalse` (if it existed).
    self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))
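
# A minimal NumPy illustration (not TensorFlow's actual gradient code) of the
# hazard the gradient test above guards against: the textbook derivative of
# log(exp(y) - 1) is exp(y) / (exp(y) - 1), which evaluates to inf / inf = NaN
# once y has overflowed to inf, whereas the equivalent rewrite
# 1 / (1 - exp(-y)) stays NaN-free (the same rewrite appears in the bijector
# comment further below).
import numpy as np

y = np.float16(np.inf)
with np.errstate(invalid="ignore", over="ignore"):
  naive_grad = np.exp(y) / (np.exp(y) - 1.)   # inf / inf -> nan
  stable_grad = 1. / (1. - np.exp(-y))        # 1 / (1 - 0) -> 1.0
assert np.isnan(naive_grad)
assert stable_grad == 1.0
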
def _inverse_and_inverse_log_det_jacobian(self, y):
  event_dims = self._event_dims_tensor(y)
  # Could also do:
  #   ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
  #                              axis=event_dims)
  # but the following is more numerically stable. I.e.,
  # Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
  # ==> dX/dY = exp{Y} / (exp{Y} - 1)
  #           = 1 / (1 - exp{-Y}),
  # which is the most stable for large Y > 0. For small Y, we use
  # 1 - exp{-Y} approx Y.
  ildj = -math_ops.reduce_sum(math_ops.log(-math_ops.expm1(-y)),
                              axis=event_dims)
  return distribution_util.softplus_inverse(y), ildj
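
# A small NumPy check (not part of the original code) of the identity the
# comment above derives: if Y = softplus(X), then 1 - exp(-Y) = sigmoid(X), so
# the per-element inverse log-det-Jacobian -log(-expm1(-Y)) must equal
# -log(sigmoid(X)), i.e. the negative per-element forward log-det-Jacobian.
import numpy as np

x = np.linspace(-4., 4., 9)
y = np.log1p(np.exp(x))                          # softplus forward
ildj_stable = -np.log(-np.expm1(-y))             # what the code above computes
ildj_from_x = -np.log(1. / (1. + np.exp(-x)))    # -log(dY/dX) = -log(sigmoid(X))
np.testing.assert_allclose(ildj_stable, ildj_from_x, rtol=1e-6)
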
def _inverse(self, y):
  return distribution_util.softplus_inverse(y)