Example #1
 def testXlogyWithZero(self):
   for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
     x = constant_op.constant(np.zeros((2, 3)), dtype=dtype)
     y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
     with self.cached_session(use_gpu=True):
       xlogy_tf_np = self.evaluate(math_ops.xlogy(x, y))
       zeros_np = self.evaluate(array_ops.zeros_like(y))
       self.assertAllClose(xlogy_tf_np, zeros_np)
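The test above pins down xlogy's defining convention: the result is exactly 0 wherever x == 0, even where log(y) would be -inf or nan. A minimal eager-mode sketch of the same check, assuming TensorFlow 2.x and the public tf.math.xlogy API (the examples here use the internal math_ops module):

import numpy as np
import tensorflow as tf

x = tf.zeros((2, 3), dtype=tf.float32)
y = tf.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=tf.float32)

naive = x * tf.math.log(y)   # 0 * log(0) -> nan where y == 0
safe = tf.math.xlogy(x, y)   # defined to be 0 wherever x == 0

print(naive.numpy())         # contains nan in the position where y == 0
print(safe.numpy())          # all zeros
np.testing.assert_allclose(safe.numpy(), np.zeros((2, 3)))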
Example #2
 def testXlogyNoZero(self):
   for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
     x = constant_op.constant([[0.1, 0.2, 3.5], [-2., -5., 30.]], dtype=dtype)
     y = constant_op.constant([[0.1, 0.2, 3.5], [3.1, 4., 2.]], dtype=dtype)
     with self.cached_session(use_gpu=True):
       xlogy = self.evaluate(math_ops.xlogy(x, y))
       xtimeslogy = self.evaluate(x * math_ops.log(y))
       self.assertAllClose(xlogy, xtimeslogy)
Example #3
 def testXlogyWithZeroBroadcast(self):
   for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
     x = constant_op.constant([[0.], [1.]], dtype=dtype)
     y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
     with self.cached_session(use_gpu=True):
       xlogy_tf_np = self.evaluate(math_ops.xlogy(x, y))
       zeros_np = self.evaluate(array_ops.zeros_like(y[0]))
       xtimes_logy = self.evaluate(math_ops.log(y[1]))
       self.assertAllClose(zeros_np, xlogy_tf_np[0])
       self.assertAllClose(xtimes_logy, xlogy_tf_np[1])
Example #4
 def testXlogyNoZero(self):
   for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
     x = constant_op.constant([[0.1, 0.2, 3.5], [-2., -5., 30.]], dtype=dtype)
     y = constant_op.constant([[0.1, 0.2, 3.5], [3.1, 4., 2.]], dtype=dtype)
     with test_util.use_gpu():
       xlogy = self.evaluate(math_ops.xlogy(x, y))
       xtimeslogy = self.evaluate(x * math_ops.log(y))
       self.assertAllClose(xlogy, xtimeslogy)
Example #5
 def testXlogyWithZeroBroadcast(self):
   for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
     x = constant_op.constant([[0.], [1.]], dtype=dtype)
     y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
     with test_util.use_gpu():
       xlogy_tf_np = self.evaluate(math_ops.xlogy(x, y))
       zeros_np = self.evaluate(array_ops.zeros_like(y[0]))
       xtimes_logy = self.evaluate(math_ops.log(y[1]))
       self.assertAllClose(zeros_np, xlogy_tf_np[0])
       self.assertAllClose(xtimes_logy, xlogy_tf_np[1])
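Both broadcast tests above exercise the same property; here is a minimal TF 2.x sketch (public tf.math.xlogy API assumed) showing how a (2, 1) x broadcasts against a (2, 3) y: the x == 0 row comes out as exact zeros, while the x == 1 row reduces to log(y), including -inf where y == 0.

import tensorflow as tf

x = tf.constant([[0.], [1.]])                     # shape (2, 1), broadcast over columns
y = tf.constant([[0.1, 0.2, 3.5], [0., 1., 2.]])  # shape (2, 3)

out = tf.math.xlogy(x, y)
print(out[0].numpy())   # [0. 0. 0.]            (x == 0 masks the log entirely)
print(out[1].numpy())   # [-inf 0. 0.6931...]   (equals tf.math.log(y[1]))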
Example #6
def implicit_reparameterization_grad(a, x):
    log_prob = math_ops.xlogy(a - 1., x) - math_ops.lgamma(a) - x
    prob = math_ops.exp(log_prob)
    return -gen_math_ops.igamma_grad_a(a, x) / prob
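Example #6 evaluates the log-density of a unit-rate Gamma(a) at x, log p(x) = (a - 1) * log(x) - lgamma(a) - x, as part of an implicit reparameterization gradient. Using xlogy keeps the (a - 1) * log(x) term at 0 when a == 1 and x == 0, where the naive product would produce nan. A minimal sketch of that corner case, assuming TF 2.x public ops:

import tensorflow as tf

a = tf.constant(1.0)
x = tf.constant(0.0)

naive = (a - 1.) * tf.math.log(x) - tf.math.lgamma(a) - x   # 0 * (-inf) -> nan
safe = tf.math.xlogy(a - 1., x) - tf.math.lgamma(a) - x     # -> 0.0

print(naive.numpy(), safe.numpy())   # nan 0.0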
Example #7
 def _xlogy_gradients(self, x, y):
   xlogy_xgrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), x)[0])
   xlogy_ygrad = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), y)[0])
   return xlogy_xgrad, xlogy_ygrad
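The helper above evaluates both partial derivatives of xlogy through the graph-mode gradients API. A TF 2.x GradientTape sketch of the same quantities (public API assumed): away from the x == 0 corner, d/dx xlogy(x, y) = log(y) and d/dy xlogy(x, y) = x / y.

import tensorflow as tf

x = tf.constant(3.0)
y = tf.constant(2.0)

with tf.GradientTape(persistent=True) as tape:
  tape.watch([x, y])
  z = tf.math.xlogy(x, y)

print(tape.gradient(z, x).numpy())   # log(2) ~= 0.6931
print(tape.gradient(z, y).numpy())   # x / y = 1.5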
Example #8
 def _log_unnormalized_prob(self, x):
   x = self._maybe_assert_valid_sample(x)
   return math_ops.reduce_sum(math_ops.xlogy(self.concentration - 1., x), -1)
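Example #8 is the unnormalized Dirichlet log-density, the sum over the last axis of (concentration_i - 1) * log(x_i). With xlogy, a component where concentration_i == 1 and x_i == 0 contributes exactly 0 instead of nan, so boundary points of the simplex stay finite. A minimal sketch with assumed example values:

import tensorflow as tf

concentration = tf.constant([1.0, 2.0, 3.0])
x = tf.constant([[0.0, 0.4, 0.6],    # boundary point: x[0] == 0 but concentration[0] == 1
                 [0.2, 0.3, 0.5]])

log_unnorm = tf.reduce_sum(tf.math.xlogy(concentration - 1., x), axis=-1)
print(log_unnorm.numpy())   # both rows finite; the naive product would give nan for row 0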
Example #9
 def _log_unnormalized_prob(self, x):
   x = self._maybe_assert_valid_sample(x)
   return (math_ops.xlogy(self.concentration1 - 1., x) +
           (self.concentration0 - 1.) * math_ops.log1p(-x))
Example #10
 def _log_unnormalized_prob(self, x):
   x = self._maybe_assert_valid_sample(x)
   return (math_ops.xlogy(self.concentration1 - 1., x) +
           (self.concentration0 - 1.) * math_ops.log1p(-x))  # pylint: disable=invalid-unary-operand-type
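Examples #9 and #10 are the unnormalized Beta log-density, (concentration1 - 1) * log(x) + (concentration0 - 1) * log(1 - x). xlogy keeps the first term at 0 when concentration1 == 1 and x == 0, and log1p(-x) is the numerically accurate form of log(1 - x) for small x. A minimal sketch with assumed example values:

import tensorflow as tf

c1, c0 = tf.constant(1.0), tf.constant(2.0)
x = tf.constant(0.0)

log_unnorm = tf.math.xlogy(c1 - 1., x) + (c0 - 1.) * tf.math.log1p(-x)
print(log_unnorm.numpy())   # 0.0, where (c1 - 1.) * tf.math.log(x) would give nan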