Code example #1
0
  def _testGradient(self, x, use_gpu=False, **kwargs):
    """Check the fused op's gradient against the naive composition.

    exp(cumulative_logsumexp(y)) is mathematically identical to
    cumsum(exp(y)), so the theoretical Jacobians of the two
    expressions must agree.
    """
    with self.cached_session(use_gpu=use_gpu):
      x = ops.convert_to_tensor(x, dtype=dtypes.float64)

      def naive_fn(y):
        return math_ops.cumsum(math_ops.exp(y), **kwargs)

      def fused_fn(y):
        return math_ops.exp(math_ops.cumulative_logsumexp(y, **kwargs))

      naive_jacobian, _ = gradient_checker_v2.compute_gradient(naive_fn, [x])
      fused_jacobian, _ = gradient_checker_v2.compute_gradient(fused_fn, [x])

      self.assertAllClose(fused_jacobian, naive_jacobian)
Code example #2
0
  def test1DLarge(self):
    """Verify correctness on inputs where the naive form would overflow."""
    # Values go up to 380; exp(380) overflows float32, so only a
    # numerically stable implementation can match the reference.
    x_np = np.arange(20) * 20.0

    for use_gpu in (True, False):
      with self.cached_session(use_gpu=use_gpu):
        x_tf = ops.convert_to_tensor(x_np, dtype=dtypes.float32)
        fused = self.evaluate(math_ops.cumulative_logsumexp(x_tf))
        reference = self.evaluate(self._logSumExpMap(x_tf))

      self.assertAllClose(fused, reference)
Code example #3
0
 def _computeLogSumExp(self, x, **kwargs):
   """Return (naive, fused) cumulative log-sum-exp results for comparison.

   The naive path exponentiates first and then accumulates; the fused
   path accumulates in log space and exponentiates afterwards.
   """
   naive = math_ops.cumsum(math_ops.exp(x), **kwargs)
   fused = math_ops.exp(math_ops.cumulative_logsumexp(x, **kwargs))
   return naive, fused