Example #1
 def _RunAndVerify(self, dtype):
     with self.test_session():
         # random shape
         shape = np.random.randint(1, 16, size=4)
         # Make depth at least 2 to make it meaningful
         shape[3] += 1
         p = array_ops.placeholder(dtype, shape=shape)
         # random depth_radius, bias, alpha, beta
         lrn_depth_radius = np.random.randint(1, shape[3])
         bias = 1.0 + np.random.rand()
         alpha = 2.0 * np.random.rand()
         beta = 2.0 * np.random.rand()
         with self.test_scope():
             lrn_t = nn.local_response_normalization(
                 p,
                 name="lrn",
                 depth_radius=lrn_depth_radius,
                 bias=bias,
                 alpha=alpha,
                 beta=beta)
         params = {p: np.random.rand(*shape).astype("f")}
         result = lrn_t.eval(feed_dict=params)
     expected = self._LRN(params[p],
                          lrn_depth_radius=lrn_depth_radius,
                          bias=bias,
                          alpha=alpha,
                          beta=beta)
     err = np.amax(np.abs(result - expected))
     print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta,
           " is ", err)
     if dtype == dtypes.float32:
         self.assertTrue(err < 1e-4)
     else:
         self.assertTrue(err < 1e-2)
     self.assertShapeEqual(expected, lrn_t)
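The expected values above come from `self._LRN`, a reference helper that is not part of this excerpt. A minimal NumPy sketch of such a helper (an assumption, written from the documented formula for `tf.nn.local_response_normalization`: each value is divided by `(bias + alpha * sqr_sum) ** beta`, where `sqr_sum` is the sum of squares over a window of `depth_radius` channels on either side):

import numpy as np

def _LRN(input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0, beta=0.5):
  # Sketch of a reference LRN over the depth (last) axis of an NHWC array;
  # the real helper in the test class may differ in detail.
  output = np.zeros_like(input_image, dtype=np.float32)
  depth = input_image.shape[3]
  for d in range(depth):
    begin = max(0, d - lrn_depth_radius)
    end = min(depth, d + lrn_depth_radius + 1)
    # Sum of squares over the local window of channels around channel d.
    sqr_sum = np.sum(input_image[:, :, :, begin:end] ** 2, axis=3)
    output[:, :, :, d] = (
        input_image[:, :, :, d] / (bias + alpha * sqr_sum) ** beta)
  return output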
Example #2
  def _RunAndVerifyGradients(self, dtype):
    with self.test_session(use_gpu=True):
      # random shape
      shape = np.random.randint(1, 5, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
      bias = 1.0 + np.random.rand()
      alpha = 1.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 1.0 * np.random.rand()
      if dtype == dtypes.float32:
        inp_array = np.random.rand(*shape).astype(np.float32)
      else:
        inp_array = np.random.rand(*shape).astype(np.float16)

      inp = constant_op.constant(
          list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
      lrn_op = nn.local_response_normalization(
          inp,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
    print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
          " is ", err)
    if dtype == dtypes.float32:
      self.assertLess(err, 1e-4)
    else:
      self.assertLess(err, 1.0)
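Here `gradient_checker.compute_gradient_error` builds the analytic Jacobian of `lrn_op` with respect to `inp` and compares it against a numeric Jacobian obtained by finite differences, returning the maximum absolute difference; the looser `1.0` bound reflects float16's limited precision. A hypothetical driver for this method (the test name and body are assumptions, not part of the excerpt) could simply call it for both dtypes:

def testGradients(self):
  # Hypothetical driver: run the gradient check once per supported dtype.
  self._RunAndVerifyGradients(dtypes.float32)
  self._RunAndVerifyGradients(dtypes.float16)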
Example #3
  def _RunAndVerifyGradients(self, dtype):
    with self.cached_session(use_gpu=True):
      # random shape
      shape = np.random.randint(1, 5, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
      bias = 1.0 + np.random.rand()
      alpha = 1.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 1.0 * np.random.rand()
      if dtype == dtypes.float32:
        inp_array = np.random.rand(*shape).astype(np.float32)
      else:
        inp_array = np.random.rand(*shape).astype(np.float16)

      inp = constant_op.constant(
          list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
      lrn_op = nn.local_response_normalization(
          inp,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
    print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
          " is ", err)
    if dtype == dtypes.float32:
      self.assertLess(err, 1e-4)
    else:
      self.assertLess(err, 1.0)
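Example #3 is the same test as Example #2 except that the deprecated `self.test_session(use_gpu=True)` has been replaced by `self.cached_session(use_gpu=True)`, which reuses one session across the test method; the op under test and the assertions are unchanged.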
Example #4
 def testGradientsZeroInput(self):
   with self.test_session(use_gpu=True):
     shape = [4, 4, 4, 4]
     p = array_ops.placeholder(dtypes.float32, shape=shape)
     inp_array = np.zeros(shape).astype("f")
     lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
     grad = gradients_impl.gradients([lrn_op], [p])[0]
     params = {p: inp_array}
     r = grad.eval(feed_dict=params)
   expected = np.ones(shape).astype("f")
   self.assertAllClose(r, expected)
   self.assertShapeEqual(expected, grad)
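The expected gradient is all ones because the test passes `alpha=0.0` and `bias=1.0` (the positional arguments are depth_radius, bias, alpha, beta): the normalization factor `(bias + alpha * sqr_sum) ** beta` collapses to `1.0 ** 1.0`, so the op is the identity on its input and each output element has derivative 1 with respect to its corresponding input element. A quick eager-mode check of the same fact (a sketch assuming TensorFlow 2.x):

import tensorflow as tf

x = tf.zeros([4, 4, 4, 4])
with tf.GradientTape() as tape:
  tape.watch(x)  # x is not a Variable, so it must be watched explicitly
  # Same positional arguments as above: depth_radius=2, bias=1.0,
  # alpha=0.0, beta=1.0 -- with alpha=0 the op is the identity.
  y = tf.nn.local_response_normalization(x, 2, 1.0, 0.0, 1.0)
g = tape.gradient(y, x)  # gradient of sum(y) w.r.t. x: all ones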
Example #5
 def testGradientsZeroInput(self):
   with self.session(use_gpu=True):
     shape = [4, 4, 4, 4]
     p = array_ops.placeholder(dtypes.float32, shape=shape)
     inp_array = np.zeros(shape).astype("f")
     lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
     grad = gradients_impl.gradients([lrn_op], [p])[0]
     params = {p: inp_array}
     r = grad.eval(feed_dict=params)
   expected = np.ones(shape).astype("f")
   self.assertAllClose(r, expected)
   self.assertShapeEqual(expected, grad)
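Example #5 differs from Example #4 only in entering the graph through `self.session(use_gpu=True)` instead of the deprecated `self.test_session(use_gpu=True)`; the computation and assertions are identical.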
Example #6
  def _RunAndVerify(self, dtype):
    with self.test_session(use_gpu=True):
      # random shape
      shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      p = array_ops.placeholder(dtype, shape=shape)
      # random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
      # be in [1, 7].
      lrn_depth_radius = np.random.randint(1, min(8, shape[3]))

      bias = 1.0 + np.random.rand()
      alpha = 2.0 * np.random.rand()
      # cuDNN requires beta >= 0.01.
      beta = 0.01 + 2.0 * np.random.rand()
      lrn_t = nn.local_response_normalization(
          p,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      params = {p: np.random.rand(*shape).astype("f")}
      result = lrn_t.eval(feed_dict=params)
    expected = self._LRN(
        params[p],
        lrn_depth_radius=lrn_depth_radius,
        bias=bias,
        alpha=alpha,
        beta=beta)
    err = np.amax(np.abs(result - expected))
    print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
          err)
    if dtype == dtypes.float32:
      self.assertTrue(err < 1e-4)
    else:
      self.assertTrue(err < 1e-2)
    self.assertShapeEqual(expected, lrn_t)
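All of the examples above drive `nn.local_response_normalization` through a test harness. For reference, a minimal standalone sketch of the op itself (assuming TensorFlow 2.x in eager mode; the parameter values are illustrative):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 3, 3, 8).astype(np.float32))
# Each channel is normalized using the squares of the 2 neighboring
# channels on either side of it along the depth axis.
y = tf.nn.local_response_normalization(
    x, depth_radius=2, bias=1.0, alpha=1.0, beta=0.5)
print(y.shape)  # (1, 3, 3, 8): LRN preserves the input shape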