  # Relies on module-level imports from the surrounding test file, roughly:
  #   import numpy as np
  #   from tensorflow.contrib import resampler
  #   from tensorflow.python.framework import constant_op, dtypes
  #   from tensorflow.python.platform import test
  def _test_op_backward_pass(self, on_gpu, dtype, tol):
    np.random.seed(13)
    data_width = 5
    data_height = 4
    data_channels = 3
    warp_width = 2
    warp_height = 6
    batch_size = 10

    warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype)
    data_shape = (batch_size, data_height, data_width, data_channels)
    data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype)

    with self.test_session(use_gpu=on_gpu, force_gpu=False):
      data_tensor = constant_op.constant(data)
      warp_tensor = constant_op.constant(warp)
      output_tensor = resampler.resampler(data=data_tensor, warp=warp_tensor)

      grads = test.compute_gradient([data_tensor, warp_tensor], [
          data_tensor.get_shape().as_list(),
          warp_tensor.get_shape().as_list()
      ], output_tensor, output_tensor.get_shape().as_list(), [data, warp])

      if not on_gpu:
        # On CPU we perform numerical differentiation at the best available
        # precision, and compare against that. This is necessary for the test
        # pass for float16.
        data_tensor_64 = constant_op.constant(data, dtype=dtypes.float64)
        warp_tensor_64 = constant_op.constant(warp, dtype=dtypes.float64)
        output_tensor_64 = resampler.resampler(data=data_tensor_64,
                                               warp=warp_tensor_64)
        grads_64 = test.compute_gradient([data_tensor_64, warp_tensor_64], [
            data_tensor.get_shape().as_list(),
            warp_tensor.get_shape().as_list()
        ], output_tensor_64, output_tensor.get_shape().as_list(), [data, warp])

        for g, g_64 in zip(grads, grads_64):
          self.assertLess(np.fabs(g[0] - g_64[1]).max(), tol)

      else:
        for g in grads:
          self.assertLess(np.fabs(g[0] - g[1]).max(), tol)
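The snippet above calls a module-level helper, _make_warp, that is not included in the listing. A minimal sketch of such a helper, assuming the resampler expects a [batch, height, width, 2] array of (x, y) sampling coordinates (the construction below is inferred, not taken from the listing):

def _make_warp(batch_size, warp_height, warp_width, dtype):
  # Build a regular (x, y) pixel grid of shape [warp_height, warp_width, 2].
  x, y = np.meshgrid(np.arange(warp_width, dtype=np.float64),
                     np.arange(warp_height, dtype=np.float64))
  warp = np.stack([x, y], axis=-1)
  # Tile the grid over the batch dimension -> [batch, height, width, 2].
  warp = np.tile(warp[np.newaxis], [batch_size, 1, 1, 1])
  # Jitter the coordinates so the bilinear interpolation weights, and hence
  # the gradients being checked, are non-trivial.
  warp += np.random.uniform(-0.5, 0.5, size=warp.shape)
  return warp.astype(dtype)

Any continuous-valued construction serves the gradient check; coordinates that land exactly on integer grid lines or image borders are the points where bilinear interpolation is non-differentiable and the finite-difference estimate can disagree with the analytic gradient.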
Example #3
  # Relies on module-level imports from the surrounding test file, roughly:
  #   from tensorflow.python.framework import ops
  #   from tensorflow.python.ops import math_ops
  #   from tensorflow.python.platform import test
  def _checkGradReal(self, func, x, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      inx = ops.convert_to_tensor(x)
      # func is a forward RFFT function (batched or unbatched).
      z = func(inx)
      # loss = sum(|z|^2)
      loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
      x_jacob_t, x_jacob_n = test.compute_gradient(
          inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
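For context, a driver for this helper might look like the following sketch; the test method name and the rfft import path are assumptions, since the listing does not show how _checkGradReal is invoked:

  def test_rfft_gradient(self):
    # spectral_ops.rfft is the 1-D real-to-complex FFT in TF 1.x internals
    # (tf.signal.rfft in newer releases); the import path is assumed here.
    x = np.random.rand(8).astype(np.float32)
    self._checkGradReal(spectral_ops.rfft, x, use_gpu=False)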