Example #1
  def compareToTranspose(self, batch_size, in_height, in_width, out_channels,
                         block_size, data_format, use_gpu):
    in_channels = out_channels * block_size * block_size
    nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
    nchw_input_shape = [batch_size, in_channels, in_height, in_width]
    total_size = np.prod(nhwc_input_shape)

    if data_format == "NCHW_VECT_C":
      # Initialize the input tensor with qint8 values that cycle through -127..127.
      x = [((f + 128) % 255) - 127 for f in range(total_size)]
      t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
      expected = self.depthToSpaceUsingTranspose(t, block_size, "NHWC")
      t = test_util.NHWCToNCHW_VECT_C(t)
      t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      t = array_ops.depth_to_space(t, block_size, data_format="NCHW_VECT_C")
      t = gen_array_ops.dequantize(t, -128, 127)
      actual = test_util.NCHW_VECT_CToNHWC(t)
    else:
      # Initialize the input tensor with ascending whole numbers as floats.
      x = [f * 1.0 for f in range(total_size)]
      shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
      t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
      expected = self.depthToSpaceUsingTranspose(t, block_size, data_format)
      actual = array_ops.depth_to_space(t, block_size, data_format=data_format)

    with self.test_session(use_gpu=use_gpu) as sess:
      actual_vals, expected_vals = sess.run([actual, expected])
      self.assertTrue(np.array_equal(actual_vals, expected_vals))
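A note on the helper used above: depthToSpaceUsingTranspose is not shown in
these snippets. A minimal NHWC-only sketch of what such a reference
implementation plausibly does, built from a reshape/transpose/reshape round
trip (the function name and signature here are assumptions, not the test
class's actual helper):

import numpy as np

def depth_to_space_reference(x, block_size):
    # Split the depth axis into (block_size, block_size, out_channels),
    # interleave the two block axes with height and width, then merge.
    b, h, w, c = x.shape
    oc = c // (block_size * block_size)
    x = x.reshape(b, h, w, block_size, block_size, oc)
    x = x.transpose(0, 1, 3, 2, 4, 5)  # [b, h, bs, w, bs, oc]
    return x.reshape(b, h * block_size, w * block_size, oc)

Comparing such a pure data-movement reference against array_ops.depth_to_space
is exactly the cross-check compareToTranspose performs.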
Example #2
    def compareToTranspose(self, batch_size, in_height, in_width, out_channels,
                           block_size, data_format, use_gpu):
        in_channels = out_channels * block_size * block_size
        nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
        nchw_input_shape = [batch_size, in_channels, in_height, in_width]
        total_size = np.prod(nhwc_input_shape)

        if data_format == "NCHW_VECT_C":
            # Initialize the input tensor with qint8 values that cycle through -127..127.
            x = [((f + 128) % 255) - 127 for f in range(total_size)]
            t = constant_op.constant(x,
                                     shape=nhwc_input_shape,
                                     dtype=dtypes.float32)
            expected = self.depthToSpaceUsingTranspose(t, block_size, "NHWC")
            t = test_util.NHWCToNCHW_VECT_C(t)
            t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
            t = array_ops.depth_to_space(t,
                                         block_size,
                                         data_format="NCHW_VECT_C")
            t = gen_array_ops.dequantize(t, -128, 127)
            actual = test_util.NCHW_VECT_CToNHWC(t)
        else:
            # Initialize the input tensor with ascending whole numbers as floats.
            x = [f * 1.0 for f in range(total_size)]
            shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
            t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
            expected = self.depthToSpaceUsingTranspose(t, block_size,
                                                       data_format)
            actual = array_ops.depth_to_space(t,
                                              block_size,
                                              data_format=data_format)

        with self.test_session(use_gpu=use_gpu) as sess:
            actual_vals, expected_vals = sess.run([actual, expected])
            self.assertTrue(np.array_equal(actual_vals, expected_vals))
Example #3
    def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
        input_nhwc = math_ops.cast(inputs, dtype)
        with self.cached_session(use_gpu=False):
            # test NHWC (default) on CPU
            x_tf = array_ops.depth_to_space(input_nhwc, block_size)
            self.assertAllEqual(x_tf.eval(), outputs)

            # Run this test only when the CPU is the only available device.
            if all(x.device_type == "CPU"
                   for x in device_lib.list_local_devices()):
                input_nchw = test_util.NHWCToNCHW(input_nhwc)
                output_nchw = array_ops.depth_to_space(input_nchw,
                                                       block_size,
                                                       data_format="NCHW")
                output_nhwc = test_util.NCHWToNHWC(output_nchw)
                with self.assertRaisesRegex(
                        errors_impl.InvalidArgumentError,
                        "No OpKernel was registered to support Op 'DepthToSpace'"
                ):
                    output_nhwc.eval()

        if test.is_gpu_available():
            with self.cached_session(use_gpu=True):
                # test NHWC (default) on GPU
                x_tf = array_ops.depth_to_space(input_nhwc, block_size)
                self.assertAllEqual(x_tf.eval(), outputs)
                # test NCHW on GPU
                input_nchw = test_util.NHWCToNCHW(input_nhwc)
                output_nchw = array_ops.depth_to_space(input_nchw,
                                                       block_size,
                                                       data_format="NCHW")
                output_nhwc = test_util.NCHWToNHWC(output_nchw)
                self.assertAllEqual(output_nhwc.eval(), outputs)
Example #4
 def testBlockSize0(self):
   x_np = [[[[1], [2]],
            [[3], [4]]]]
   block_size = 0
   with self.assertRaises(ValueError):
     out_tf = array_ops.depth_to_space(x_np, block_size)
     self.evaluate(out_tf)
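The ValueError above is raised when the op is constructed, not when it runs:
block_size is an integer attr with a documented minimum of 2, so 0 (and 1, as
in the testBlockSizeOne examples below) is rejected up front. A small
eager-mode sketch, assuming TF2's public tf.nn.depth_to_space:

import tensorflow as tf

try:
    # block_size must be >= 2; construction itself raises ValueError.
    tf.nn.depth_to_space(tf.ones([1, 2, 2, 4]), block_size=0)
except ValueError as e:
    print(e)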
Example #5
    def compareToTranspose(self, data_format, batch_size, in_height, in_width,
                           out_channels, block_size, use_gpu):
        if use_gpu and not test.is_gpu_available():
            print("gpu not available")
            return

        dtype = dtypes.float32
        in_channels = out_channels * block_size * block_size

        if data_format == "NHWC":
            input_shape = [batch_size, in_height, in_width, in_channels]
        elif data_format == "NCHW":
            input_shape = [batch_size, in_channels, in_height, in_width]
        else:
            assert False, "unsupported format"

        # Initialize the input tensor with ascending whole numbers.
        total_size = 1
        for dim_size in input_shape:
            total_size *= dim_size
        x = list(range(total_size))
        inputs = constant_op.constant(x, shape=input_shape, dtype=dtype)

        expected = self.depthToSpaceUsingTranspose(inputs, block_size,
                                                   data_format)
        actual = array_ops.depth_to_space(inputs,
                                          block_size,
                                          data_format=data_format)

        with self.test_session(use_gpu=use_gpu) as sess:
            actual_vals, expected_vals = sess.run([actual, expected])
            self.assertTrue(np.array_equal(actual_vals, expected_vals))
Example #6
 def testBlockSize0(self):
   x_np = [[[[1], [2]],
            [[3], [4]]]]
   block_size = 0
   with self.assertRaises(ValueError):
     out_tf = array_ops.depth_to_space(x_np, block_size)
     out_tf.eval()
Example #7
  def testBatchSize0(self):
    block_size = 2
    batch_size = 0
    input_nhwc = array_ops.ones([batch_size, 2, 3, 12])
    x_out = array_ops.ones([batch_size, 4, 6, 3])

    with self.cached_session(use_gpu=False):
      # test NHWC (default) on CPU
      x_tf = array_ops.depth_to_space(input_nhwc, block_size)
      self.assertAllEqual(x_tf.shape, x_out.shape)
      self.evaluate(x_tf)
    if test.is_gpu_available():
      with self.cached_session(use_gpu=True):
        # test NHWC (default) on GPU
        x_tf = array_ops.depth_to_space(input_nhwc, block_size)
        self.assertAllEqual(x_tf.shape, x_out.shape)
        self.evaluate(x_tf)
Example #8
 def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
   input_nhwc = math_ops.cast(inputs, dtype)
   with self.cached_session(use_gpu=False):
     # test NHWC (default) on CPU
     x_tf = array_ops.depth_to_space(input_nhwc, block_size)
     self.assertAllEqual(x_tf.eval(), outputs)
   if test.is_gpu_available():
     with self.cached_session(use_gpu=True):
       # test NHWC (default) on GPU
       x_tf = array_ops.depth_to_space(input_nhwc, block_size)
       self.assertAllEqual(x_tf.eval(), outputs)
       # test NCHW on GPU
       input_nchw = test_util.NHWCToNCHW(input_nhwc)
       output_nchw = array_ops.depth_to_space(
           input_nchw, block_size, data_format="NCHW")
       output_nhwc = test_util.NCHWToNHWC(output_nchw)
       self.assertAllEqual(output_nhwc.eval(), outputs)
Example #9
def _SpaceToDepthGrad(op, grad):
  # Its gradient is the opposite op: DepthToSpace.
  block_size = op.get_attr("block_size")
  data_format = op.get_attr("data_format")
  if data_format == "NCHW_VECT_C":
    raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. "
                     "NCHW_VECT_C requires qint8 data type.")
  return array_ops.depth_to_space(grad, block_size, data_format=data_format)
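In the TensorFlow source, a gradient function like this is attached to its
forward op with the RegisterGradient decorator. A minimal sketch of the
registration (illustrative only: TensorFlow already registers a gradient for
SpaceToDepth, so actually re-registering it in user code would fail):

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
  # The gradient of a pure data-movement op is the inverse movement.
  return array_ops.depth_to_space(grad, op.get_attr("block_size"),
                                  data_format=op.get_attr("data_format"))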
Example #10
    def testBatchSize0(self):
        block_size = 2
        batch_size = 0
        input_nhwc = array_ops.ones([batch_size, 2, 3, 12])
        x_out = array_ops.ones([batch_size, 4, 6, 3])

        with self.test_session(use_gpu=False):
            # test NHWC (default) on CPU
            x_tf = array_ops.depth_to_space(input_nhwc, block_size)
            self.assertAllEqual(x_tf.shape, x_out.shape)
            x_tf.eval()
        if test.is_gpu_available():
            with self.test_session(use_gpu=True):
                # test NHWC (default) on GPU
                x_tf = array_ops.depth_to_space(input_nhwc, block_size)
                self.assertAllEqual(x_tf.shape, x_out.shape)
                x_tf.eval()
Example #11
 def _testOne(self, inputs, block_size, outputs):
   input_nhwc = math_ops.to_float(inputs)
   with self.test_session(use_gpu=False):
     # test NHWC (default) on CPU
     x_tf = array_ops.depth_to_space(input_nhwc, block_size)
     self.assertAllEqual(x_tf.eval(), outputs)
   if test.is_gpu_available():
     with self.test_session(use_gpu=True):
       # test NHWC (default) on GPU
       x_tf = array_ops.depth_to_space(input_nhwc, block_size)
       self.assertAllEqual(x_tf.eval(), outputs)
       # test NCHW on GPU
       input_nchw = test_util.NHWCToNCHW(input_nhwc)
       output_nchw = array_ops.depth_to_space(
           input_nchw, block_size, data_format="NCHW")
       output_nhwc = test_util.NCHWToNHWC(output_nchw)
       self.assertAllEqual(output_nhwc.eval(), outputs)
Example #12
def _SpaceToDepthGrad(op, grad):
  # Its gradient is the opposite op: DepthToSpace.
  block_size = op.get_attr("block_size")
  data_format = op.get_attr("data_format")
  if data_format == "NCHW_VECT_C":
    raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. "
                     "NCHW_VECT_C requires qint8 data type.")
  return array_ops.depth_to_space(grad, block_size, data_format=data_format)
Example #13
 def testBlockSizeOne(self):
   x_np = [[[[1, 1, 1, 1],
             [2, 2, 2, 2]],
            [[3, 3, 3, 3],
             [4, 4, 4, 4]]]]
   block_size = 1
   with self.assertRaises(ValueError):
     out_tf = array_ops.depth_to_space(x_np, block_size)
     self.evaluate(out_tf)
Example #14
 def testBlockSizeOne(self):
   x_np = [[[[1, 1, 1, 1],
             [2, 2, 2, 2]],
            [[3, 3, 3, 3],
             [4, 4, 4, 4]]]]
   block_size = 1
   with self.assertRaises(ValueError):
     out_tf = array_ops.depth_to_space(x_np, block_size)
     out_tf.eval()
Example #15
 def testBlockSizeTooLarge(self):
     x_np = [[[[1, 2, 3, 4], [5, 6, 7, 8]],
              [[9, 10, 11, 12], [13, 14, 15, 16]]]]
     block_size = 4
     # Raises an exception, since the depth is only 4 and needs to be
     # divisible by 16.
     with self.assertRaises(ValueError):
         out_tf = array_ops.depth_to_space(x_np, block_size)
         out_tf.eval()
Example #16
 def testDepthToSpaceTranspose(self):
     x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
     block_size = 2
     crops = np.zeros((2, 2), dtype=np.int32)
     y1 = self.batch_to_space(x, crops, block_size=block_size)
     y2 = array_ops.transpose(
         array_ops.depth_to_space(array_ops.transpose(x, [3, 1, 2, 0]),
                                  block_size=block_size), [3, 1, 2, 0])
     with self.test_session():
         self.assertAllEqual(y1.eval(), y2.eval())
Example #17
 def testDepthToSpaceTranspose(self):
   x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
   block_size = 2
   crops = np.zeros((2, 2), dtype=np.int32)
   y1 = self.batch_to_space(x, crops, block_size=block_size)
   y2 = array_ops.transpose(
       array_ops.depth_to_space(
           array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
       [3, 1, 2, 0])
   with self.test_session():
     self.assertAllEqual(y1.eval(), y2.eval())
Example #18
 def testBlockSizeTooLarge(self):
   x_np = [[[[1, 2, 3, 4],
             [5, 6, 7, 8]],
            [[9, 10, 11, 12],
             [13, 14, 15, 16]]]]
   block_size = 4
   # Raises an exception, since the depth is only 4 and needs to be
   # divisible by 16.
   with self.assertRaises(ValueError):
     out_tf = array_ops.depth_to_space(x_np, block_size)
     out_tf.eval()
Example #19
 def testDepthToSpaceTranspose(self):
     x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
     block_size = 2
     for crops_dtype in [dtypes.int64, dtypes.int32]:
         crops = array_ops.zeros((2, 2), dtype=crops_dtype)
         y1 = self.batch_to_space(x, crops, block_size=block_size)
         y2 = array_ops.transpose(
             array_ops.depth_to_space(array_ops.transpose(x, [3, 1, 2, 0]),
                                      block_size=block_size), [3, 1, 2, 0])
         with self.cached_session():
             self.assertAllEqual(y1, y2)
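These tests exercise a duality: with zero crops, batch_to_space on a tensor of
shape [B * block_size**2, H, W, C] performs the same data movement as swapping
the batch and depth axes, applying depth_to_space, and swapping back. A TF2
eager sketch of the same identity (tf.batch_to_space and tf.nn.depth_to_space
are the public TF2 entry points; the batch_to_space helper above belongs to
the test class):

import numpy as np
import tensorflow as tf

x = np.arange(8 * 2 * 2 * 3, dtype=np.float32).reshape([8, 2, 2, 3])
crops = np.zeros((2, 2), dtype=np.int32)
y1 = tf.batch_to_space(x, block_shape=[2, 2], crops=crops)
y2 = tf.transpose(
    tf.nn.depth_to_space(tf.transpose(x, [3, 1, 2, 0]), block_size=2),
    [3, 1, 2, 0])
np.testing.assert_array_equal(y1.numpy(), y2.numpy())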
Example #20
  def testDepthToSpace(self):
    for dtype in self.numeric_types:
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
          expected=np.array([[[[1], [2]],
                              [[3], [4]]]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
          expected=np.array([[[[1, 2, 3], [4, 5, 6]],
                              [[7, 8, 9], [10, 11, 12]]]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4],
                      [5, 6, 7, 8]],
                     [[9, 10, 11, 12],
                      [13, 14, 15, 16]]]], dtype=dtype),
          expected=np.array([[[[1], [2], [5], [6]],
                              [[3], [4], [7], [8]],
                              [[9], [10], [13], [14]],
                              [[11], [12], [15], [16]]]], dtype=dtype))
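The expected arrays follow the NHWC layout rule: with block_size=2, each group
of four depth values at one input pixel becomes a 2x2 spatial block, read
row-major across the depth axis. A quick eager check of the first case,
assuming the TF2 API:

import tensorflow as tf

x = tf.constant([[[[1, 2, 3, 4]]]], dtype=tf.float32)  # shape (1, 1, 1, 4)
y = tf.nn.depth_to_space(x, block_size=2)              # shape (1, 2, 2, 1)
print(y.numpy().reshape(2, 2))  # [[1. 2.]
                                #  [3. 4.]]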
Example #21
  def _checkGrad(self, x, block_size):
    assert 4 == x.ndim
    with self.test_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.depth_to_space(tf_x, block_size)
      epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
Example #22
    def _checkGrad(self, x, block_size):
        assert 4 == x.ndim
        with self.test_session(use_gpu=True):
            tf_x = ops.convert_to_tensor(x)
            tf_y = array_ops.depth_to_space(tf_x, block_size)
            epsilon = 1e-2
            (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
                tf_x,
                x.shape,
                tf_y,
                tf_y.get_shape().as_list(),
                x_init_value=x,
                delta=epsilon)

        self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
Example #23
  def testDepthToSpace(self):
    for dtype in self.numeric_types:
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
          expected=np.array([[[[1], [2]],
                              [[3], [4]]]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
          expected=np.array([[[[1, 2, 3], [4, 5, 6]],
                              [[7, 8, 9], [10, 11, 12]]]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4],
                      [5, 6, 7, 8]],
                     [[9, 10, 11, 12],
                      [13, 14, 15, 16]]]], dtype=dtype),
          expected=np.array([[[[1], [2], [5], [6]],
                              [[3], [4], [7], [8]],
                              [[9], [10], [13], [14]],
                              [[11], [12], [15], [16]]]], dtype=dtype))
Example #24
  def _checkGrad(self, x, block_size, data_format):
    # NCHW is implemented only on GPU.
    if data_format == "NCHW" and not test.is_gpu_available():
      return

    assert 4 == x.ndim
    with self.cached_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)

      epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
Example #25
  def _checkGrad(self, x, block_size, data_format):
    # NCHW is implemented only on GPU.
    if data_format == "NCHW" and not test.is_gpu_available():
      return

    assert 4 == x.ndim
    with self.cached_session(use_gpu=True):
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)

      epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)
      self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
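These snippets use the TF1 gradient_checker, which needs explicit shapes and a
session. In TF2 the same check can be written with tf.test.compute_gradient,
which takes a function and a list of inputs and returns the theoretical and
numeric Jacobians. A minimal eager-mode sketch, block_size=2 assumed:

import numpy as np
import tensorflow as tf

def f(x):
    return tf.nn.depth_to_space(x, block_size=2)

x = tf.random.normal([1, 2, 2, 4])
theoretical, numeric = tf.test.compute_gradient(f, [x])
# depth_to_space only moves data, so the analytic and finite-difference
# Jacobians should agree to within the finite-difference tolerance.
np.testing.assert_allclose(theoretical[0], numeric[0], rtol=1e-2, atol=1e-2)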
Example #26
 def loop_fn(i):
     x1 = array_ops.gather(x, i)
     return array_ops.depth_to_space(x1, 2, data_format="NHWC")
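This fragment is the shape of loop body used by TensorFlow's pfor
(parallel-for) vectorization tests: gather one slice per iteration, apply the
op. A sketch of how such a body is typically driven, assuming x has a leading
loop axis of static length 4 and using the internal pfor entry point:

from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops

# Vectorizes loop_fn over the first axis of x; pfor replaces the explicit
# per-iteration gather with a single batched depth_to_space where possible.
vectorized = pfor_ops.pfor(loop_fn, 4)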
Example #27
 def op(x):
   return array_ops.depth_to_space(x, block_size=2,
                                   data_format=data_format)
Example #28
 def op(x):
   return array_ops.depth_to_space(
       x, block_size=2, data_format=data_format)
Example #29
 def _testOne(self, inputs, block_size, outputs):
   with self.test_session(use_gpu=True):
     x_tf = array_ops.depth_to_space(math_ops.to_float(inputs), block_size)
     self.assertAllEqual(x_tf.eval(), outputs)
Example #30
 def testUnknownShape(self):
     t = array_ops.depth_to_space(array_ops.placeholder(dtypes.float32),
                                  block_size=4)
     self.assertEqual(4, t.get_shape().ndims)
Example #31
 def testUnknownShape(self):
   t = array_ops.depth_to_space(
       array_ops.placeholder(dtypes.float32), block_size=4)
   self.assertEqual(4, t.get_shape().ndims)
Example #32
def _SpaceToDepthGrad(op, grad):
  # Its gradient is the opposite op: DepthToSpace.
  block_size = op.get_attr("block_size")
  return array_ops.depth_to_space(grad, block_size)
Example #33
def _SpaceToDepthGrad(op, grad):
    # Its gradient is the opposite op: DepthToSpace.
    block_size = op.get_attr("block_size")
    return array_ops.depth_to_space(grad, block_size)
Example #34
 def _testOne(self, inputs, block_size, outputs):
     with self.test_session(use_gpu=True):
         x_tf = array_ops.depth_to_space(math_ops.to_float(inputs),
                                         block_size)
         self.assertAllEqual(x_tf.eval(), outputs)