Example #1
def rebuild_image():
  h_fc1 = tf.nn.relu(tf.matmul(y_ + b_fc2, W_fc2))
  h_pool2_flat = tf.matmul(h_fc1 + b_fc1, W_fc1)
  h_pool2 = tf.reshape(h_pool2_flat, [-1, 7, 7, conv2_size])  # I think that's right...
  h_conv2 = tf.image.resize_images(h_pool2, 14, 14, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
  h_pool1 = nn_ops.conv2d_transpose(h_conv2 + b_conv2, W_conv2,
      [class_size, 14, 14, conv1_size], [1, 1, 1, 1])
  h_conv1 = tf.image.resize_images(h_pool1, 28, 28, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
  x_image = nn_ops.conv2d_transpose(h_conv1 + b_conv1, W_conv1, [class_size, 28, 28, 1], [1, 1, 1, 1])
  x_image = tf.nn.relu(x_image)
  return x_image
Example #2
    def testConv2DTransposeValid(self):
        with self.cached_session():
            for dtype in (dtypes.float32, dtypes.int32):
                strides = [1, 2, 2, 1]

                # Input, output: [batch, height, width, depth]
                x_shape = [2, 6, 4, 3]
                y_shape = [2, 13, 9, 2]

                # Filter: [kernel_height, kernel_width, output_depth, input_depth]
                f_shape = [3, 3, 2, 3]

                x = constant_op.constant(1,
                                         shape=x_shape,
                                         name="x",
                                         dtype=dtype)
                f = constant_op.constant(1,
                                         shape=f_shape,
                                         name="filter",
                                         dtype=dtype)
                output = nn_ops.conv2d_transpose(x,
                                                 f,
                                                 y_shape,
                                                 strides=strides,
                                                 padding="VALID")
                value = self.evaluate(output)

                cache_values = np.zeros(y_shape, dtype=np.float32)

                # The amount of padding added
                pad = 1

                for n in xrange(x_shape[0]):
                    for k in xrange(f_shape[2]):
                        for w in xrange(pad, y_shape[2] - pad):
                            for h in xrange(pad, y_shape[1] - pad):
                                target = 3
                                # We add a case for locations divisible by the stride.
                                h_in = (h % strides[1] == 0 and h > pad and
                                        h < y_shape[1] - 1 - pad)
                                w_in = (w % strides[2] == 0 and w > pad and
                                        w < y_shape[2] - 1 - pad)
                                if h_in and w_in:
                                    target += 9
                                elif h_in or w_in:
                                    target += 3
                                cache_values[n, h, w, k] = target

                        # copy values in the border
                        cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
                        cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
                        cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
                        cache_values[n, -1, :, k] = cache_values[n, -2, :, k]

                if dtype.is_integer:
                    self.assertAllEqual(cache_values, value)
                else:
                    self.assertAllClose(cache_values, value)
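The y_shape above follows from the transposed-convolution size formula for VALID padding, out = (in - 1) * stride + kernel. A quick illustrative check (plain Python, not part of the original test):

# Sanity check of the VALID output shape used in the test above:
# out = (in - 1) * stride + kernel.
def valid_transpose_size(in_size, stride, kernel):
    return (in_size - 1) * stride + kernel

assert valid_transpose_size(6, 2, 3) == 13  # height: x_shape[1] -> y_shape[1]
assert valid_transpose_size(4, 2, 3) == 9   # width:  x_shape[2] -> y_shape[2]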
Example #3
def _test_transpose_conv(tensor_in_sizes, filter_in_sizes, output_shape, strides, padding):
    """ One iteration of transpose convolution with given shapes and attributes """

    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')
        strides = [1] + strides + [1]
        # in_filter layout is HWOI
        out = nn_ops.conv2d_transpose(in_data,
                                      in_filter,
                                      output_shape=output_shape,
                                      strides=strides,
                                      padding=padding)
        data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')
        compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])
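A hypothetical invocation of this helper, for illustration only; the shapes are made up, and compare_tflite_with_tvm comes from the surrounding test harness:

# Hypothetical call: a 4x4 single-channel input, one 3x3 HWOI filter,
# upsampled 2x with SAME padding (output 1 x 8 x 8 x 1).
_test_transpose_conv(tensor_in_sizes=[1, 4, 4, 1],
                     filter_in_sizes=[3, 3, 1, 1],
                     output_shape=[1, 8, 8, 1],
                     strides=[2, 2],
                     padding='SAME')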
Example #4
  def testConv2DTransposeSame(self):
    with self.test_session():
      strides = [1, 2, 2, 1]

      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 12, 8, 2]

      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = output.eval()

      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(y_shape[2]):
            for h in xrange(y_shape[1]):
              target = 3.0
              # We add a case for locations divisible by the stride.
              h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
              w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
              if h_in and w_in:
                target += 9.0
              elif h_in or w_in:
                target += 3.0
              self.assertAllClose(target, value[n, h, w, k])
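For SAME padding the output size is simply in * stride, which is where y_shape = [2, 12, 8, 2] comes from. An illustrative check:

# Sanity check of the SAME output shape: out = in * stride.
def same_transpose_size(in_size, stride):
    return in_size * stride

assert same_transpose_size(6, 2) == 12  # height
assert same_transpose_size(4, 2) == 8   # width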
Example #5
 def GetParams(self):
   """Testing conversion of conv2d_transpose (AKA Conv2DBackpropInput)"""
   np.random.seed(1234)
   dtype = dtypes.float32
   input_name = "input"
   n, c, h, w = 13, 3, 7, 11
   num_filters = 8
   input_dims = [n, c, h, w]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       weights_shape = [2, 2, num_filters, c]
       weights = constant_op.constant(
           np.random.randn(*weights_shape), dtype=dtype)
       output_shape = constant_op.constant([n, num_filters, h * 2, w * 2],
                                           dtype=dtypes.int32)
       output = nn_ops.conv2d_transpose(
           inp,
           weights,
           output_shape,
           strides=[1, 1, 2, 2],
           padding="SAME",
           data_format="NCHW")
       output = array_ops.identity(output, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[[input_dims]],
       output_names=[output_name],
       expected_output_dims=[[[n, num_filters, h * 2, w * 2]]])
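Note how the strides follow the data layout: with data_format="NCHW" the spatial strides sit at indices 2 and 3, versus 1 and 2 for NHWC. A small illustrative helper (not part of the example):

# Illustrative: build the strides vector conv2d_transpose expects for a
# given data format.
def make_strides(stride_h, stride_w, data_format="NHWC"):
    if data_format == "NHWC":
        return [1, stride_h, stride_w, 1]
    if data_format == "NCHW":
        return [1, 1, stride_h, stride_w]
    raise ValueError("unsupported data_format: %r" % data_format)

assert make_strides(2, 2, "NCHW") == [1, 1, 2, 2]  # as used above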
Example #6
def rebuild_image():
    h_fc1 = tf.nn.relu(tf.matmul(y_ + b_fc2, W_fc2))
    h_pool2_flat = tf.matmul(h_fc1 + b_fc1, W_fc1)
    h_pool2 = tf.reshape(h_pool2_flat,
                         [-1, 7, 7, conv2_size])  # I think that's right...
    h_conv2 = tf.image.resize_images(
        h_pool2, 14, 14, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    h_pool1 = nn_ops.conv2d_transpose(h_conv2 + b_conv2, W_conv2,
                                      [class_size, 14, 14, conv1_size],
                                      [1, 1, 1, 1])
    h_conv1 = tf.image.resize_images(
        h_pool1, 28, 28, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    x_image = nn_ops.conv2d_transpose(h_conv1 + b_conv1, W_conv1,
                                      [class_size, 28, 28, 1], [1, 1, 1, 1])
    x_image = tf.nn.relu(x_image)
    return x_image
Example #7
  def testConv2DTransposeSameNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        strides = [1, 1, 2, 2]

        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 12, 8]

        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")

        value = self.evaluate(output)
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(y_shape[3]):
              for h in xrange(y_shape[2]):
                target = 3.0
                # We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
                w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
                if h_in and w_in:
                  target += 9.0
                elif h_in or w_in:
                  target += 3.0
                self.assertAllClose(target, value[n, k, h, w])
Example #8
  def testAtrousConv2DTransposeForward(self):
    with self.session(use_gpu=True):
      # Input: [batch, height, width, input_depth]
      height = 9
      for width in [9, 10]:  # Test both odd and even width.
        x_shape = [2, height, width, 2]
        x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)

        # Filter: [kernel_height, kernel_width, input_depth, output_depth]
        for kernel_height in range(1, 4):
          for kernel_width in range(1, 4):
            f_shape = [kernel_height, kernel_width, 2, 2]
            f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)

            for rate in range(1, 4):
              f_up = _upsample_filters(f, rate)
              kernel_height_up = (kernel_height + (kernel_height - 1) *
                                  (rate - 1))
              kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)

              for padding in ["SAME", "VALID"]:
                if padding == "SAME":
                  y_shape = [2, height, width, 2]
                else:
                  y_shape = [
                      2, height + kernel_height_up - 1,
                      width + kernel_width_up - 1, 2
                  ]

                y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
                                                    padding)
                y2 = nn_ops.conv2d_transpose(
                    x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
                self.assertAllClose(
                    y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
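The _upsample_filters helper is not shown in this example. Below is a sketch of the usual zero-insertion definition, consistent with the kernel_height_up / kernel_width_up arithmetic above (an assumption, not copied from the original source):

def _upsample_filters(filters, rate):
  """Dilates [h, w, in_depth, out_depth] filters: inserts rate - 1 zeros
  between taps on both spatial axes, so size becomes k + (k - 1) * (rate - 1)."""
  if rate == 1:
    return filters
  # [h, w, in, out] -> [in, out, h, w] so np.kron acts on the spatial dims.
  filters_up = np.transpose(filters, [2, 3, 0, 1])
  ker = np.zeros([rate, rate], dtype=np.float32)
  ker[0, 0] = 1
  # Each tap becomes the top-left entry of a rate x rate block; trim the
  # trailing all-zero rows/columns.
  filters_up = np.kron(filters_up, ker)[:, :, :-rate + 1, :-rate + 1]
  return np.transpose(filters_up, [2, 3, 0, 1])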
Example #9
  def testConv2DTransposeSameNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.test_session(use_gpu=True):
        strides = [1, 1, 2, 2]

        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 12, 8]

        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")

        value = output.eval()
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(y_shape[3]):
              for h in xrange(y_shape[2]):
                target = 3.0
                # We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
                w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
                if h_in and w_in:
                  target += 9.0
                elif h_in or w_in:
                  target += 3.0
                self.assertAllClose(target, value[n, k, h, w])
Example #10
  def testConv2DTransposeSame(self):
    with self.cached_session():
      strides = [1, 2, 2, 1]

      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 12, 8, 2]

      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = self.evaluate(output)

      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(y_shape[2]):
            for h in xrange(y_shape[1]):
              target = 3.0
              # We add a case for locations divisible by the stride.
              h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
              w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
              if h_in and w_in:
                target += 9.0
              elif h_in or w_in:
                target += 3.0
              self.assertAllClose(target, value[n, h, w, k])
Example #11
  def testConv2DTransposeSingleStrideNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        strides = [1, 1, 1, 1]

        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 6, 4]

        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")

        value = self.evaluate(output)
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(y_shape[3]):
              for h in xrange(y_shape[2]):
                target = 4 * 3.0
                h_in = h > 0 and h < y_shape[2] - 1
                w_in = w > 0 and w < y_shape[3] - 1
                if h_in and w_in:
                  target += 5 * 3.0
                elif h_in or w_in:
                  target += 2 * 3.0
                self.assertAllClose(target, value[n, k, h, w])
Example #12
    def testConv2DTransposeValidNCHW(self):
        # `NCHW` data format is only supported for CUDA device.
        if test.is_gpu_available(cuda_only=True):
            with self.test_session(use_gpu=True):
                strides = [1, 1, 2, 2]

                # Input, output: [batch, depth, height, width]
                x_shape = [2, 3, 6, 4]
                y_shape = [2, 2, 13, 9]

                # Filter: [kernel_height, kernel_width, output_depth, input_depth]
                f_shape = [3, 3, 2, 3]

                x = constant_op.constant(1.0,
                                         shape=x_shape,
                                         name="x",
                                         dtype=dtypes.float32)
                f = constant_op.constant(1.0,
                                         shape=f_shape,
                                         name="filter",
                                         dtype=dtypes.float32)
                output = nn_ops.conv2d_transpose(x,
                                                 f,
                                                 y_shape,
                                                 strides=strides,
                                                 padding="VALID",
                                                 data_format="NCHW")

                value = output.eval()
                cache_values = np.zeros(y_shape, dtype=np.float32)
                # The amount of padding added
                pad = 1
                for n in xrange(x_shape[0]):
                    for k in xrange(f_shape[2]):
                        for w in xrange(pad, y_shape[3] - pad):
                            for h in xrange(pad, y_shape[2] - pad):
                                target = 3.0
                                # We add a case for locations divisible by the stride.
                                h_in = (h % strides[2] == 0 and h > pad and
                                        h < y_shape[2] - 1 - pad)
                                w_in = (w % strides[3] == 0 and w > pad and
                                        w < y_shape[3] - 1 - pad)
                                if h_in and w_in:
                                    target += 9.0
                                elif h_in or w_in:
                                    target += 3.0
                                cache_values[n, k, h, w] = target

                        # copy values in the border
                        cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
                        cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
                        cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
                        cache_values[n, k, -1, :] = cache_values[n, k, -2, :]

                self.assertAllClose(cache_values, value)
Example #13
 def testConv2DTransposeShapeInference(self):
   # Test case for 8972
   initializer = random_ops.truncated_normal(
       [3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
   x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
   f = variable_scope.get_variable("f", initializer=initializer)
   f_shape = array_ops.stack([array_ops.shape(x)[0], 10, 5, 5])
   output = nn_ops.conv2d_transpose(
       x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
   self.assertEqual(output.get_shape().as_list(), [None, 10, 5, 5])
Example #14
 def testConv2DTransposeShapeInference(self):
   # Test case for 8972
   initializer = random_ops.truncated_normal(
       [3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
   x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
   f = variable_scope.get_variable("f", initializer=initializer)
   f_shape = array_ops.stack([array_ops.shape(x)[0], 10, 5, 5])
   output = nn_ops.conv2d_transpose(
       x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
   self.assertEqual(output.get_shape().as_list(), [3, 10, 5, 5])
Example #15
    def testConv2DTransposeSingleStride(self):
        with self.cached_session():
            for dtype in (dtypes.float32, dtypes.int32):
                strides = [1, 1, 1, 1]

                # Input, output: [batch, height, width, depth]
                x_shape = [2, 6, 4, 3]
                y_shape = [2, 6, 4, 2]

                # Filter: [kernel_height, kernel_width, output_depth, input_depth]
                f_shape = [3, 3, 2, 3]

                x = constant_op.constant(1,
                                         shape=x_shape,
                                         name="x",
                                         dtype=dtype)
                f = constant_op.constant(1,
                                         shape=f_shape,
                                         name="filter",
                                         dtype=dtype)
                output = nn_ops.conv2d_transpose(x,
                                                 f,
                                                 y_shape,
                                                 strides=strides,
                                                 padding="SAME")
                value = self.evaluate(output)

                # We count the number of cells being added at the locations in the
                # output.
                # At the center, #cells=kernel_height * kernel_width
                # At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
                # At the borders, #cells=ceil(kernel_height/2)*kernel_width or
                #                        kernel_height * ceil(kernel_width/2)

                for n in xrange(x_shape[0]):
                    for k in xrange(f_shape[2]):
                        for w in xrange(y_shape[2]):
                            for h in xrange(y_shape[1]):
                                target = 4 * 3
                                h_in = h > 0 and h < y_shape[1] - 1
                                w_in = w > 0 and w < y_shape[2] - 1
                                if h_in and w_in:
                                    target += 5 * 3
                                elif h_in or w_in:
                                    target += 2 * 3
                                if dtype.is_integer:
                                    self.assertAllEqual(
                                        target, value[n, h, w, k])
                                else:
                                    self.assertAllClose(
                                        target, value[n, h, w, k])
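Those targets are just the cell count described in the comment times the input depth of 3: corners overlap ceil(3/2) * ceil(3/2) = 4 kernel cells, borders 6, interior points all 9. A quick check of that arithmetic:

# Cell counts for a 3x3 kernel at stride 1 with SAME padding, times the
# input depth of 3 (every input cell contributes 1 * 1 = 1):
depth = 3
assert (2 * 2) * depth == 4 * 3          # corners
assert (2 * 3) * depth == 4 * 3 + 2 * 3  # borders
assert (3 * 3) * depth == 4 * 3 + 5 * 3  # interior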
Example #16
  def testConv2DTransposeValid(self):
    with self.test_session():
      strides = [1, 2, 2, 1]

      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 13, 9, 2]

      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="VALID")
      value = output.eval()

      cache_values = np.zeros(y_shape, dtype=np.float32)

      # The amount of padding added
      pad = 1

      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(pad, y_shape[2] - pad):
            for h in xrange(pad, y_shape[1] - pad):
              target = 3.0
              # We add a case for locations divisible by the stride.
              h_in = h % strides[1] == 0 and h > pad and h < y_shape[
                  1] - 1 - pad
              w_in = w % strides[2] == 0 and w > pad and w < y_shape[
                  2] - 1 - pad
              if h_in and w_in:
                target += 9.0
              elif h_in or w_in:
                target += 3.0
              cache_values[n, h, w, k] = target

          # copy values in the border
          cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
          cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
          cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
          cache_values[n, -1, :, k] = cache_values[n, -2, :, k]

      self.assertAllClose(cache_values, value)
Example #17
  def testConv2DTransposeValidNCHW(self):
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.test_session(use_gpu=True):
        strides = [1, 1, 2, 2]

        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 13, 9]

        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")

        value = output.eval()
        cache_values = np.zeros(y_shape, dtype=np.float32)
        # The amount of padding added
        pad = 1
        for n in xrange(x_shape[0]):
          for k in xrange(f_shape[2]):
            for w in xrange(pad, y_shape[3] - pad):
              for h in xrange(pad, y_shape[2] - pad):
                target = 3.0
                # We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and h > pad and h < y_shape[
                    2] - 1 - pad
                w_in = w % strides[3] == 0 and w > pad and w < y_shape[
                    3] - 1 - pad
                if h_in and w_in:
                  target += 9.0
                elif h_in or w_in:
                  target += 3.0
                cache_values[n, k, h, w] = target

            # copy values in the border
            cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
            cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
            cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
            cache_values[n, k, -1, :] = cache_values[n, k, -2, :]

        self.assertAllClose(cache_values, value)
Example #18
 def GraphFn(self, inp):
     np.random.seed(1234)
     dtype = inp.dtype
     n, c, h, w = 13, 3, 7, 11
     num_filters = 8
     weights_shape = [2, 2, num_filters, c]
     weights = constant_op.constant(np.random.randn(*weights_shape),
                                    dtype=dtype)
     output_shape = constant_op.constant([n, num_filters, h * 2 + 1, w * 2],
                                         dtype=dtypes.int32)
     output = nn_ops.conv2d_transpose(inp,
                                      weights,
                                      output_shape,
                                      strides=[1, 1, 2, 2],
                                      padding="VALID",
                                      data_format="NCHW")
     return array_ops.identity(output, name="output_0")
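Unlike the earlier tests, this one asks for h * 2 + 1 output rows rather than h * 2. With VALID padding, conv2d_transpose accepts any output size whose forward convolution would map back to the input size, i.e. anything in [(in - 1) * stride + kernel, in * stride + kernel - 1]. For in = 7, stride = 2, kernel = 2 that range is [14, 15], so 15 = h * 2 + 1 is legal. An illustrative range check:

# Illustrative: every output height H with floor((H - kernel) / stride) + 1
# == in_size is a valid VALID output size for conv2d_transpose.
def valid_transpose_range(in_size, stride, kernel):
    lo = (in_size - 1) * stride + kernel
    hi = in_size * stride + kernel - 1
    return lo, hi

assert valid_transpose_range(7, 2, 2) == (14, 15)  # so h * 2 + 1 == 15 works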
Example #19
def conv2d_decoder(inputs,
                   encoder,
                   shapes,
                   strides,
                   scope=None,
                   activation=None,
                   weight_sharing=False,
                   reuse=False):
    with variable_scope.variable_scope(scope or "decoder",
                                       reuse=reuse) as varscope:
        # Create a new scope in which the caching device is either
        # determined by the parent scope, or is set to place the cached
        if not context.executing_eagerly():
            if varscope.caching_device is None:
                varscope.set_caching_device(lambda op: op.device)

        encoder.reverse()
        shapes.reverse()
        strides.reverse()
        for idx, shape in enumerate(shapes):
            encoder_W = encoder[idx]
            dtype = encoder_W.dtype
            W = encoder_W if weight_sharing else variable_scope.get_variable(
                'w_{}'.format(idx),
                encoder_W.get_shape().as_list(),
                dtype,
                initializer=init_ops.variance_scaling_initializer())
            b = variable_scope.get_variable(
                'b_decoder_{}'.format(idx), [W.get_shape().as_list()[2]],
                dtype,
                initializer=init_ops.zeros_initializer())
            outputs = math_ops.add(
                nn_ops.conv2d_transpose(
                    inputs,
                    W,
                    array_ops.stack([
                        array_ops.shape(inputs)[0], shape[1], shape[2],
                        shape[3]
                    ]),
                    strides=[1, strides[idx], strides[idx], 1],
                    padding='SAME'), b)
            if activation:
                outputs = activation(outputs)
            inputs = outputs
        return inputs
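A hypothetical call, for illustration only; enc_weights, enc_shapes, and enc_strides stand for the kernels, per-layer input shapes, and strides recorded while building the encoder (all names here are made up). Copies are passed because the function reverses its list arguments in place:

# Hypothetical usage: mirror a previously built encoder. `latent` is the
# encoder output; list(...) copies protect the caller's lists from the
# in-place reverse() calls above.
decoded = conv2d_decoder(inputs=latent,
                         encoder=list(enc_weights),
                         shapes=list(enc_shapes),
                         strides=list(enc_strides),
                         scope="decoder",
                         activation=tf.nn.relu)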
Example #20
 def testGradient(self):
   x_shape = [2, 6, 4, 3]
   f_shape = [3, 3, 2, 3]
   y_shape = [2, 12, 8, 2]
   strides = [1, 2, 2, 1]
   np.random.seed(1)  # Make it reproducible.
   x_val = np.random.random_sample(x_shape).astype(np.float64)
   f_val = np.random.random_sample(f_shape).astype(np.float64)
   with self.cached_session():
     x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
     f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
     output = nn_ops.conv2d_transpose(
         x, f, y_shape, strides=strides, padding="SAME")
     err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                   output, y_shape)
   print("conv2d_transpose gradient err = %g " % err)
   err_tolerance = 0.0005
   self.assertLess(err, err_tolerance)
Example #21
 def testGradient(self):
   x_shape = [2, 6, 4, 3]
   f_shape = [3, 3, 2, 3]
   y_shape = [2, 12, 8, 2]
   strides = [1, 2, 2, 1]
   np.random.seed(1)  # Make it reproducible.
   x_val = np.random.random_sample(x_shape).astype(np.float64)
   f_val = np.random.random_sample(f_shape).astype(np.float64)
   with self.test_session():
     x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
     f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
     output = nn_ops.conv2d_transpose(
         x, f, y_shape, strides=strides, padding="SAME")
     err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                   output, y_shape)
   print("conv2d_transpose gradient err = %g " % err)
   err_tolerance = 0.0005
   self.assertLess(err, err_tolerance)
Example #22
    def testConv2DTransposeSingleStrideNCHW(self):
        # `NCHW` data format is only supported for CUDA device.
        if test.is_gpu_available(cuda_only=True):
            with self.test_session(use_gpu=True):
                strides = [1, 1, 1, 1]

                # Input, output: [batch, depth, height, width]
                x_shape = [2, 3, 6, 4]
                y_shape = [2, 2, 6, 4]

                # Filter: [kernel_height, kernel_width, output_depth, input_depth]
                f_shape = [3, 3, 2, 3]

                x = constant_op.constant(1.0,
                                         shape=x_shape,
                                         name="x",
                                         dtype=dtypes.float32)
                f = constant_op.constant(1.0,
                                         shape=f_shape,
                                         name="filter",
                                         dtype=dtypes.float32)

                output = nn_ops.conv2d_transpose(x,
                                                 f,
                                                 y_shape,
                                                 strides=strides,
                                                 padding="SAME",
                                                 data_format="NCHW")

                value = output.eval()
                for n in xrange(x_shape[0]):
                    for k in xrange(f_shape[2]):
                        for w in xrange(y_shape[3]):
                            for h in xrange(y_shape[2]):
                                target = 4 * 3.0
                                h_in = h > 0 and h < y_shape[2] - 1
                                w_in = w > 0 and w < y_shape[3] - 1
                                if h_in and w_in:
                                    target += 5 * 3.0
                                elif h_in or w_in:
                                    target += 2 * 3.0
                                self.assertAllClose(target, value[n, k, h, w])
Example #23
  def testConv2DTransposeSingleStride(self):
    with self.test_session():
      strides = [1, 1, 1, 1]

      # Input, output: [batch, height, width, depth]
      x_shape = [2, 6, 4, 3]
      y_shape = [2, 6, 4, 2]

      # Filter: [kernel_height, kernel_width, output_depth, input_depth]
      f_shape = [3, 3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv2d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = output.eval()

      # We count the number of cells being added at the locations in the output.
      # At the center, #cells=kernel_height * kernel_width
      # At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
      # At the borders, #cells=ceil(kernel_height/2)*kernel_width or
      #                        kernel_height * ceil(kernel_width/2)

      for n in xrange(x_shape[0]):
        for k in xrange(f_shape[2]):
          for w in xrange(y_shape[2]):
            for h in xrange(y_shape[1]):
              target = 4 * 3.0
              h_in = h > 0 and h < y_shape[1] - 1
              w_in = w > 0 and w < y_shape[2] - 1
              if h_in and w_in:
                target += 5 * 3.0
              elif h_in or w_in:
                target += 2 * 3.0
              self.assertAllClose(target, value[n, h, w, k])
Example #24
index = 0
dir = "l1f/"
for t in W_conv1_t.eval():
    with open(dir + 'filter' + str(index) + '.png', "wb") as file:
        t = tf.constant(t)
        #t = tf.expand_dims(tf.constant(t), 0)
        #t_n = tf.squeeze(nn_ops.conv2d_transpose(t, W_conv1, [1,5,5,1],[1,1,1,1]), [0])
        t = tf.image.resize_images(t,
                                   50,
                                   50,
                                   method=tf.image.ResizeMethod.BICUBIC)
        t = tf.constant(ops.normalize(t.eval(), 0, 255))
        file.write(tf.image.encode_png(t).eval())
    index += 1

W_conv2_t = tf.transpose(W_conv2, perm=[3, 0, 1, 2])

index = 0
dir = "l2f/"
#for t in W_conv1_t.eval():
for t in W_conv2_t.eval():
    with open(dir + 'filter' + str(index) + '.png', "wb") as file:
        t = tf.expand_dims(tf.constant(t), 0)
        t_n = tf.squeeze(
            nn_ops.conv2d_transpose(t, W_conv1, [1, 5, 5, 1], [1, 1, 1, 1]),
            [0])
        t_n = tf.image.resize_images(t_n,
                                     50,
                                     50,
                                     method=tf.image.ResizeMethod.BICUBIC)
        t_n = tf.constant(ops.normalize(t_n.eval(), 0, 255))
        file.write(tf.image.encode_png(t_n).eval())
    index += 1
Example #25
'''
  h_pool1 = nn_ops.conv2d_transpose(h_conv2 + b_conv2,W_conv2,
      [class_size,14,14,conv1_size],[1,1,1,1])
'''
index = 0
dir = "l1f/"
for t in W_conv1_t.eval():
  with open(dir+'filter'+str(index)+'.png', "wb") as file:
    t = tf.constant(t)
    #t = tf.expand_dims(tf.constant(t), 0)
    #t_n = tf.squeeze(nn_ops.conv2d_transpose(t, W_conv1, [1,5,5,1],[1,1,1,1]), [0])
    t = tf.image.resize_images(t,50,50, method=tf.image.ResizeMethod.BICUBIC)
    t = tf.constant(ops.normalize(t.eval(), 0, 255))
    file.write(tf.image.encode_png(t).eval())
  index += 1

W_conv2_t = tf.transpose(W_conv2, perm=[3,0,1,2])

index = 0
dir = "l2f/"
#for t in W_conv1_t.eval():
for t in W_conv2_t.eval():
  with open(dir+'filter'+str(index)+'.png', "wb") as file:
    t = tf.expand_dims(tf.constant(t), 0)
    t_n = tf.squeeze(nn_ops.conv2d_transpose(t, W_conv1, [1,5,5,1],[1,1,1,1]), [0])
    t_n = tf.image.resize_images(t_n,50,50, method=tf.image.ResizeMethod.BICUBIC)
    t_n = tf.constant(ops.normalize(t_n.eval(), 0, 255))
    file.write(tf.image.encode_png(t_n).eval())
  index += 1