Example #1
  def testConv1DTransposeSameNCW(self):
    # The `NCW` data format is only supported on CUDA devices.
    if test.is_gpu_available(cuda_only=True):
      with self.session():
        strides = [1, 1, 2]

        # Input, output: [batch, depth, width]
        x_shape = [2, 3, 4]
        y_shape = [2, 2, 8]

        # Filter: [kernel_width, output_depth, input_depth]
        f_shape = [3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

        output = nn_ops.conv1d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")

        value = self.evaluate(output)
        for n in range(x_shape[0]):
          for k in range(f_shape[1]):
            for w in range(y_shape[2]):
              target = 3.0
              # We add a case for locations divisible by the stride.
              w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
              if w_in:
                target += 3.0
              self.assertAllClose(target, value[n, k, w])
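The NCW test above needs CUDA; the same shape arithmetic can be checked on CPU with the default NWC layout. A minimal sketch (my own, assuming TensorFlow 2.x, where the op is exposed as tf.nn.conv1d_transpose): with "SAME" padding a transposed convolution inverts the stride, so output width = input width * stride.

import tensorflow as tf

x = tf.ones([2, 4, 3])  # [batch, width, in_depth]; NWC runs on CPU, unlike NCW
f = tf.ones([3, 2, 3])  # [kernel_width, output_depth, input_depth]
y = tf.nn.conv1d_transpose(x, f, output_shape=[2, 8, 2], strides=2,
                           padding="SAME")
print(y.shape)  # (2, 8, 2): width 4 * stride 2 = 8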
Example #2
    def testConv1DTransposeSingleStride(self):
        with self.cached_session():
            strides = [1, 1, 1]

            # Input, output: [batch, width, depth]
            x_shape = [2, 6, 3]
            y_shape = [2, 6, 2]

            # Filter: [kernel_width, output_depth, input_depth]
            f_shape = [3, 2, 3]

            x = constant_op.constant(1.0,
                                     shape=x_shape,
                                     name="x",
                                     dtype=dtypes.float32)
            f = constant_op.constant(1.0,
                                     shape=f_shape,
                                     name="filter",
                                     dtype=dtypes.float32)
            output = nn_ops.conv1d_transpose(x,
                                             f,
                                             y_shape,
                                             strides=strides,
                                             padding="SAME")
            value = self.evaluate(output)

            for n in range(y_shape[0]):
                for w in range(y_shape[1]):
                    for c in range(y_shape[2]):
                        target = 2 * 3.0
                        w_in = w > 0 and w < y_shape[1] - 1
                        if w_in:
                            target += 3.0
                        self.assertAllClose(target, value[n, w, c])
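Because both the input and the filter are all ones, each output value equals (number of contributing kernel taps) * (input depth). With kernel width 3 and depth 3, interior positions receive all three taps (3 * 3.0 = 9.0 = 2 * 3.0 + 3.0), while the two border positions receive only two (2 * 3.0 = 6.0), which is exactly what the loop asserts.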
Example #3
 def backprop_conv(self, activation, kernel, relevance, strides, padding='SAME'):
     # Propagate relevance through a conv layer using only the positive part
     # of the weights (LRP-style z+ rule).
     W_p = tf.maximum(0., kernel)
     # Stabilized forward pass with the positive weights.
     z = nn_ops.conv1d(activation, W_p, strides, padding) + 1e-10
     s = relevance / z
     # Backproject the relevance ratio through the transposed convolution.
     c = nn_ops.conv1d_transpose(s, W_p, tf.shape(activation), strides, padding)
     return activation * c
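This looks like the z+ relevance rule from layer-wise relevance propagation: run the layer forward with only the positive weights, divide the incoming relevance by the pre-activations, backproject the ratio through conv1d_transpose, and weight by the activations. A hypothetical invocation (shapes and names are my own, assuming the TF 2.x public API):

import tensorflow as tf

act = tf.random.uniform([1, 16, 4])   # [batch, width, channels] activations
kernel = tf.random.normal([3, 4, 8])  # [width, in_ch, out_ch] for conv1d; conv1d_transpose
                                      # reads the same tensor as [width, out_ch, in_ch]
rel = tf.random.uniform([1, 16, 8])   # relevance arriving from the layer above
w_pos = tf.maximum(0.0, kernel)
z = tf.nn.conv1d(act, w_pos, stride=1, padding="SAME") + 1e-10
c = tf.nn.conv1d_transpose(rel / z, w_pos, tf.shape(act), strides=1, padding="SAME")
lower_rel = act * c                   # redistributed relevance, shape [1, 16, 4]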
Example #4
  def testConv1DTransposeSame(self):
    with self.cached_session():
      strides = [1, 2, 1]

      # Input, output: [batch, width, depth]
      x_shape = [2, 4, 3]
      y_shape = [2, 8, 2]

      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = self.evaluate(output)

      for n in range(x_shape[0]):
        for k in range(f_shape[1]):
          for w in range(y_shape[1]):
            target = 3.0
            # We add a case for locations divisible by the stride.
            w_in = w % strides[1] == 0 and w > 0 and w < y_shape[1] - 1
            if w_in:
              target += 3.0
            self.assertAllClose(target, value[n, w, k])
Example #5
  def testConv1DTransposeSame(self):
    with self.cached_session():
      strides = [1, 2, 1]

      # Input, output: [batch, width, depth]
      x_shape = [2, 4, 3]
      y_shape = [2, 8, 2]

      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = self.evaluate(output)

      for n in range(x_shape[0]):
        for k in range(f_shape[1]):
          for w in range(y_shape[1]):
            target = 3.0
            # We add a case for locations divisible by the stride.
            w_in = w % strides[1] == 0 and w > 0 and w < y_shape[1] - 1
            if w_in:
              target += 3.0
            self.assertAllClose(target, value[n, w, k])
Example #6
  def testConv1DTransposeSingleStride(self):
    with self.cached_session():
      strides = [1, 1, 1]

      # Input, output: [batch, width, depth]
      x_shape = [2, 6, 3]
      y_shape = [2, 6, 2]

      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="SAME")
      value = self.evaluate(output)

      for n in range(y_shape[0]):
        for w in range(y_shape[1]):
          for c in range(y_shape[2]):
            target = 2 * 3.0
            w_in = w > 0 and w < y_shape[1] - 1
            if w_in:
              target += 3.0
            self.assertAllClose(target, value[n, w, c])
Example #7
  def testConv1DTransposeSameNCW(self):
    # The `NCW` data format is only supported on CUDA devices.
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        strides = [1, 1, 2]

        # Input, output: [batch, depth, width]
        x_shape = [2, 3, 4]
        y_shape = [2, 2, 8]

        # Filter: [kernel_width, output_depth, input_depth]
        f_shape = [3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

        output = nn_ops.conv1d_transpose(
            x, f, y_shape, strides=strides, padding="SAME", data_format="NCW")

        value = self.evaluate(output)
        for n in range(x_shape[0]):
          for k in range(f_shape[1]):
            for w in range(y_shape[2]):
              target = 3.0
              # We add a case for locations divisible by the stride.
              w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
              if w_in:
                target += 3.0
              self.assertAllClose(target, value[n, k, w])
Example #8
    def backprop_conv_input(self, X, kernel, relevance, strides, padding='SAME', lowest=0., highest=1.):
        # Split the kernel into its positive and negative parts.
        W_p = tf.maximum(0., kernel)
        W_n = tf.minimum(0., kernel)

        # Box constraints on the input: every feature of X is assumed to lie
        # in [lowest, highest].
        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest

        z_o = nn_ops.conv1d(X, kernel, strides, padding)
        z_p = nn_ops.conv1d(L, W_p, strides, padding)
        z_n = nn_ops.conv1d(H, W_n, strides, padding)

        z = z_o - z_p - z_n + 1e-10
        s = relevance / z

        c_o = nn_ops.conv1d_transpose(s, kernel, tf.shape(X), strides, padding)
        c_p = nn_ops.conv1d_transpose(s, W_p, tf.shape(X), strides, padding)
        c_n = nn_ops.conv1d_transpose(s, W_n, tf.shape(X), strides, padding)

        return X * c_o - L * c_p - H * c_n
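This appears to be the companion z^B (box-constraint) rule for the input layer: with inputs bounded in [lowest, highest], the denominator becomes z = conv(X, W) - conv(L, W+) - conv(H, W-), and the relevance returned to the input is X * c_o - L * c_p - H * c_n, each c term obtained by backprojecting s through conv1d_transpose with the matching weight tensor.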
Example #9
    def testConv1DTransposeValidNCW(self):
        # The `NCW` data format is only supported on CUDA devices.
        if test.is_gpu_available(cuda_only=True):
            with self.session():
                strides = [1, 1, 2]

                # Input, output: [batch, depth, width]
                x_shape = [2, 3, 4]
                y_shape = [2, 2, 9]

                # Filter: [kernel_width, output_depth, input_depth]
                f_shape = [3, 2, 3]

                x = constant_op.constant(1.0,
                                         shape=x_shape,
                                         name="x",
                                         dtype=dtypes.float32)
                f = constant_op.constant(1.0,
                                         shape=f_shape,
                                         name="filter",
                                         dtype=dtypes.float32)
                output = nn_ops.conv1d_transpose(x,
                                                 f,
                                                 y_shape,
                                                 strides=strides,
                                                 padding="VALID",
                                                 data_format="NCW")

                value = self.evaluate(output)
                cache_values = np.zeros(y_shape, dtype=np.float32)
                # The amount of padding added
                pad = 1
                for n in range(x_shape[0]):
                    for k in range(f_shape[1]):
                        for w in range(pad, y_shape[2] - pad):
                            target = 3.0
                            # We add a case for locations divisible by the stride.
                            w_in = w % strides[2] == 0 and w > pad and \
                                   w < y_shape[2] - 1 - pad
                            if w_in:
                                target += 3.0
                            cache_values[n, k, w] = target

                        # Fill the border positions from their interior neighbors.
                        cache_values[n, k, 0] = cache_values[n, k, 1]
                        cache_values[n, k, -1] = cache_values[n, k, -2]

                self.assertAllClose(cache_values, value)
Example #10
    def testConv1DTransposeValid(self):
        with self.cached_session():
            strides = [1, 2, 1]

            # Input, output: [batch, width, depth]
            x_shape = [2, 4, 3]
            y_shape = [2, 9, 2]

            # Filter: [kernel_width, output_depth, input_depth]
            f_shape = [3, 2, 3]

            x = constant_op.constant(1.0,
                                     shape=x_shape,
                                     name="x",
                                     dtype=dtypes.float32)
            f = constant_op.constant(1.0,
                                     shape=f_shape,
                                     name="filter",
                                     dtype=dtypes.float32)
            output = nn_ops.conv1d_transpose(x,
                                             f,
                                             y_shape,
                                             strides=strides,
                                             padding="VALID")
            value = self.evaluate(output)

            cache_values = np.zeros(y_shape, dtype=np.float32)

            # The amount of padding added
            pad = 1

            for n in range(x_shape[0]):
                for k in range(f_shape[1]):
                    for w in range(pad, y_shape[1] - pad):
                        target = 3.0
                        # We add a case for locations divisible by the stride.
                        w_in = (w % strides[1] == 0 and w > pad
                                and w < y_shape[1] - 1 - pad)
                        if w_in:
                            target += 3.0
                        cache_values[n, w, k] = target

                    # Fill the border positions from their interior neighbors.
                    cache_values[n, 0, k] = cache_values[n, 1, k]
                    cache_values[n, -1, k] = cache_values[n, -2, k]

        self.assertAllClose(cache_values, value)
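With "VALID" padding the transposed output width follows width_out = (width_in - 1) * stride + kernel_width, here (4 - 1) * 2 + 3 = 9, which is where y_shape comes from. A quick check (a sketch, again assuming the TF 2.x tf.nn.conv1d_transpose API):

import tensorflow as tf

x = tf.ones([2, 4, 3])  # [batch, width, in_depth]
f = tf.ones([3, 2, 3])  # [kernel_width, output_depth, input_depth]
y = tf.nn.conv1d_transpose(x, f, output_shape=[2, 9, 2], strides=2,
                           padding="VALID")
print(y.shape)  # (2, 9, 2)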
Example #11
 def testGradient(self):
   x_shape = [2, 4, 3]
   f_shape = [3, 2, 3]
   y_shape = [2, 8, 2]
   strides = [1, 2, 1]
   np.random.seed(1)  # Make it reproducible.
   x_val = np.random.random_sample(x_shape).astype(np.float64)
   f_val = np.random.random_sample(f_shape).astype(np.float64)
   with self.cached_session():
     x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
     f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
     output = nn_ops.conv1d_transpose(
         x, f, y_shape, strides=strides, padding="SAME")
     err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                   output, y_shape)
   print("conv1d_transpose gradient err = %g " % err)
   err_tolerance = 0.0005
   self.assertLess(err, err_tolerance)
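gradient_checker.compute_gradient_error builds the analytic (backprop) Jacobian and a numeric finite-difference Jacobian and returns their maximum absolute difference, so the assertion simply bounds that gap. A rough TF 2.x equivalent, sketched with tf.test.compute_gradient (the reduction to a single error number is my own):

import numpy as np
import tensorflow as tf

def f(x, flt):
    return tf.nn.conv1d_transpose(x, flt, [2, 8, 2], strides=2, padding="SAME")

x = tf.constant(np.random.random_sample([2, 4, 3]), tf.float32)
flt = tf.constant(np.random.random_sample([3, 2, 3]), tf.float32)
theoretical, numerical = tf.test.compute_gradient(f, [x, flt])
err = max(np.max(np.abs(t - n)) for t, n in zip(theoretical, numerical))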
Example #12
 def testGradient(self):
   x_shape = [2, 4, 3]
   f_shape = [3, 2, 3]
   y_shape = [2, 8, 2]
   strides = [1, 2, 1]
   np.random.seed(1)  # Make it reproducible.
   x_val = np.random.random_sample(x_shape).astype(np.float64)
   f_val = np.random.random_sample(f_shape).astype(np.float64)
   with self.cached_session():
     x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
     f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
     output = nn_ops.conv1d_transpose(
         x, f, y_shape, strides=strides, padding="SAME")
     err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                   output, y_shape)
   print("conv1d_transpose gradient err = %g " % err)
   err_tolerance = 0.0005
   self.assertLess(err, err_tolerance)
Example #13
  def testConv1DTransposeValidNCW(self):
    # The `NCW` data format is only supported on CUDA devices.
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        strides = [1, 1, 2]

        # Input, output: [batch, depth, width]
        x_shape = [2, 3, 4]
        y_shape = [2, 2, 9]

        # Filter: [kernel_width, output_depth, input_depth]
        f_shape = [3, 2, 3]

        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv1d_transpose(
            x, f, y_shape, strides=strides, padding="VALID", data_format="NCW")

        value = self.evaluate(output)
        cache_values = np.zeros(y_shape, dtype=np.float32)
        # The amount of padding added
        pad = 1
        for n in range(x_shape[0]):
          for k in range(f_shape[1]):
            for w in range(pad, y_shape[2] - pad):
              target = 3.0
              # We add a case for locations divisible by the stride.
              w_in = w % strides[2] == 0 and w > pad and \
                     w < y_shape[2] - 1 - pad
              if w_in:
                target += 3.0
              cache_values[n, k, w] = target

            # Fill the border positions from their interior neighbors.
            cache_values[n, k, 0] = cache_values[n, k, 1]
            cache_values[n, k, -1] = cache_values[n, k, -2]

        self.assertAllClose(cache_values, value)
Example #14
  def testConv1DTransposeValid(self):
    with self.cached_session():
      strides = [1, 2, 1]

      # Input, output: [batch, width, depth]
      x_shape = [2, 4, 3]
      y_shape = [2, 9, 2]

      # Filter: [kernel_width, output_depth, input_depth]
      f_shape = [3, 2, 3]

      x = constant_op.constant(
          1.0, shape=x_shape, name="x", dtype=dtypes.float32)
      f = constant_op.constant(
          1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
      output = nn_ops.conv1d_transpose(
          x, f, y_shape, strides=strides, padding="VALID")
      value = self.evaluate(output)

      cache_values = np.zeros(y_shape, dtype=np.float32)

      # The amount of padding added
      pad = 1

      for n in range(x_shape[0]):
        for k in range(f_shape[1]):
          for w in range(pad, y_shape[1] - pad):
            target = 3.0
            # We add a case for locations divisible by the stride.
            w_in = w % strides[1] == 0 and w > pad and w < y_shape[1] - 1 - pad
            if w_in:
              target += 3.0
            cache_values[n, w, k] = target

          # Fill the border positions from their interior neighbors.
          cache_values[n, 0, k] = cache_values[n, 1, k]
          cache_values[n, -1, k] = cache_values[n, -2, k]

    self.assertAllClose(cache_values, value)
Example #15
def conv1d_t_relu(inputs, w, b, output_shape, stride):
    conv = nn_ops.conv1d_transpose(inputs, w, output_shape=output_shape,
                                   stride=stride, padding='SAME') + b
    conv = tf.nn.relu(conv)
    return conv
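Note the stride= keyword: this snippet appears to target the older contrib-era signature (tf.contrib.nn.conv1d_transpose); in current releases the keyword on tf.nn.conv1d_transpose / nn_ops.conv1d_transpose is strides.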
Example #16
def conv_transpose_1d(value, filter_, output_shape, stride, padding="SAME"):
    return conv1d_transpose(value, filter_, output_shape, stride, padding)
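A hypothetical call to this thin wrapper (assuming conv1d_transpose was imported directly from tensorflow.python.ops.nn_ops; the shapes are illustrative):

import tensorflow as tf
from tensorflow.python.ops.nn_ops import conv1d_transpose  # assumed import

v = tf.ones([1, 5, 4])   # [batch, width, in_depth]
w = tf.ones([3, 6, 4])   # [kernel_width, output_depth, input_depth]
out = conv_transpose_1d(v, w, [1, 10, 6], stride=2)  # SAME: width 5 * 2 = 10
print(out.shape)         # (1, 10, 6)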