Example 1
  def _VerifyValues(self, input_sizes, filter_sizes, out_backprop_sizes, stride,
                    padding, expected):
    """Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      stride: Stride.
      padding: Padding type.
      expected: Expected output.
    """
    total_size_1 = np.prod(filter_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
    x2 = np.arange(
        1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    strides = [1, stride, stride, 1]

    with self.test_session() as sess:
      with self.test_scope():
        t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
        t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
        out = gen_nn_ops.conv2d_backprop_input(
            input_sizes=input_sizes,
            filter=t1,
            out_backprop=t2,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      value = sess.run(out, {t1: x1, t2: x2})
      self.assertArrayNear(expected, np.ravel(value), 1e-3)
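For orientation, here is a minimal self-contained sketch (assuming TF 1.x graph mode and NHWC data; names like dx_kernel are ours) showing that gen_nn_ops.conv2d_backprop_input computes the same tensor as autodiff of conv2d with respect to its input:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

x = tf.placeholder(tf.float32, [1, 4, 4, 1])        # NHWC input
w = tf.constant(np.ones([2, 2, 1, 1], np.float32))  # HWIO filter
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="VALID")
dy = tf.ones_like(y)  # upstream gradient

# Direct call to the backprop kernel ...
dx_kernel = gen_nn_ops.conv2d_backprop_input(
    input_sizes=[1, 4, 4, 1], filter=w, out_backprop=dy,
    strides=[1, 1, 1, 1], padding="VALID")
# ... and the same quantity via tf.gradients.
dx_autodiff = tf.gradients(y, x, grad_ys=dy)[0]

with tf.Session() as sess:
    feed = {x: np.zeros([1, 4, 4, 1], np.float32)}
    a, b = sess.run([dx_kernel, dx_autodiff], feed)
    np.testing.assert_allclose(a, b, rtol=1e-5)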
Example 2
def conv2d_transpose(value, filter, output_shape, strides, padding="SAME",
                     name=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv2d` rather than an actual
  deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]`.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.op_scope([value, filter, output_shape], name,
                    "conv2d_transpose") as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")
    if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
      raise ValueError(
          "input channels does not match filter's input channels, "
          "{} != {}".format(value.get_shape()[3], filter.get_shape()[3]))

    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}"
                       .format(output_shape_.get_shape()))

    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3], filter.get_shape()[2]))

    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))

    return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                            filter=filter,
                                            out_backprop=value,
                                            strides=strides,
                                            padding=padding,
                                            name=name)
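A short usage sketch of the public wrapper (assuming TF 1.x, where this function is exposed as tf.nn.conv2d_transpose); with stride 2 and SAME padding, a 3x3 input upsamples to 6x6:

import numpy as np
import tensorflow as tf

value = tf.placeholder(tf.float32, [1, 3, 3, 1])  # NHWC
# Filter shape is [height, width, output_channels, in_channels].
kernel = tf.constant(np.ones([3, 3, 2, 1], np.float32))
up = tf.nn.conv2d_transpose(value, kernel,
                            output_shape=[1, 6, 6, 2],
                            strides=[1, 2, 2, 1], padding="SAME")
with tf.Session() as sess:
    out = sess.run(up, {value: np.ones([1, 3, 3, 1], np.float32)})
    print(out.shape)  # (1, 6, 6, 2)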
Example 3
def _Conv2DGrad(op, grad):
  """Gradient function for Conv2D."""
  dilations = op.get_attr("dilations")
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  explicit_paddings = op.get_attr("explicit_paddings")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])

  # We call the gen_nn_ops backprop functions instead of the nn_ops backprop
  # functions for performance reasons in Eager mode. gen_nn_ops functions take
  # an `explicit_paddings` parameter, but nn_ops functions do not. So if we
  # were to use the nn_ops functions, we would have to convert `padding` and
  # `explicit_paddings` into a single `padding` parameter, increasing overhead
  # in Eager mode.
  return [
      gen_nn_ops.conv2d_backprop_input(
          shape_0,
          op.inputs[1],
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          explicit_paddings=explicit_paddings,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format),
      gen_nn_ops.conv2d_backprop_filter(
          op.inputs[0],
          shape_1,
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          explicit_paddings=explicit_paddings,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format)
  ]
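For context, gradient functions like this are attached to an op type with the RegisterGradient decorator; a schematic sketch of the pattern, using a hypothetical op name so it does not collide with TensorFlow's own "Conv2D" registration:

from tensorflow.python.framework import ops

@ops.RegisterGradient("MyConv2D")  # hypothetical op name
def _my_conv2d_grad(op, grad):
    # A gradient function receives the forward op and the upstream gradient
    # and must return one gradient per forward input, here (input, filter).
    input_grad = None   # placeholder for d(loss)/d(input)
    filter_grad = None  # placeholder for d(loss)/d(filter)
    return [input_grad, filter_grad]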
Example 4
def _Conv2DGrad(op, grad):
    """Gradient function for Conv2D."""
    dilations = op.get_attr("dilations")
    strides = op.get_attr("strides")
    padding = op.get_attr("padding")
    explicit_paddings = op.get_attr("explicit_paddings")
    use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
    data_format = op.get_attr("data_format")
    shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])

    # We call the gen_nn_ops backprop functions instead of the nn_ops backprop
    # functions for performance reasons in Eager mode. gen_nn_ops functions
    # take an `explicit_paddings` parameter, but nn_ops functions do not. So if
    # we were to use the nn_ops functions, we would have to convert `padding`
    # and `explicit_paddings` into a single `padding` parameter, increasing
    # overhead in Eager mode.
    return [
        gen_nn_ops.conv2d_backprop_input(shape_0,
                                         op.inputs[1],
                                         grad,
                                         dilations=dilations,
                                         strides=strides,
                                         padding=padding,
                                         explicit_paddings=explicit_paddings,
                                         use_cudnn_on_gpu=use_cudnn_on_gpu,
                                         data_format=data_format),
        gen_nn_ops.conv2d_backprop_filter(op.inputs[0],
                                          shape_1,
                                          grad,
                                          dilations=dilations,
                                          strides=strides,
                                          padding=padding,
                                          explicit_paddings=explicit_paddings,
                                          use_cudnn_on_gpu=use_cudnn_on_gpu,
                                          data_format=data_format)
    ]
Example 5
def _Conv2DBackpropFilterGrad(op, grad):
  # We call the gen_nn_ops backprop functions instead of nn_ops backprop
  # functions for performance reasons in Eager mode. See _Conv2DGrad.
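  # Conv2DBackpropFilter has three inputs (input, filter_sizes, out_backprop);
  # filter_sizes is an integer shape vector, so its gradient slot below is
  # None.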
  return [
      gen_nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]),
          grad,
          op.inputs[2],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          explicit_paddings=op.get_attr("explicit_paddings"),
          use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
          data_format=op.get_attr("data_format").decode()), None,
      gen_nn_ops.conv2d(
          op.inputs[0],
          grad,
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          explicit_paddings=op.get_attr("explicit_paddings"),
          use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
          data_format=op.get_attr("data_format").decode())
  ]
Example 6
    def _VerifyValues(self,
                      input_sizes=None,
                      filter_sizes=None,
                      out_backprop_sizes=None,
                      strides=None,
                      dilations=None,
                      padding=None,
                      data_format_src="NHWC",
                      data_format_dst="NHWC",
                      expected=None):
        """Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      strides: Strides.
      dilations: Dilations.
      padding: Padding type.
      data_format_src: Data format input is in.
      data_format_dst: Data format verification will run and input is converted
        to.
      expected: Expected output.
    """

        total_size_1 = np.prod(filter_sizes)
        total_size_2 = np.prod(out_backprop_sizes)
        x1 = np.arange(1, total_size_1 + 1,
                       dtype=np.float32).reshape(filter_sizes)
        x2 = np.arange(1, total_size_2 + 1,
                       dtype=np.float32).reshape(out_backprop_sizes)
        strides = [1] + strides + [1]
        if dilations is not None:
            dilations = [1] + dilations + [1]

        expected = np.reshape(expected, input_sizes)

        # Convert between data formats.
        expected = test_utils.ConvertBetweenDataFormats(
            expected, data_format_src, data_format_dst)
        x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
                                                  data_format_dst)
        input_sizes = test_utils.PermuteDimsBetweenDataFormats(
            input_sizes, data_format_src, data_format_dst)
        out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
            out_backprop_sizes, data_format_src, data_format_dst)
        strides = test_utils.PermuteDimsBetweenDataFormats(
            strides, data_format_src, data_format_dst)
        if dilations is not None:
            dilations = test_utils.PermuteDimsBetweenDataFormats(
                dilations, data_format_src, data_format_dst)

        with self.cached_session() as sess:
            t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
            t2 = array_ops.placeholder(dtypes.float32,
                                       shape=out_backprop_sizes)
            with self.test_scope():
                out = gen_nn_ops.conv2d_backprop_input(
                    input_sizes=input_sizes,
                    filter=t1,
                    out_backprop=t2,
                    strides=strides,
                    dilations=dilations,
                    padding=padding,
                    data_format=data_format_dst)

            value = sess.run(out, {t1: x1, t2: x2})
            self.assertAllEqual(input_sizes, value.shape)
            self.assertAllClose(expected, value, 1e-3)
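The test_utils helpers above are internal to the XLA test suite, but they boil down to axis permutations; a minimal sketch of the NHWC <-> NCHW conversion they perform:

import numpy as np

def nhwc_to_nchw(x):
    return np.transpose(x, [0, 3, 1, 2])  # move channels next to batch

def nchw_to_nhwc(x):
    return np.transpose(x, [0, 2, 3, 1])  # move channels to the end

x = np.zeros([2, 5, 7, 3])    # NHWC
print(nhwc_to_nchw(x).shape)  # (2, 3, 5, 7)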
Example 7
def conv_1d_tranpose(layer,
                     nb_filter,
                     filter_size,
                     strides,
                     padding='same',
                     bias=True,
                     scope=None,
                     reuse=False,
                     bias_init='zeros',
                     trainable=True,
                     restore=True,
                     regularizer=None,
                     weight_decay=0.001,
                     weights_init='uniform_scaling',
                     name="deconv_1d"):
    '''
    layer: A 3-D `Tensor` of type `float` and shape `[batch, in_width, in_channels]`.
    SEE: https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_backprop_input
    SEE2: https://github.com/tensorflow/tensorflow/pull/13105/commits/2ca9b908d1978a94855349309fd16a67cfd98659
    TODO: ADD weight-decay/regularizer
    '''
    input_shape = utils.get_incoming_shape(layer)
    _, in_width, in_channels = input_shape
    batch_size = tf.shape(layer)[0]

    filter_size = [filter_size, nb_filter, in_channels]
    # This trick, I think, works only for strict up-sampling.
    output_shape = [batch_size, strides * in_width, nb_filter]
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")

    strides = [1, 1, strides, 1]
    spatial_start_dim = 1
    padding = utils.autoformat_padding(padding)

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[layer],
                           reuse=reuse) as scope:
        name = scope.name
        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        elif type(W_init) in [tf.Tensor, np.ndarray, list]:
            filter_size = None

        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: tflearn.losses.get(regularizer)(x, weight_decay)

        W = vs.variable('W',
                        shape=filter_size,
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)

        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        # expand dims to make it compatible with conv2d
        W = tf.expand_dims(W, 0)
        layer = tf.expand_dims(layer, spatial_start_dim)
        output_shape_ = array_ops.concat(
            [output_shape_[:1], [1], output_shape_[1:]], axis=0)

        result = gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                                  filter=W,
                                                  out_backprop=layer,
                                                  strides=strides,
                                                  padding=padding,
                                                  name=name)

        result = array_ops.squeeze(result, [spatial_start_dim])
        result = tf.reshape(result, shape=output_shape)

        if bias:
            b_shape = [nb_filter]
            bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=b_shape,
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
            result = tf.nn.bias_add(result, b)
            result.b = b

        result.scope = scope
        result.W = W

    return result
Example 8
##### Conv backprop grad ######
# This fragment comes from the same test file as Example 13; sess_tf
# (tf.Session) and sess_dace (dace TFSession) are created earlier there.
inp_shape = [10, 10, 10, 10]
inp = tf.placeholder(tf.float64, inp_shape)
filters = [[i, i, 10, 3] for i in [1, 2, 3, 4, 7]]
strides = [[1, i, i, 1] for i in [1, 3, 4, 7, 8]]
paddings = ["SAME", "VALID"]
for p in paddings:
    for f in filters:
        for s in strides:
            print(p, f, s)
            filter = tf.placeholder(tf.float64, f)
            outp = tf.nn.conv2d(inp, filter, s, padding=p, data_format="NHWC")
            out_backprop = tf.placeholder(tf.float64, outp.shape)
            inp_gradients = gen_nn_ops.conv2d_backprop_input(inp_shape,
                                                             filter,
                                                             out_backprop,
                                                             s,
                                                             padding=p)
            test_grads = np.random.uniform(size=outp.shape).astype(np.float64)
            test_filter = np.random.uniform(size=tuple(f)).astype(np.float64)

            output_tf = sess_tf.run(inp_gradients,
                                    feed_dict={
                                        filter: test_filter,
                                        out_backprop: test_grads
                                    })
            output_dace = sess_dace.run(inp_gradients,
                                        feed_dict={
                                            filter: test_filter,
                                            out_backprop: test_grads
                                        })
Example 9
  def _VerifyValues(self,
                    input_sizes=None,
                    filter_sizes=None,
                    out_backprop_sizes=None,
                    strides=None,
                    dilations=None,
                    padding=None,
                    data_format_src="NHWC",
                    data_format_dst="NHWC",
                    expected=None):
    """Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      strides: Strides.
      dilations: Dilations.
      padding: Padding type.
      data_format_src: Data format the input is in.
      data_format_dst: Data format in which verification will run and to which
        the input is converted.
      expected: Expected output.
    """

    total_size_1 = np.prod(filter_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
    x2 = np.arange(
        1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    strides = [1] + strides + [1]
    if dilations is not None:
      dilations = [1] + dilations + [1]

    expected = np.reshape(expected, input_sizes)

    # Convert between data formats.
    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
                                                    data_format_dst)
    x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
                                              data_format_dst)
    input_sizes = test_utils.PermuteDimsBetweenDataFormats(
        input_sizes, data_format_src, data_format_dst)
    out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
        out_backprop_sizes, data_format_src, data_format_dst)
    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                       data_format_dst)
    if dilations is not None:
      dilations = test_utils.PermuteDimsBetweenDataFormats(
          dilations, data_format_src, data_format_dst)

    with self.test_session() as sess:
      t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
      t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
      with self.test_scope():
        out = gen_nn_ops.conv2d_backprop_input(
            input_sizes=input_sizes,
            filter=t1,
            out_backprop=t2,
            strides=strides,
            dilations=dilations,
            padding=padding,
            data_format=data_format_dst)

      value = sess.run(out, {t1: x1, t2: x2})
      self.assertAllEqual(input_sizes, value.shape)
      self.assertAllClose(expected, value, 1e-3)
Example 10
def conv1d_transpose(
        input,  # pylint: disable=redefined-builtin
        filters,
        output_shape,
        strides,
        padding="SAME",
        data_format="NWC",
        dilations=None,
        name=None):
    """The transpose of `conv1d`.
  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
  rather than an actual deconvolution.
  Args:
    input: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filters: A 3-D `Tensor` with the same type as `input` and shape
      `[filter_width, output_channels, in_channels]`.  The `in_channels`
      dimension of `filters` must match that of `input`.
    output_shape: A 1-D `Tensor`, containing three elements, representing the
      output shape of the deconvolution op.
    strides: An int or list of `ints` that has length `1` or `3`.  The number of
      entries by which the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. `'NWC'` and `'NCW'` are supported.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.
    name: Optional name for the returned tensor.
  Returns:
    A `Tensor` with the same type as `input`.
  Raises:
    ValueError: If input/output depth does not match the shape of `filters`,
      if `output_shape` is not a 3-element vector, if `padding` is other than
      `'VALID'` or `'SAME'`, or if `data_format` is invalid.
  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
    with ops.name_scope(name, "conv1d_transpose",
                        [input, filters, output_shape]) as name:
        # The format could be either NWC or NCW, map to NHWC or NCHW
        if data_format is None or data_format == "NWC":
            data_format = "NHWC"
            spatial_start_dim = 1
            channel_index = 2
        elif data_format == "NCW":
            data_format = "NCHW"
            spatial_start_dim = 2
            channel_index = 1
        else:
            raise ValueError("data_format must be \"NWC\" or \"NCW\".")

        # Reshape the input tensor to [batch, 1, in_width, in_channels]
        strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
        dilations = [1] + _get_sequence(dilations, 1, channel_index,
                                        "dilations")

        input = array_ops.expand_dims(input, spatial_start_dim)
        filters = array_ops.expand_dims(filters, 0)
        output_shape = list(output_shape) if not isinstance(
            output_shape, ops.Tensor) else output_shape
        output_shape = array_ops.concat([
            output_shape[:spatial_start_dim], [1],
            output_shape[spatial_start_dim:]
        ], 0)

        result = gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape,
                                                  filter=filters,
                                                  out_backprop=input,
                                                  strides=strides,
                                                  padding=padding,
                                                  data_format=data_format,
                                                  dilations=dilations,
                                                  name=name)
        return array_ops.squeeze(result, spatial_start_dim)
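A short usage sketch (assuming TF 2.x, where this function is exposed as tf.nn.conv1d_transpose); with stride 2 and SAME padding, width 8 upsamples to 16:

import numpy as np
import tensorflow as tf

x = tf.constant(np.ones([2, 8, 4], np.float32))  # [batch, in_width, in_channels]
f = tf.constant(np.ones([3, 6, 4], np.float32))  # [filter_width, out_ch, in_ch]
y = tf.nn.conv1d_transpose(x, f, output_shape=[2, 16, 6],
                           strides=2, padding="SAME")
print(y.shape)  # (2, 16, 6)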
Example 11
def conv2d_transpose(value,
                     filter,
                     output_shape,
                     strides,
                     padding="SAME",
                     name=None):
    """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv2d` rather than an actual
  deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]`.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
    with ops.op_scope([value, filter, output_shape], name,
                      "conv2d_transpose") as name:
        value = ops.convert_to_tensor(value, name="value")
        filter = ops.convert_to_tensor(filter, name="filter")
        if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
            raise ValueError(
                "input channels does not match filter's input channels, "
                "{} != {}".format(value.get_shape()[3],
                                  filter.get_shape()[3]))

        output_shape_ = ops.convert_to_tensor(output_shape,
                                              name="output_shape")
        if not output_shape_.get_shape().is_compatible_with(
                tensor_shape.vector(4)):
            raise ValueError(
                "output_shape must have shape (4,), got {}".format(
                    output_shape_.get_shape()))

        if isinstance(output_shape, (list, np.ndarray)):
            # output_shape's shape should be == [4] if reached this point.
            if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
                raise ValueError(
                    "output_shape does not match filter's output channels, "
                    "{} != {}".format(output_shape[3],
                                      filter.get_shape()[2]))

        if padding != "VALID" and padding != "SAME":
            raise ValueError("padding must be either VALID or SAME:"
                             " {}".format(padding))

        return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                                filter=filter,
                                                out_backprop=value,
                                                strides=strides,
                                                padding=padding,
                                                name=name)
Example 12
def conv1d_transpose(
        value,
        filter,  # pylint: disable=redefined-builtin
        output_shape,
        stride,
        padding="SAME",
        data_format="NWC",
        name=None):
    """The transpose of `conv1d`.
  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv1d` rather than an actual
  deconvolution.
  Args:
    value: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filter: A 3-D `Tensor` with the same type as `value` and shape
      `[filter_width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    stride: An `integer`.  The number of entries by which
      the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the comment in `tf.nn.convolution` for details.
    data_format: A string. `'NWC'` and `'NCW'` are supported.
    name: Optional name for the returned tensor.
  Returns:
    A `Tensor` with the same type as `value`.
  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
    with ops.name_scope(name, "conv1d_transpose",
                        [value, filter, output_shape]) as name:
        output_shape_ = ops.convert_to_tensor(output_shape,
                                              name="output_shape")
        if not output_shape_.get_shape().is_compatible_with(
                tensor_shape.vector(3)):
            raise ValueError(
                "output_shape must have shape (3,), got {}".format(
                    output_shape_.get_shape()))

        # The format could be either NWC or NCW, map to NHWC or NCHW
        if data_format is None or data_format == "NWC":
            data_format_2d = "NHWC"
            axis = 2
        elif data_format == "NCW":
            data_format_2d = "NCHW"
            axis = 1
        else:
            raise ValueError("data_format must be \"NWC\" or \"NCW\".")

        if not value.get_shape()[axis].is_compatible_with(
                filter.get_shape()[2]):
            raise ValueError(
                "input channels does not match filter's input channels, "
                "{} != {}".format(value.get_shape()[axis],
                                  filter.get_shape()[2]))

        if isinstance(output_shape, (list, np.ndarray)):
            # output_shape's shape should be == [3] if reached this point.
            if not filter.get_shape()[1].is_compatible_with(
                    output_shape[axis]):
                raise ValueError(
                    "output_shape does not match filter's output channels, "
                    "{} != {}".format(output_shape[axis],
                                      filter.get_shape()[1]))

        if padding != "VALID" and padding != "SAME":
            raise ValueError("padding must be either VALID or SAME:"
                             " {}".format(padding))

        # Reshape the input tensor to [batch, 1, in_width, in_channels]
        if data_format_2d == "NHWC":
            output_shape_ = array_ops.concat(
                [output_shape_[:1], [1], output_shape_[1:]], axis=0)
            spatial_start_dim = 1
            strides = [1, 1, stride, 1]
        else:
            output_shape_ = array_ops.concat(
                [output_shape_[:2], [1], output_shape_[2:]], axis=0)
            spatial_start_dim = 2
            strides = [1, 1, 1, stride]
        value = array_ops.expand_dims(value, spatial_start_dim)
        filter = array_ops.expand_dims(filter, 0)  # pylint: disable=redefined-builtin

        result = gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                                  filter=filter,
                                                  out_backprop=value,
                                                  strides=strides,
                                                  padding=padding,
                                                  data_format=data_format_2d,
                                                  name=name)
        return array_ops.squeeze(result, [spatial_start_dim])
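The dummy-height trick used above is worth spelling out: a 1-D tensor [batch, width, channels] gains a size-1 height axis, the 2-D kernel runs with stride [1, 1, stride, 1], and the height axis is squeezed away afterwards. A minimal sketch of the round trip:

import tensorflow as tf

x1d = tf.ones([2, 8, 4])      # [batch, in_width, in_channels]
x2d = tf.expand_dims(x1d, 1)  # [batch, 1, in_width, in_channels]
x_back = tf.squeeze(x2d, [1]) # back to [2, 8, 4]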
Example 13
def test_conv():
    import tensorflow as tf
    from tensorflow.python.ops import gen_nn_ops
    from dace.frontend.tensorflow import TFSession
    inp_shape = [10, 10, 10, 10]
    filter_shape = [3, 3, 10, 3]
    strides = [1, 3, 3, 1]

    inp = tf.placeholder(tf.float64, inp_shape)
    filter = tf.placeholder(tf.float64, filter_shape)
    outp = tf.nn.conv2d(inp,
                        filter,
                        strides,
                        padding="SAME",
                        data_format="NHWC")

    test_in = np.random.uniform(size=tuple(inp_shape)).astype(np.float64)
    test_filter = np.random.uniform(size=tuple(filter_shape)).astype(
        np.float64)

    sess_dace = TFSession()
    sess_tf = tf.Session()

    output_dace = sess_dace.run(outp,
                                feed_dict={
                                    inp: test_in,
                                    filter: test_filter
                                })
    output_tf = sess_tf.run(outp,
                            feed_dict={
                                inp: test_in,
                                filter: test_filter
                            })
    try:
        assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
    except:
        print(output_tf)
        print(output_dace)
        print(tf.linalg.norm(output_tf - output_dace).eval(session=sess_tf))
        raise AssertionError("Convolution test failed")
    ##### Conv backprop grad ######
    inp_shape = [10, 10, 10, 10]
    filters = [[2, 2, 10, 3]]
    strides = [[1, 3, 3, 1]]
    paddings = ["VALID"]
    for p in paddings:
        for f in filters:
            for s in strides:
                print(p, f, s)
                filter = tf.placeholder(tf.float64, f)
                outp = tf.nn.conv2d(inp,
                                    filter,
                                    s,
                                    padding=p,
                                    data_format="NHWC")
                out_backprop = tf.placeholder(tf.float64, outp.shape)
                inp_gradients = gen_nn_ops.conv2d_backprop_input(inp_shape,
                                                                 filter,
                                                                 out_backprop,
                                                                 s,
                                                                 padding=p)
                test_grads = np.random.uniform(size=outp.shape).astype(
                    np.float64)
                test_filter = np.random.uniform(size=tuple(f)).astype(
                    np.float64)

                output_tf = sess_tf.run(inp_gradients,
                                        feed_dict={
                                            filter: test_filter,
                                            out_backprop: test_grads
                                        })
                output_dace = sess_dace.run(inp_gradients,
                                            feed_dict={
                                                filter: test_filter,
                                                out_backprop: test_grads
                                            })

                try:
                    assert tf.norm(output_dace -
                                   output_tf).eval(session=sess_tf) < 1e-10
                except:
                    print(p)
                    print(f)
                    print(s)
                    print(output_tf)
                    print(output_dace)
                    print(
                        tf.linalg.norm(output_tf -
                                       output_dace).eval(session=sess_tf))
                    raise AssertionError("Convolution grad test failed")

    ##### Conv filter backprop ##################
    inp_shape = [10, 10, 10, 10]
    filters = [[4, 4, 10, 3]]
    strides = [[1, 1, 1, 1]]
    paddings = ["SAME"]
    for p in paddings:
        for f in filters:
            for s in strides:
                input_placeholder = tf.placeholder(tf.float64, inp_shape)
                filter = tf.placeholder(tf.float64, f)
                outp = tf.nn.conv2d(inp,
                                    filter,
                                    s,
                                    padding=p,
                                    data_format="NHWC")
                out_backprop = tf.placeholder(tf.float64, outp.shape)
                filter_gradients = gen_nn_ops.conv2d_backprop_filter(
                    input_placeholder, f, out_backprop, s, padding=p)
                test_grads = np.random.uniform(size=outp.shape).astype(
                    np.float64)
                test_input = np.random.uniform(size=tuple(inp_shape)).astype(
                    np.float64)

                output_tf = sess_tf.run(filter_gradients,
                                        feed_dict={
                                            input_placeholder: test_input,
                                            out_backprop: test_grads
                                        })
                output_dace = sess_dace.run(filter_gradients,
                                            feed_dict={
                                                input_placeholder: test_input,
                                                out_backprop: test_grads
                                            })

                try:
                    assert tf.norm(output_dace -
                                   output_tf).eval(session=sess_tf) < 1e-10
                except:
                    print(p)
                    print(f)
                    print(s)
                    print(output_tf)
                    print(output_dace)
                    print(
                        tf.linalg.norm(output_tf -
                                       output_dace).eval(session=sess_tf))
                    raise AssertionError("Convolution filter grad test failed")
Example 14
    def backprop_conv(self, activation, weights, relevance,
                      strides=[1, 1, 1, 1], padding='SAME'):
        # Keep only the positive weights (z+-style LRP).
        w_pos = tf.maximum(0., weights)
        # Forward pass with the positive weights; epsilon stabilizes the
        # division. (The original hardcoded padding='SAME' here, silently
        # ignoring the `padding` argument used in the backprop call below.)
        z = tf.nn.conv2d(activation, w_pos, strides, padding=padding) + self.epsilon
        s = relevance / z
        # Redistribute relevance through the transposed convolution.
        c = gen_nn_ops.conv2d_backprop_input(tf.shape(activation), w_pos, s,
                                             strides, padding)
        return activation * c
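For reference, this routine appears to implement the epsilon-stabilized z+ rule of Layer-wise Relevance Propagation: with positive weights $w^{+}$, it computes roughly

R_i = a_i \sum_j \frac{w_{ij}^{+} R_j}{\sum_{i'} a_{i'} w_{i'j}^{+} + \epsilon}

where the sum over j is exactly the transposed convolution that gen_nn_ops.conv2d_backprop_input performs.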