  def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
                           padding):
    # Generate numbers in a narrow range, so that there are many duplicates
    # in the input.
    tensor_input = np.random.random_integers(0, 3,
                                             input_shape).astype(np.float32)
    tensor_output = np.random.rand(*output_shape).astype(np.float32)
    with self.test_session(use_gpu=True):
      t = tf.constant(tensor_input, shape=input_shape)
      _, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
      argmax = argmax_op.eval()
      grad_in = tf.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
                                                     ksize, strides, padding)
      gpu_val = out_op.eval()
      self.assertShapeEqual(gpu_val, out_op)
    with self.test_session(use_gpu=False):
      t = tf.constant(tensor_input, shape=input_shape)
      out_op = tf.nn.max_pool(t, ksize, strides, padding)
      orig_out = out_op.eval()
      grad_in = tf.constant(tensor_output, shape=output_shape)
      out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize,
                                         strides, padding)
      cpu_val = out_op.eval()
      self.assertShapeEqual(cpu_val, out_op)
    self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
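For reference, the output_shape passed to this helper has to be exactly the shape tf.nn.max_pool produces from input_shape under the given ksize, strides, and padding. A minimal, hypothetical helper (not part of the test) computing that NHWC shape:

# Hypothetical helper, for illustration only: computes the shape that
# tf.nn.max_pool returns for an NHWC input.
def _pooled_output_shape(input_shape, ksize, strides, padding):
  batch, rows, cols, depth = input_shape
  if padding == 'VALID':
    out_rows = (rows - ksize[1]) // strides[1] + 1
    out_cols = (cols - ksize[2]) // strides[2] + 1
  else:  # 'SAME'
    out_rows = (rows + strides[1] - 1) // strides[1]
    out_cols = (cols + strides[2] - 1) // strides[2]
  return (batch, out_rows, out_cols, depth)

# e.g. _pooled_output_shape((1, 4, 4, 1), [1, 2, 2, 1], [1, 2, 2, 1], 'VALID') -> (1, 2, 2, 1)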
Example #2
def _MaxPoolGrad(op, grad):
  return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format")
                                  )
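For context, gradient functions with this (op, grad) signature are wired into automatic differentiation through a registration decorator. A minimal sketch of that registration in TensorFlow 1.x (nn_grad.py already registers "MaxPool", so re-registering it in user code would raise):

# Sketch only: the "MaxPool" gradient is already registered inside TensorFlow.
from tensorflow.python.framework import ops

@ops.RegisterGradient("MaxPool")  # associates the function with every MaxPool op in the graph
def _MaxPoolGrad(op, grad):       # body as defined above
    ...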
 def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
                          padding):
   for dtype in np.float32, np.float16:
     # Generate numbers in a narrow range, so that there are many duplicates
     # in the input.
     tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
     tensor_output = np.random.rand(*output_shape).astype(dtype)
     with self.test_session(use_gpu=True):
       t = constant_op.constant(tensor_input, shape=input_shape)
       _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
       argmax = argmax_op.eval()
       grad_in = constant_op.constant(tensor_output, shape=output_shape)
       out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
                                                      ksize, strides, padding)
       gpu_val = out_op.eval()
       self.assertShapeEqual(gpu_val, out_op)
     with self.test_session(use_gpu=False):
       t = constant_op.constant(tensor_input, shape=input_shape)
       out_op = nn_ops.max_pool(t, ksize, strides, padding)
       orig_out = out_op.eval()
       grad_in = constant_op.constant(tensor_output, shape=output_shape)
       out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize, strides,
                                          padding)
       cpu_val = out_op.eval()
       self.assertShapeEqual(cpu_val, out_op)
     if dtype == np.float16:
       # The CPU version accumulates its gradient on fp16, so it's less
       # accurate than the GPU version that does the accumulation on fp32
       self.assertAllClose(cpu_val, gpu_val, rtol=0.01, atol=0.01)
     else:
       self.assertAllClose(cpu_val, gpu_val)
Example #4
def _MaxPoolGradGrad(op, grad):
    gradient = gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
                                         op.get_attr("ksize"),
                                         op.get_attr("strides"),
                                         padding=op.get_attr("padding"),
                                         data_format=op.get_attr("data_format"))
    gradgrad1 = array_ops.zeros(shape=array_ops.shape(op.inputs[1]),
                                dtype=gradient.dtype)
    gradgrad2 = array_ops.zeros(shape=array_ops.shape(op.inputs[2]),
                                dtype=gradient.dtype)
    return (gradient, gradgrad1, gradgrad2)
Example #5
def _MaxPoolGrad(op, grad):
  return gen_nn_ops._max_pool_grad(op.inputs[0],
                                   op.outputs[0],
                                   grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format"))
Example #6
def _MaxPoolWithArgmaxGrad(op, grad, arg):
  return gen_nn_ops._max_pool_grad(op.inputs[0],
                                   op.outputs[0],
                                   grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format='NHWC')
Example #7
def _MaxPoolWithArgmaxGrad(op, grad, unused_argmax_grad):
  return gen_nn_ops._max_pool_grad(op.inputs[0],
                                   op.outputs[0],
                                   grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format='NHWC')
Example #8
from tensorflow.python.ops import gen_nn_ops


def max_unpool(pool, pooled, ksize, strides):
    # Max unpooling, implemented via the gradient of max pooling.
    unpool = gen_nn_ops._max_pool_grad(
        pool,    # the tensor before pooling, i.e. the input of max_pool
        pooled,  # the tensor after pooling, i.e. the output of max_pool
        pooled,  # the tensor to unpool; can be any tensor with the same shape as the pooled output
        ksize=ksize,
        strides=strides,
        padding='SAME')
    return unpool
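A minimal usage sketch for max_unpool above; the shapes and the TF 1.x session boilerplate are illustrative assumptions:

# Illustrative only: unpool a 2x2-pooled feature map back to 4x4. The pooled
# values are scattered back to their argmax positions; everything else is zero.
import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1))
pooled = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
unpooled = max_unpool(x, pooled, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1])

with tf.Session() as sess:
    print(sess.run(unpooled).shape)  # (1, 4, 4, 1)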
Example #9
def max_unpool(indata, origin_pooled, name):
    with tf.name_scope(name):
        op = origin_pooled.op
        return gen_nn_ops._max_pool_grad(
            op.inputs[0],
            op.outputs[0],
            indata,
            op.get_attr("ksize"),
            op.get_attr("strides"),
            padding=op.get_attr("padding"),
            data_format=op.get_attr("data_format"))
Example #10
    def backprop_pool(self, activation, relevance, ksize, strides, pooling_type, padding='SAME'):

        if pooling_type.lower() == 'avg':
            z = nn_ops.avg_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides, padding)
            return activation * c
        else:
            z = nn_ops.max_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._max_pool_grad(activation, z, s, ksize, strides, padding)
            return activation * c
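The max branch above is the epsilon-stabilized z-rule: relevance is divided by the pooled activations, routed back through the pooling op's gradient, and re-weighted by the inputs, so the total relevance is preserved up to the 1e-10 stabilizer. A small check of that property, where `lrp` stands for an instance of the class defining backprop_pool (the name and all shapes are assumptions):

# Hypothetical conservation check; instance name and shapes assumed.
import numpy as np
import tensorflow as tf

activation = tf.constant(np.random.rand(1, 4, 4, 3).astype(np.float32))
relevance = tf.constant(np.random.rand(1, 2, 2, 3).astype(np.float32))
redistributed = lrp.backprop_pool(activation, relevance,
                                  ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                  pooling_type='max')
with tf.Session() as sess:
    total_in, total_out = sess.run([tf.reduce_sum(relevance),
                                    tf.reduce_sum(redistributed)])
    print(total_in, total_out)  # approximately equal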
Example #11
def _MaxPoolGradGradGrad(op, grad):
  return (array_ops.zeros(shape=array_ops.shape(op.inputs[0]),
                          dtype=op.inputs[0].dtype),
          array_ops.zeros(shape=array_ops.shape(op.inputs[1]),
                          dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool_grad(op.inputs[0],
                                    op.inputs[1],
                                    grad,
                                    op.get_attr("ksize"),
                                    op.get_attr("strides"),
                                    padding=op.get_attr("padding"),
                                    data_format=op.get_attr("data_format")))
Example #12
def _MaxPoolGradGradGrad(op, grad):
    return (array_ops.zeros(shape=array_ops.shape(op.inputs[0]),
                            dtype=op.inputs[0].dtype),
            array_ops.zeros(shape=array_ops.shape(op.inputs[1]),
                            dtype=op.inputs[1].dtype),
            gen_nn_ops._max_pool_grad(op.inputs[0],
                                      op.inputs[1],
                                      grad,
                                      op.get_attr("ksize"),
                                      op.get_attr("strides"),
                                      padding=op.get_attr("padding"),
                                      data_format=op.get_attr("data_format")))
def _MaxPoolGradGrad(op, grad):
    ksize = op.get_attr("ksize")
    strides = op.get_attr("strides")
    padding = op.get_attr("padding")
    data_format = op.get_attr("data_format")
    grads = []
    for i in op.inputs:
        igrad = gen_nn_ops._max_pool_grad(i, op.outputs[0], grad, ksize, strides,
                                          padding=padding, data_format=data_format)
        grads.append(igrad)

    return grads
Example #14
    def deconv(self, top):
        """
_max_pool_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format=None, name=None)
    Computes gradients of the maxpooling function.
    
    Args:
      orig_input: A `Tensor`. Must be one of the following types: `float32`, `half`.
        The original input tensor.
      orig_output: A `Tensor`. Must have the same type as `orig_input`.
        The original output tensor.
      grad: A `Tensor`. Must have the same type as `orig_input`.
        4-D.  Gradients w.r.t. the output of `max_pool`.
      ksize: A list of `ints` that has length `>= 4`.
        The size of the window for each dimension of the input tensor.
      strides: A list of `ints` that has length `>= 4`.
        The stride of the sliding window for each dimension of the
        input tensor.
      padding: A `string` from: `"SAME", "VALID"`.
        The type of padding algorithm to use.
      data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
        Specify the data format of the input and output data. With the
        default format "NHWC", the data is stored in the order of:
            [batch, in_height, in_width, in_channels].
        Alternatively, the format could be "NCHW", the data storage order of:
            [batch, in_channels, in_height, in_width].
      name: A name for the operation (optional).
    
    Returns:
      A `Tensor`. Has the same type as `orig_input`.
      Gradients w.r.t. the input to `max_pool`.
        """
        self.pre_unpool = top
        unpool = gen_nn_ops._max_pool_grad(orig_input=self.pre_pool,
                                           orig_output=self.aft_pool,
                                           grad=top,
                                           ksize=[1, 2, 2, 1],
                                           strides=[1, 2, 2, 1],
                                           padding="SAME")

        self.unpool = unpool

        # not reverse operation
        feat = tf.nn.relu(unpool)

        #feat = feat - self.b

        feat = tf.nn.conv2d_transpose(feat,
                                      self.w,
                                      self.input_shape, [1, 1, 1, 1],
                                      padding='SAME')

        return feat
def _MaxPoolGradGrad(op, grad):
    gradient = gen_nn_ops._max_pool_grad(
        op.inputs[0],
        op.outputs[0],
        grad,
        op.get_attr("ksize"),
        op.get_attr("strides"),
        padding=op.get_attr("padding"),
        data_format=op.get_attr("data_format"))
    gradgrad1 = array_ops.zeros(shape=array_ops.shape(op.inputs[1]),
                                dtype=gradient.dtype)
    gradgrad2 = array_ops.zeros(shape=array_ops.shape(op.inputs[2]),
                                dtype=gradient.dtype)
    return (gradient, gradgrad1, gradgrad2)
Example #16
def fprop_pool(F, X, strides=None, ksize=None, padding='SAME'):
    xshape = X.get_shape().as_list()
    fshape = F.get_shape().as_list()
    if len(xshape) != len(fshape):
        F = tf.reshape(F, (-1, int(np.ceil(
            xshape[1] / 2.0)), int(np.ceil(xshape[2] / 2.0)), xshape[3]))
    ksize = [1, 2, 2, 1] if ksize is None else ksize
    strides = [1, 2, 2, 1] if strides is None else strides

    Z = tf.nn.max_pool(X, strides=strides, ksize=ksize, padding=padding) + 1e-9
    S = F / Z
    C = gen_nn_ops._max_pool_grad(X, Z, S, ksize, strides, padding)
    F = X * C
    return F
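When the incoming relevance F is still flattened (for example, arriving from a dense layer), the rank check above reshapes it to the pooled spatial layout before redistribution. A sketch with assumed shapes:

# Illustrative shapes only: X is the pre-pool activation map, F arrives
# flattened (rank 2), so fprop_pool reshapes it to [1, 2, 2, 3] internally.
import numpy as np
import tensorflow as tf

X = tf.constant(np.random.rand(1, 4, 4, 3).astype(np.float32))
F = tf.constant(np.random.rand(1, 2 * 2 * 3).astype(np.float32))
R = fprop_pool(F, X)  # redistributed relevance at the input resolution

with tf.Session() as sess:
    print(sess.run(R).shape)  # (1, 4, 4, 3)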
Example #17
 def testDirectUseOverlapping(self):
     for num_batches in [1, 3]:
         for row_window_size in [2, 5]:
             for col_window_size in [2, 4]:
                 num_rows = (row_window_size - 1) * 5 + 1
                 num_cols = (col_window_size - 1) * 7 + 1
                 for num_channels in [1, 2]:
                     input_shape = (num_batches, num_rows, num_cols,
                                    num_channels)
                     with self.test_session() as _:
                         input_tensor = constant_op.constant(
                             self._GenerateUniqueRandomInputTensor(
                                 input_shape))
                         window_size = [
                             1, row_window_size, col_window_size, 1
                         ]
                         stride_size = [
                             1, row_window_size - 1, col_window_size - 1, 1
                         ]
                         padding = "VALID"
                         output_tensor = nn_ops.max_pool(
                             input_tensor, window_size, stride_size,
                             padding)
                         output_data = output_tensor.eval()
                         output_backprop = self._PRNG.randint(
                             100, size=output_data.shape)
                         input_backprop_tensor = gen_nn_ops._max_pool_grad(
                             input_tensor, output_tensor, output_backprop,
                             window_size, stride_size, padding)
                         input_backprop = input_backprop_tensor.eval()
                         row_seq = list(
                             range(0, num_rows, row_window_size - 1))
                         col_seq = list(
                             range(0, num_cols, col_window_size - 1))
                         row_seq[-1] += 1
                         col_seq[-1] += 1
                         fmp_input_backprop_tensor = gen_nn_ops._fractional_max_pool_grad(
                             input_tensor,
                             output_tensor,
                             output_backprop,
                             row_seq,
                             col_seq,
                             overlapping=True)
                         fmp_input_backprop = fmp_input_backprop_tensor.eval(
                         )
                         self.assertShapeEqual(input_backprop,
                                               fmp_input_backprop_tensor)
                         self.assertAllClose(input_backprop,
                                             fmp_input_backprop)
Example #18
def rec(img):
    """
    Reconstruct part of the image after simple pooling
    Args:
        img: input image
    """
    with tf.Graph().as_default():
        img_op = tf.placeholder(dtype=tf.float32, shape=[1, 300, 300, 3], name='input')
        pool_op = tf.nn.max_pool(img_op, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool')
        unpool_op = gen_nn_ops._max_pool_grad(img_op, pool_op, pool_op, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            pool_out, unpool_out = sess.run([pool_op, unpool_op], feed_dict={img_op: img})
            cv2.imwrite('img/pool_out.png', pool_out[0, :, :, :])
            cv2.imwrite('img/unpool_out.png', unpool_out[0, :, :, :])
Example #19
def test_max_pool_grad():
    ksize = [1, 2, 2, 1]
    strides = [1, 2, 2, 1]
    padding = 'VALID'
    shape = [1, 4, 4, 1]

    with tf.Session():
        X = tf.constant(np.random.normal(size=shape).astype(np.float32),
                        name="x")
        XS = tf.constant(np.random.normal(size=shape).astype(np.float32),
                         name="xs")
        Z = tf.nn.max_pool(X, strides=strides, ksize=ksize, padding=padding)
        S = tf.nn.max_pool(XS, strides=strides, ksize=ksize, padding=padding)
        C = gen_nn_ops._max_pool_grad(X, Z, S, ksize, strides, padding)
        err = tf.test.compute_gradient_error(X, shape, C, shape)

    assert err <= 1e-4
    print('-' * 60)
    print('Passed maxPoolGradGrad test!')
    print('-' * 60)
    def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
                     window_cols, row_stride, col_stride, padding):
        """Max Pooling Gradient.

        Args:
          orig_input: A float Tensor. The original input tensor.
          orig_output: A float Tensor. The original output tensor.
          grad: A float Tensor.
            The 4D (batch x rows x cols x depth) output backprop.
          window_rows: integer. Kernel size along rows dimension.
          window_cols: integer. Kernel size along cols dimension.
          row_stride: integer. Stride along rows dimension.
          col_stride: integer. Stride along cols dimension.
          padding: PoolingOpDef.Padding. Padding type.

        Returns:
          A Tensor.
        """
        return gen_nn_ops._max_pool_grad(
            orig_input, orig_output, grad, [1, window_rows, window_cols, 1],
            [1, row_stride, col_stride, 1], padding)
Example #21
def unpooling(input,
              before_pool,
              padding,
              ksize=[1, 2, 2, 1],
              strides=[1, 2, 2, 1],
              name=None,
              data_format="NHWC"):
    """
    Apply unpooling given the corresponding pooling op,
    by using the gradient of the pooling op.
    """
    raise NotImplementedError('not permitted to use: not tested yet')
    unpool = gen_nn_ops._max_pool_grad(orig_input=before_pool,
                                       orig_output=input,
                                       grad=input,
                                       ksize=ksize,
                                       strides=strides,
                                       padding=padding,
                                       data_format=data_format,
                                       name=name)
    return unpool
  def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
                   window_cols, row_stride, col_stride, padding):
    """Max Pooling Gradient.

    Args:
      orig_input: A float Tensor. The original input tensor.
      orig_output: A float Tensor. The original output tensor.
      grad: A float Tensor.
        The 4D (batch x rows x cols x depth) output backprop.
      window_rows: integer. Kernel size along rows dimension.
      window_cols: integer. Kernel size along cols dimension.
      row_stride: integer. Stride along rows dimension
      col_stride: integer. Stride along cols dimension
      padding: PoolingOpDef.Padding.  Padding type.

    Returns:
      A Tensor.
    """
    return gen_nn_ops._max_pool_grad(
        orig_input, orig_output, grad,
        [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
        padding)
 def testDirectUseOverlapping(self):
   for num_batches in [1, 3]:
     for row_window_size in [2, 5]:
       for col_window_size in [2, 4]:
         num_rows = (row_window_size - 1) * 5 + 1
         num_cols = (col_window_size - 1) * 7 + 1
         for num_channels in [1, 2]:
           input_shape = (num_batches, num_rows, num_cols, num_channels)
           with self.test_session() as _:
             input_tensor = constant_op.constant(
                 self._GenerateUniqueRandomInputTensor(input_shape))
             window_size = [1, row_window_size, col_window_size, 1]
             stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
             padding = "VALID"
             output_tensor = nn_ops.max_pool(input_tensor, window_size,
                                             stride_size, padding)
             output_data = output_tensor.eval()
             output_backprop = self._PRNG.randint(100, size=output_data.shape)
             input_backprop_tensor = gen_nn_ops._max_pool_grad(input_tensor,
                                                               output_tensor,
                                                               output_backprop,
                                                               window_size,
                                                               stride_size,
                                                               padding)
             input_backprop = input_backprop_tensor.eval()
             row_seq = list(range(0, num_rows, row_window_size - 1))
             col_seq = list(range(0, num_cols, col_window_size - 1))
             row_seq[-1] += 1
             col_seq[-1] += 1
             fmp_input_backprop_tensor = gen_nn_ops._fractional_max_pool_grad(
                 input_tensor,
                 output_tensor,
                 output_backprop,
                 row_seq,
                 col_seq,
                 overlapping=True)
             fmp_input_backprop = fmp_input_backprop_tensor.eval()
             self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
             self.assertAllClose(input_backprop, fmp_input_backprop)
Example #24
def model_unpool(images, idx_map, is_training, reuse=False):
    """ Network model
    Args:
      images: [batch_size, H, W, C]
      idx_map: unused by the current unpooling path (only referenced in commented-out code)
      is_training: True if training mode (for batchnorm)
    """

    if not reuse:
        print('inference::input', images.get_shape())
    bn = True
    grads_dict = {}

    with tf.variable_scope('conv1_1', reuse=reuse) as scope:  #1
        conv1_1 = tf.layers.conv2d(
            inputs=images,
            filters=64,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_1 = tf.contrib.layers.batch_norm(conv1_1,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv1_1 = tf.nn.relu(conv1_1)
        if not reuse:
            print('conv1_1', conv1_1.get_shape())
        grads_dict['conv1_1'] = tf.gradients(conv1_1, images)

    with tf.variable_scope('conv1_2', reuse=reuse) as scope:  #2
        conv1_2 = tf.layers.conv2d(
            inputs=conv1_1,
            filters=64,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv1_2 = tf.contrib.layers.batch_norm(conv1_2,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv1_2 = tf.nn.relu(conv1_2)
        pool1 = tf.nn.max_pool(conv1_2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool')  #pool1
        if not reuse:
            print('pool1', pool1.get_shape())
        grads_dict['conv1_2'] = tf.gradients(conv1_2, images)
        grads_dict['pool1'] = tf.gradients(pool1, images)

    with tf.variable_scope('conv2_1', reuse=reuse) as scope:  #3
        conv2_1 = tf.layers.conv2d(
            inputs=pool1,
            filters=128,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_1 = tf.contrib.layers.batch_norm(conv2_1,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv2_1 = tf.nn.relu(conv2_1)
        if not reuse:
            print('conv2_1', conv2_1.get_shape())
        grads_dict['conv2_1'] = tf.gradients(conv2_1, images)

    with tf.variable_scope('conv2_2', reuse=reuse) as scope:  #4
        conv2_2 = tf.layers.conv2d(
            inputs=conv2_1,
            filters=128,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv2_2 = tf.contrib.layers.batch_norm(conv2_2,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv2_2 = tf.nn.relu(conv2_2)
        pool2 = tf.nn.max_pool(conv2_2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool')  #pool2
        if not reuse:
            print('pool2', pool2.get_shape())
        grads_dict['conv2_2'] = tf.gradients(conv2_2, images)
        grads_dict['pool2'] = tf.gradients(pool2, images)

    with tf.variable_scope('conv3_1', reuse=reuse) as scope:  #5
        conv3_1 = tf.layers.conv2d(
            inputs=pool2,
            filters=256,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_1 = tf.contrib.layers.batch_norm(conv3_1,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv3_1 = tf.nn.relu(conv3_1)
        if not reuse:
            print('conv3_1', conv3_1.get_shape())
        grads_dict['conv3_1'] = tf.gradients(conv3_1, images)

    with tf.variable_scope('conv3_2', reuse=reuse) as scope:  #6
        conv3_2 = tf.layers.conv2d(
            inputs=conv3_1,
            filters=256,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_2 = tf.contrib.layers.batch_norm(conv3_2,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv3_2 = tf.nn.relu(conv3_2)
        if not reuse:
            print('conv3_2', conv3_2.get_shape())
        grads_dict['conv3_2'] = tf.gradients(conv3_2, images)

    with tf.variable_scope('conv3_3', reuse=reuse) as scope:  #7
        conv3_3 = tf.layers.conv2d(
            inputs=conv3_2,
            filters=256,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv3_3 = tf.contrib.layers.batch_norm(conv3_3,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv3_3 = tf.nn.relu(conv3_3)
        pool3 = tf.nn.max_pool(conv3_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool')  #pool3
        if not reuse:
            print('pool3', pool3.get_shape())
        grads_dict['conv3_3'] = tf.gradients(conv3_3, images)
        grads_dict['pool3'] = tf.gradients(pool3, images)

    with tf.variable_scope('conv4_1', reuse=reuse) as scope:  # 8
        conv4_1 = tf.layers.conv2d(
            inputs=pool3,
            filters=512,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_1 = tf.contrib.layers.batch_norm(conv4_1,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv4_1 = tf.nn.relu(conv4_1)
        if not reuse:
            print('conv4_1', conv4_1.get_shape())
        grads_dict['conv4_1'] = tf.gradients(conv4_1, images)

    with tf.variable_scope('conv4_2', reuse=reuse) as scope:  #9
        conv4_2 = tf.layers.conv2d(
            inputs=conv4_1,
            filters=512,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_2 = tf.contrib.layers.batch_norm(conv4_2,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv4_2 = tf.nn.relu(conv4_2)
        if not reuse:
            print('conv4_2', conv4_2.get_shape())
        grads_dict['conv4_2'] = tf.gradients(conv4_2, images)

    with tf.variable_scope('conv4_3', reuse=reuse) as scope:  #10
        conv4_3 = tf.layers.conv2d(
            inputs=conv4_2,
            filters=512,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv4_3 = tf.contrib.layers.batch_norm(conv4_3,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv4_3 = tf.nn.relu(conv4_3)
        pool4 = tf.nn.max_pool(conv4_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool')  #pool4
        if not reuse:
            print('pool4', pool4.get_shape())
        grads_dict['conv4_3'] = tf.gradients(conv4_3, images)
        grads_dict['pool4'] = tf.gradients(pool4, images)

    with tf.variable_scope('conv5_1', reuse=reuse) as scope:  #11
        conv5_1 = tf.layers.conv2d(
            inputs=pool4,
            filters=512,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_1 = tf.contrib.layers.batch_norm(conv5_1,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv5_1 = tf.nn.relu(conv5_1)
        if not reuse:
            print('conv5_1', conv5_1.get_shape())
        grads_dict['conv5_1'] = tf.gradients(conv5_1, images)

    with tf.variable_scope('conv5_2', reuse=reuse) as scope:  #12
        conv5_2 = tf.layers.conv2d(
            inputs=conv5_1,
            filters=512,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_2 = tf.contrib.layers.batch_norm(conv5_2,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv5_2 = tf.nn.relu(conv5_2)
        if not reuse:
            print('conv5_2', conv5_2.get_shape())
        grads_dict['conv5_2'] = tf.gradients(conv5_2, images)

    with tf.variable_scope('conv5_3', reuse=reuse) as scope:  #13
        conv5_3 = tf.layers.conv2d(
            inputs=conv5_2,
            filters=512,
            kernel_size=(3, 3),
            padding="same",
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        if bn:
            conv5_3 = tf.contrib.layers.batch_norm(conv5_3,
                                                   fused=True,
                                                   decay=0.9,
                                                   is_training=is_training)
        conv5_3 = tf.nn.relu(conv5_3)
        pool5 = tf.nn.max_pool(conv5_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool')  #pool5
        if not reuse:
            print('pool5', pool5.get_shape())
        grads_dict['conv5_3'] = tf.gradients(conv5_3, images)
        grads_dict['pool5'] = tf.gradients(pool5, images)

    ########################################################################################

    #with tf.variable_scope('unpool5', reuse=reuse) as scope:#14
    #    #unpool5 = gen_nn_ops._max_pool_grad(idx_map, pool5, pool5, [1,3,3,1], [1,2,2,1],'VALID')
    #    unpool5 = gen_nn_ops._max_pool_grad(conv5_3, pool5, pool5, [1,2,2,1], [1,2,2,1],'VALID')
    #    print('unpool5', unpool5.get_shape())
    #    #tf.summary.image('unpool5', unpool5)
    #
    #with tf.variable_scope('unpool4', reuse=reuse) as scope:#17
    #    unpool4 = gen_nn_ops._max_pool_grad(conv4_3, pool4, unpool5, [1,2,2,1], [1,2,2,1],'VALID')
    #    conv4_1D = tf.layers.conv2d(unpool4, filters=256, kernel_size=(1,1),
    #            padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
    #    print('unpool4', unpool4.get_shape())
    #    print('conv4_1D', conv4_1D.get_shape())
    #    #tf.summary.image('unpool4', unpool4)

    #with tf.variable_scope('unpool3', reuse=reuse) as scope:#20
    #    #unpool3 = gen_nn_ops._max_pool_grad(conv3_3, pool3, unpool4, [1,2,2,1], [1,2,2,1],'VALID')
    #    unpool3 = gen_nn_ops._max_pool_grad(conv3_3, pool3, conv4_1D, [1,2,2,1], [1,2,2,1],'VALID')
    #    conv3_1D = tf.layers.conv2d(unpool3, filters=128, kernel_size=(1,1),
    #            padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
    #    print('unpool3', unpool3.get_shape())
    #    print('conv3_1D', conv3_1D.get_shape())
    #    #tf.summary.image('unpool3', unpool3)

    #with tf.variable_scope('unpool2', reuse=reuse) as scope:#23
    #    #unpool2 = gen_nn_ops._max_pool_grad(conv2_2, pool2, unpool3, [1,2,2,1], [1,2,2,1],'VALID')
    #    unpool2 = gen_nn_ops._max_pool_grad(conv2_2, pool2, conv3_1D, [1,2,2,1], [1,2,2,1],'VALID')
    #    conv2_1D = tf.layers.conv2d(unpool2, filters=64, kernel_size=(1,1),
    #            padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
    #    print('unpool2', unpool2.get_shape())
    #    print('conv2_1D', conv2_1D.get_shape())
    #    #tf.summary.image('unpool2', unpool2)

    #with tf.variable_scope('unpool1', reuse=reuse) as scope:#25
    #    #unpool1 = gen_nn_ops._max_pool_grad(conv1_2, pool1, unpool2, [1,2,2,1], [1,2,2,1],'VALID')
    #    unpool1 = gen_nn_ops._max_pool_grad(conv1_2, pool1, conv2_1D, [1,2,2,1], [1,2,2,1],'VALID')
    #    conv1_1D = tf.layers.conv2d(unpool1, filters=1, kernel_size=(1,1),
    #            padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
    #    print('unpool1', unpool1.get_shape())
    #    print('conv1_1D', conv1_1D.get_shape())
    #    tf.summary.image('conv1_1D', conv1_1D)
    #    print('conv1_1D.shape', conv1_1D.get_shape())

    ########################################################################################
    with tf.variable_scope('unpool5', reuse=reuse) as scope:  #14
        #unpool5 = gen_nn_ops._max_pool_grad(idx_map, pool5, pool5, [1,3,3,1], [1,2,2,1],'VALID')
        unpool5 = gen_nn_ops._max_pool_grad(conv5_3, pool5, pool5,
                                            [1, 2, 2, 1], [1, 2, 2, 1],
                                            'VALID')
        if not reuse:
            print('unpool5', unpool5.get_shape())
        #tf.summary.image('unpool5', unpool5)

    with tf.variable_scope('unpool4', reuse=reuse) as scope:  #17
        unpool4 = gen_nn_ops._max_pool_grad(conv4_3, pool4, unpool5,
                                            [1, 2, 2, 1], [1, 2, 2, 1],
                                            'VALID')
        #conv4_1D = tf.layers.conv2d(unpool4, filters=256, kernel_size=(1,1),
        #        padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if not reuse:
            print('unpool4', unpool4.get_shape())
        #print('conv4_1D', conv4_1D.get_shape())
        #tf.summary.image('unpool4', unpool4)

    with tf.variable_scope('unpool3', reuse=reuse) as scope:  #20
        unpool3 = gen_nn_ops._max_pool_grad(conv3_3, pool3, unpool4,
                                            [1, 2, 2, 1], [1, 2, 2, 1],
                                            'VALID')
        #unpool3 = gen_nn_ops._max_pool_grad(conv3_3, pool3, conv4_1D, [1,2,2,1], [1,2,2,1],'VALID')
        #conv3_1D = tf.layers.conv2d(unpool3, filters=128, kernel_size=(1,1),
        #        padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if not reuse:
            print('unpool3', unpool3.get_shape())
        #print('conv3_1D', conv3_1D.get_shape())
        #tf.summary.image('unpool3', unpool3)

    with tf.variable_scope('unpool2', reuse=reuse) as scope:  #23
        unpool2 = gen_nn_ops._max_pool_grad(conv2_2, pool2, unpool3,
                                            [1, 2, 2, 1], [1, 2, 2, 1],
                                            'VALID')
        #unpool2 = gen_nn_ops._max_pool_grad(conv2_2, pool2, conv3_1D, [1,2,2,1], [1,2,2,1],'VALID')
        #conv2_1D = tf.layers.conv2d(unpool2, filters=64, kernel_size=(1,1),
        #        padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if not reuse:
            print('unpool2', unpool2.get_shape())
        #print('conv2_1D', conv2_1D.get_shape())
        #tf.summary.image('unpool2', unpool2)

    with tf.variable_scope('unpool1', reuse=reuse) as scope:  #25
        unpool1 = gen_nn_ops._max_pool_grad(conv1_2, pool1, unpool2,
                                            [1, 2, 2, 1], [1, 2, 2, 1],
                                            'VALID')
        unpool1 = tf.reduce_max(unpool1, (3), keep_dims=True)
        #unpool1 = gen_nn_ops._max_pool_grad(conv1_2, pool1, conv2_1D, [1,2,2,1], [1,2,2,1],'VALID')
        #conv1_1D = tf.layers.conv2d(unpool1, filters=1, kernel_size=(1,1),
        #        padding="same", kernel_initializer=tf.contrib.layers.xavier_initializer())
        if not reuse:
            print('unpool1', unpool1.get_shape())
        #print('conv1_1D', conv1_1D.get_shape())
        #tf.summary.image('conv1_1D', conv1_1D)
        #print('conv1_1D.shape', conv1_1D.get_shape())

        feat = pool5
        unpool = unpool1

    return feat, unpool, grads_dict
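A hypothetical driver for model_unpool; the placeholder shapes are assumptions (any height and width divisible by 32 survive the five VALID 2x2, stride-2 pools):

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[1, 224, 224, 3], name='images')
idx_map = tf.placeholder(tf.float32, shape=[1, 224, 224, 3], name='idx_map')  # unused by the active unpool path
feat, unpool, grads_dict = model_unpool(images, idx_map, is_training=False)
# feat is the pool5 encoding ([1, 7, 7, 512] here); unpool is the single-channel
# map ([1, 224, 224, 1]) recovered by chaining _max_pool_grad-based unpooling
# back to the input resolution.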