Example #1
    def test_nchw(self, padding):
        self.ksize = [1, 1, 3, 1]
        np_nchw = self.grad_input_nchw[padding]
        if padding == "VALID":
            grad_input = tf.placeholder(tf.float32, shape=(128, 3, 74, 224))
        elif padding == "SAME":
            grad_input = tf.placeholder(tf.float32, shape=(128, 3, 75, 224))

        out_ngtf = avg_pool_grad(self.forward_arg_shape_NCHW,
                                 grad_input,
                                 self.ksize,
                                 self.strides,
                                 padding=padding,
                                 data_format="NCHW")

        # To validate on the CPU side we will need to run in NHWC, because the CPU
        # implementation of avgpool backprop does not support NCHW. We will
        # transpose on the way in and on the way out
        grad_input_transposed = tf.transpose(grad_input, [0, 2, 3, 1])
        self.ksize = [1, 3, 1, 1]
        self.strides = [1, 3, 1, 2]
        b = avg_pool_grad(self.forward_arg_shape_NHWC,
                          grad_input_transposed,
                          self.ksize,
                          self.strides,
                          padding=padding,
                          data_format="NHWC")
        out_tf = tf.transpose(b, [0, 3, 1, 2])
        assert np.isclose(
            self.with_ngraph(lambda sess: sess.run(
                out_ngtf,
                feed_dict={grad_input: self.grad_input_nchw[padding]})),
            self.without_ngraph(lambda sess: sess.run(
                out_tf, feed_dict={grad_input: self.grad_input_nchw[padding]}))
        ).all()
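A minimal standalone sketch of the same transpose-in/transpose-out trick (the CPU avg-pool backprop kernel only handles NHWC, as the comment above notes), assuming TensorFlow 1.x graph mode and made-up shapes: a 2x2, stride-2 pool over an NCHW [2, 3, 8, 8] input.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

# Hypothetical setup: the forward pass pooled an NCHW [2, 3, 8, 8] input with a
# 2x2 window and stride 2, so the incoming gradient is NCHW [2, 3, 4, 4].
grad_nchw = tf.placeholder(tf.float32, shape=(2, 3, 4, 4))

# Transpose the gradient to NHWC, run the NHWC backprop kernel, transpose back.
grad_nhwc = tf.transpose(grad_nchw, [0, 2, 3, 1])
dx_nhwc = gen_nn_ops.avg_pool_grad([2, 8, 8, 3], grad_nhwc,
                                   ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                   padding="VALID", data_format="NHWC")
dx_nchw = tf.transpose(dx_nhwc, [0, 3, 1, 2])

with tf.Session() as sess:
    dx = sess.run(dx_nchw,
                  feed_dict={grad_nchw: np.ones((2, 3, 4, 4), np.float32)})
    print(dx.shape)  # (2, 3, 8, 8); every cell receives 1/4 of its window's gradient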
Example #2
def _AvgPoolGrad(op, grad):
    return gen_nn_ops.avg_pool_grad(array_ops.shape(op.inputs[0]),
                                    grad,
                                    op.get_attr("ksize"),
                                    op.get_attr("strides"),
                                    op.get_attr("padding"),
                                    data_format=op.get_attr("data_format"))
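This matches the gradient function TensorFlow registers for the AvgPool op, so calling gen_nn_ops.avg_pool_grad directly reproduces what tf.gradients computes through tf.nn.avg_pool. A quick equivalence check, assuming TensorFlow 1.x graph mode and made-up shapes:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

x = tf.constant(np.random.rand(1, 8, 8, 4).astype(np.float32))
y = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
upstream = tf.ones_like(y)

# Autodiff through AvgPool vs. the manual backprop op it dispatches to.
dx_auto = tf.gradients(y, x, grad_ys=upstream)[0]
dx_manual = gen_nn_ops.avg_pool_grad([1, 8, 8, 4], upstream,
                                     [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')

with tf.Session() as sess:
    auto_val, manual_val = sess.run([dx_auto, dx_manual])
    print(np.allclose(auto_val, manual_val))  # True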
Example #3
 def dfa_backward(self, AI, AO, E, DO):
     grad = gen_nn_ops.avg_pool_grad(orig_input_shape=self.size,
                                     grad=DO,
                                     ksize=self.ksize,
                                     strides=self.strides,
                                     padding=self.padding)
     return grad
Example #4
 def backward(self, AI, AO, DO, cache):
     DI = gen_nn_ops.avg_pool_grad(orig_input_shape=self.size,
                                   grad=DO,
                                   ksize=self.ksize,
                                   strides=self.strides,
                                   padding=self.padding)
     return {'dout': DI, 'cache': {}}
Example #5
    def relprop_pool(self, x, r, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME'):
        """Implements relevance propagation through pooling layers.

        Args:
            x: array of activations
            r: array of relevance scores
            ksize: pooling kernel dimensions used during the forward pass
            strides: step size of the pooling kernel used during the forward pass
            padding: 'SAME' or 'VALID' padding

        Returns:
            array of relevance scores with the same dimensions as x

        """
        if self.pooling_type == "avg":
            z = tf.nn.avg_pool(x, ksize, strides, padding) + self.epsilon
            s = r / z
            c = gen_nn_ops.avg_pool_grad(tf.shape(x), s, ksize, strides, padding)
        elif self.pooling_type == "max":
            z = tf.nn.max_pool(x, ksize, strides, padding) + self.epsilon
            s = r / z
            c = gen_nn_ops.max_pool_grad_v2(x, z, s, ksize, strides, padding)
        else:
            raise Exception("Error: no such unpooling operation implemented.")
        return c * x
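The "avg" branch above is the LRP z-rule: relevance r is divided by the pooled activations z, spread back over each pooling window with avg_pool_grad, and re-weighted by the activations x, which conserves the total relevance up to epsilon. A minimal numeric check of that property, assuming TensorFlow 1.x and small made-up tensors:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

ksize = strides = (1, 2, 2, 1)
x = tf.constant(np.random.rand(1, 4, 4, 1).astype(np.float32))  # activations
r = tf.constant(np.random.rand(1, 2, 2, 1).astype(np.float32))  # relevance from the layer above

z = tf.nn.avg_pool(x, ksize, strides, 'VALID') + 1e-9
s = r / z
c = gen_nn_ops.avg_pool_grad(tf.shape(x), s, ksize, strides, 'VALID')
r_in = c * x

with tf.Session() as sess:
    r_in_np, r_np = sess.run([r_in, r])
    # Total relevance entering the layer equals total relevance leaving it.
    print(np.sum(r_in_np), np.sum(r_np))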
Example #6
def _AvgPoolGrad(op, grad):
  return gen_nn_ops.avg_pool_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
Example #7
 def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides,
                 padding, data_format):
     del outputs  # Unused by average-pooling gradients.
     return gen_nn_ops.avg_pool_grad(inputs.get_shape().as_list(),
                                     output_gradients,
                                     ksize=ksize,
                                     strides=strides,
                                     padding=padding,
                                     data_format=data_format)
Example #8
 def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
                 data_format):
   del outputs  # Unused by average-pooling gradients.
   return gen_nn_ops.avg_pool_grad(
       inputs.get_shape().as_list(),
       output_gradients,
       ksize=ksize,
       strides=strides,
       padding=padding,
       data_format=data_format)
Example #9
 def relprop_pool(self, a, r, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME', operation='avg'):
     if operation == 'avg':
         z = tf.nn.avg_pool(a, ksize, strides, padding) + self.epsilon
         s = r / z
         c = gen_nn_ops.avg_pool_grad(tf.shape(a), s, ksize, strides, padding)
     elif operation == 'max':
         z = tf.nn.max_pool(a, ksize, strides, padding) + self.epsilon
         s = r / z
         c = gen_nn_ops.max_pool_grad_v2(a, z, s, ksize, strides, padding)
     else:
         raise Exception('No such unpooling operation.')
     return c * a
Example #10
 def testDirectUseOverlapping(self):
     for num_batches in [1, 3]:
         for row_window_size in [2, 5]:
             for col_window_size in [2, 4]:
                 num_rows = (row_window_size - 1) * 5 + 1
                 num_cols = (col_window_size - 1) * 7 + 1
                 for num_channels in [1, 2]:
                     input_shape = (num_batches, num_rows, num_cols,
                                    num_channels)
                     with self.cached_session() as _:
                         input_tensor = constant_op.constant(
                             self._GenerateRandomInputTensor(
                                 input_shape).astype(np.float32))
                         window_size = [
                             1, row_window_size, col_window_size, 1
                         ]
                         stride_size = [
                             1, row_window_size - 1, col_window_size - 1, 1
                         ]
                         padding = "VALID"
                         output_tensor = nn_ops.avg_pool(
                             input_tensor, window_size, stride_size,
                             padding)
                         output_data = self.evaluate(output_tensor)
                         num_elements = 1
                         for dim_size in output_data.shape:
                             num_elements *= dim_size
                         output_backprop = (self._PRNG.rand(num_elements) *
                                            1000).reshape(output_data.shape)
                         input_backprop_tensor = gen_nn_ops.avg_pool_grad(
                             input_tensor.get_shape(), output_backprop,
                             window_size, stride_size, padding)
                         input_backprop = self.evaluate(
                             input_backprop_tensor)
                         row_seq = list(
                             range(0, num_rows, row_window_size - 1))
                         col_seq = list(
                             range(0, num_cols, col_window_size - 1))
                         row_seq[-1] += 1
                         col_seq[-1] += 1
                         fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
                             input_tensor.get_shape(),
                             output_backprop,
                             row_seq,
                             col_seq,
                             overlapping=True)
                         fap_input_backprop = self.evaluate(
                             fap_input_backprop_tensor)
                         self.assertShapeEqual(input_backprop,
                                               fap_input_backprop_tensor)
                         self.assertAllClose(input_backprop,
                                             fap_input_backprop)
Example #11
 def lel_backward(self, AI, AO, E, DO, Y):
     shape_AO = tf.shape(AO)[3]
     shape_DO = tf.shape(DO)[3]
     assert_op = tf.assert_equal(shape_AO, shape_DO)
     with tf.control_dependencies([assert_op]):
         grad = gen_nn_ops.avg_pool_grad(orig_input_shape=self.size,
                                         grad=DO,
                                         ksize=self.ksize,
                                         strides=self.strides,
                                         padding=self.padding)
         # grad = tf.Print(grad, [tf.shape(AI), tf.shape(AO), tf.shape(DO)], message='', summarize=1000)
         # grad = tf.Print(grad, ['pool', tf.reduce_sum(DO), tf.reduce_sum(grad)], message='', summarize=1000)
         return grad
Example #12
        def custom_grad(op, grad):
            if self.data_format == 'NHWC':
                ksizes = [1, self.pool_size[0], self.pool_size[1], 1]
                strides = [1, self.strides[0], self.strides[1], 1]
            else:
                ksizes = [1, 1, self.pool_size[0], self.pool_size[1]]
                strides = [1, 1, self.strides[0], self.strides[1]]

            #return gen_nn_ops.max_pool_grad(
            return gen_nn_ops.avg_pool_grad(
                array_ops.shape(op.inputs[0]),
                grad,
                ksizes,
                strides,
                self.padding,
                data_format=self.data_format), K.tf.constant(0.0)
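The (op, grad) signature above is what TF 1.x expects from a registered gradient function; the extra zero constant in the return value suggests the overridden op has a second input, which is specific to the original model and not reproduced here. A hedged sketch of how such an override is typically wired in with gradient_override_map, using MaxPool and illustrative shapes (all names below are assumptions, not taken from the snippet):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops, gen_nn_ops

@tf.RegisterGradient("AvgGradForMaxPool")  # hypothetical registry name
def _avg_grad_for_max_pool(op, grad):
    # Backprop through max pooling as if it were average pooling.
    return gen_nn_ops.avg_pool_grad(
        array_ops.shape(op.inputs[0]), grad,
        op.get_attr("ksize"), op.get_attr("strides"), op.get_attr("padding"),
        data_format=op.get_attr("data_format"))

graph = tf.get_default_graph()
x = tf.constant(np.random.rand(1, 4, 4, 1).astype(np.float32))
with graph.gradient_override_map({"MaxPool": "AvgGradForMaxPool"}):
    y = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
dx = tf.gradients(tf.reduce_sum(y), x)[0]

with tf.Session() as sess:
    print(sess.run(dx)[0, :, :, 0])  # every cell gets 0.25 instead of the argmax-only gradient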
Example #13
    def predict(self, img, cv, lrl):

        fts = cv
        for i in range(len(encs) - 1):
            fts = conv(self, 'enc%d' % i, fts, [1, encs[i + 1], encs[i]], ifbn)

        fts = tf.concat([lrl, fts], axis=3)
        fts = conv(self, 'cenc', fts, [3, encs[-1], 3 + encs[-1]], ifbn)
        for i in range(nfinal - 1):
            fts = conv(self, 'cenc%d' % (i + 1), fts, [3, encs[-1], encs[-1]],
                       ifbn)

        fshp = tf.shape(fts)
        inp = tf.concat([lrl, fts], axis=3)

        ypad = tf.mod(topad - tf.mod(fshp[1], topad), topad)
        ypad = [ypad // 2, ypad - ypad // 2]
        xpad = tf.mod(topad - tf.mod(fshp[2], topad), topad)
        xpad = [xpad // 2, xpad - xpad // 2]
        inp = tf.pad(
            inp, tf.stack([[0, 0],
                           tf.stack(ypad),
                           tf.stack(xpad), [0, 0]]), 'REFLECT')

        out = unet(self, inp, numCh, nChInc, nlev, 'u1', encs[-1] + 3)
        out = tf.slice(out, tf.stack([0, ypad[0], xpad[0], 0]),
                       tf.stack([-1, fshp[1], fshp[2], -1]))
        out = tf.nn.relu(
            conv(self, 'out', out, [1, 1, numCh], 0, False) + 128.0)

        shp = tf.shape(img)
        out1 = gen_nn_ops.avg_pool_grad(
            tf.stack([fshp[0], fshp[1] * 2, fshp[2] * 2, 1]), out,
            [1, 2, 2, 1], [1, 2, 2, 1], 'VALID') * 4.0
        if upsamp:
            out2 = tf.image.resize_bilinear(
                out, tf.stack([fshp[1] * 2, fshp[2] * 2]))
            out = tf.where(tf.abs(out1 - out2) < 1, out2, out1)
        else:
            out = out1

        out = tf.slice(
            out, tf.stack([0, fshp[1] * 2 - shp[1], fshp[2] * 2 - shp[2], 0]),
            [-1, -1, -1, -1])

        return tf.squeeze(out, axis=-1)
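The avg_pool_grad call near the end of this snippet is used as an upsampler: with a 2x2 window and stride 2 it spreads each low-resolution value over its window as value/4, so multiplying by 4.0 yields a nearest-neighbour-style 2x upsample. A minimal sketch of that trick in isolation, assuming TensorFlow 1.x and a tiny made-up tensor:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

low = tf.constant(np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1))
up = gen_nn_ops.avg_pool_grad([1, 4, 4, 1], low,
                              [1, 2, 2, 1], [1, 2, 2, 1], 'VALID') * 4.0

with tf.Session() as sess:
    # Each value of `low` is replicated over its 2x2 block in the 4x4 output.
    print(sess.run(up)[0, :, :, 0])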
Example #14
    def test_nhwc(self, padding):
        np_nhwc = self.grad_input_nhwc[padding]
        if padding == "VALID":
            grad_input = tf.placeholder(tf.float32, shape=(128, 112, 74, 3))
        elif padding == "SAME":
            grad_input = tf.placeholder(tf.float32, shape=(128, 112, 75, 3))
        out = avg_pool_grad(self.forward_arg_shape_NHWC,
                            grad_input,
                            self.ksize,
                            self.strides,
                            padding=padding,
                            data_format="NHWC")

        def run_test(sess):
            return sess.run(out, feed_dict={grad_input: np_nhwc})

        assert np.isclose(self.with_ngraph(run_test),
                          self.without_ngraph(run_test)).all()
Example #15
 def testDirectUseOverlapping(self):
   for num_batches in [1, 3]:
     for row_window_size in [2, 5]:
       for col_window_size in [2, 4]:
         num_rows = (row_window_size - 1) * 5 + 1
         num_cols = (col_window_size - 1) * 7 + 1
         for num_channels in [1, 2]:
           input_shape = (num_batches, num_rows, num_cols, num_channels)
           with self.cached_session() as _:
             input_tensor = constant_op.constant(
                 self._GenerateRandomInputTensor(input_shape).astype(
                     np.float32))
             window_size = [1, row_window_size, col_window_size, 1]
             stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
             padding = "VALID"
             output_tensor = nn_ops.avg_pool(input_tensor, window_size,
                                             stride_size, padding)
             output_data = self.evaluate(output_tensor)
             num_elements = 1
             for dim_size in output_data.shape:
               num_elements *= dim_size
             output_backprop = (self._PRNG.rand(num_elements) *
                                1000).reshape(output_data.shape)
             input_backprop_tensor = gen_nn_ops.avg_pool_grad(
                 input_tensor.get_shape(), output_backprop, window_size,
                 stride_size, padding)
             input_backprop = self.evaluate(input_backprop_tensor)
             row_seq = list(range(0, num_rows, row_window_size - 1))
             col_seq = list(range(0, num_cols, col_window_size - 1))
             row_seq[-1] += 1
             col_seq[-1] += 1
             fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
                 input_tensor.get_shape(),
                 output_backprop,
                 row_seq,
                 col_seq,
                 overlapping=True)
             fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
             self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
             self.assertAllClose(input_backprop, fap_input_backprop)
Example #16
conv3 = conv_op(pool2, w3)
pool3 = tf.nn.avg_pool(conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

flat = tf.reshape(pool3, [batch_size, 4*4*2*f3])
fc1 = tf.matmul(flat, w4)

loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=fc1)
[grad] = tf.gradients(loss, [conv1])

####################################

do     = tf.nn.softmax(fc1) - y
dfc1   = tf.matmul(do, tf.transpose(w4))
dflat  = tf.reshape(dfc1, [1, 4, 4, 256])

dpool3 = gen_nn_ops.avg_pool_grad(orig_input_shape=[1,8,8,256], grad=dflat, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
dact3  = dpool3 * tf.cast(tf.abs(conv3) > 0.0, dtype=tf.float32)
dconv3 = tf.nn.conv2d_backprop_input(input_sizes=[1,8,8,192], filter=tf.sign(tf.concat([w3[0], -1.0 * w3[1]], axis=3)), out_backprop=dact3, strides=[1,1,1,1], padding='SAME')

dpool2 = gen_nn_ops.avg_pool_grad(orig_input_shape=[1,16,16,192], grad=dconv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
dact2  = dpool2 * tf.cast(tf.abs(conv2) > 0.0, dtype=tf.float32)
dconv2 = tf.nn.conv2d_backprop_input(input_sizes=[1,16,16,128], filter=tf.sign(tf.concat([w2[0], -1.0 * w2[1]], axis=3)), out_backprop=dact2, strides=[1,1,1,1], padding='SAME')

dpool1 = gen_nn_ops.avg_pool_grad(orig_input_shape=[1,32,32,128], grad=dconv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
dact1  = dpool1 * tf.cast(tf.abs(conv1) > 0.0, dtype=tf.float32)
dconv1 = tf.nn.conv2d_backprop_input(input_sizes=[1,32,32,6], filter=tf.sign(tf.concat([w1[0], -1.0 * w1[1]], axis=3)), out_backprop=dact1, strides=[1,1,1,1], padding='SAME')

####################################

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()