Code example #1
File: nn_grad.py | Project: 2er0/tensorflow
def _AvgPoolGrad(op, grad):
  # Gradient of AvgPool: spreads `grad` evenly over each pooling window of
  # the original input. Assumes `from tensorflow.python.ops import
  # array_ops, gen_nn_ops`; in nn_grad.py this function is registered via
  # @ops.RegisterGradient("AvgPool").
  return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]), grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   op.get_attr("padding"),
                                   data_format=op.get_attr("data_format"))
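
A quick way to sanity-check this gradient outside the TensorFlow source is to compare it with autodiff. A minimal sketch of ours (TF 2.x; tf.raw_ops.AvgPoolGrad is the public alias of the same kernel, and all names below are illustrative, not from the snippet):

import tensorflow as tf

x = tf.random.normal([1, 4, 4, 1])
ksize, strides, padding = [1, 2, 2, 1], [1, 2, 2, 1], "VALID"

with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.nn.avg_pool2d(x, ksize, strides, padding)
auto_grad = tape.gradient(y, x)  # implicit upstream gradient of ones

manual_grad = tf.raw_ops.AvgPoolGrad(
    orig_input_shape=tf.shape(x), grad=tf.ones_like(y),
    ksize=ksize, strides=strides, padding=padding)

# Each output element's gradient is spread as 1/window_size over its window.
tf.debugging.assert_near(auto_grad, manual_grad)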
Code example #2
def _AvgPoolGrad(op, grad):
    # Same AvgPool gradient as example #1: redistributes `grad` uniformly
    # across each pooling window of the original input.
    return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]),
                                     grad,
                                     op.get_attr("ksize"),
                                     op.get_attr("strides"),
                                     op.get_attr("padding"),
                                     data_format=op.get_attr("data_format"))
Code example #3
def backprop_avg_pool(self, activation, relevance, ksize, strides,
                      padding):
    # Relevance redistribution through an average-pooling layer (LRP-style
    # epsilon rule): z is the stabilized forward response, s the relevance
    # per unit of response, and the avg-pool gradient spreads s back over
    # each pooling window before weighting by the input activation.
    z = tf.nn.avg_pool(activation, ksize, strides, padding) + self.epsilon
    s = relevance / z
    c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides,
                                  padding)
    return c * activation
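
A useful property of this rule (our own sketch, not from the project above, using the public tf.raw_ops alias instead of the private gen_nn_ops call): for non-overlapping windows, the redistributed relevance approximately sums to the incoming relevance, since multiplying by the activation undoes the averaging up to epsilon.

import tensorflow as tf

a = tf.random.uniform([1, 4, 4, 2], minval=0.1, maxval=1.0)  # positive activations
R = tf.random.uniform([1, 2, 2, 2])                          # relevance at the pool output
ksize, strides, padding, eps = [1, 2, 2, 1], [1, 2, 2, 1], "VALID", 1e-9

z = tf.nn.avg_pool2d(a, ksize, strides, padding) + eps
s = R / z
c = tf.raw_ops.AvgPoolGrad(orig_input_shape=tf.shape(a), grad=s,
                           ksize=ksize, strides=strides, padding=padding)
R_in = c * a

print(float(tf.reduce_sum(R)), float(tf.reduce_sum(R_in)))  # nearly equal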
Code example #4
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
                data_format):
  # Note: get_shape().as_list() is the *static* shape, so it must be fully
  # defined here, unlike the dynamic array_ops.shape(...) in examples #1-#2.
  del outputs  # Unused by average-pooling gradients.
  return gen_nn_ops._avg_pool_grad(
      inputs.get_shape().as_list(),
      output_gradients,
      ksize=ksize,
      strides=strides,
      padding=padding,
      data_format=data_format)
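
A short illustrative sketch of the static-versus-dynamic shape difference (TF 2.x, our names, via the public tf.raw_ops alias):

import tensorflow as tf

x = tf.random.normal([2, 4, 4, 3])
g = tf.ones([2, 2, 2, 3])  # gradient w.r.t. the 2x2 pooled output
args = dict(ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")

# Static shape: requires the input shape to be fully defined.
static = tf.raw_ops.AvgPoolGrad(orig_input_shape=x.shape.as_list(), grad=g, **args)

# Dynamic shape: also works when, e.g., the batch size is unknown.
dynamic = tf.raw_ops.AvgPoolGrad(orig_input_shape=tf.shape(x), grad=g, **args)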
Code example #5
    def maxpool_grad_override(cls, op, grad):
        # Override gradient for MaxPool: redistributes the incoming gradient
        # as if the layer were average pooling; 1e-10 avoids division by zero.
        z = tf.nn.avg_pool(op.inputs[0], op.get_attr('ksize'),
                           op.get_attr('strides'),
                           op.get_attr('padding')) + 1e-10
        s = grad / z
        c = gen_nn_ops._avg_pool_grad(tf.shape(op.inputs[0]), s,
                                      op.get_attr('ksize'),
                                      op.get_attr('strides'),
                                      op.get_attr('padding'))

        return op.inputs[0] * c
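
A rule like this only takes effect once it is registered and mapped over the MaxPool op. A hedged TF1-style sketch (graph mode; under TF 2.x use the tf.compat.v1 equivalents; the name "AvgRedistMaxPool" and the surrounding code are our illustration, not from the project):

import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

@tf.RegisterGradient("AvgRedistMaxPool")
def _avg_redist_max_pool_grad(op, grad):
    # Same redistribution rule as maxpool_grad_override above. The private
    # _avg_pool_grad name matches the snippets on this page; it varies by
    # TF version.
    z = tf.nn.avg_pool(op.inputs[0], op.get_attr('ksize'),
                       op.get_attr('strides'), op.get_attr('padding')) + 1e-10
    s = grad / z
    c = gen_nn_ops._avg_pool_grad(tf.shape(op.inputs[0]), s,
                                  op.get_attr('ksize'),
                                  op.get_attr('strides'),
                                  op.get_attr('padding'))
    return op.inputs[0] * c

g = tf.get_default_graph()
x = tf.ones([1, 4, 4, 1])
with g.gradient_override_map({'MaxPool': 'AvgRedistMaxPool'}):
    pooled = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
relevance = tf.gradients(pooled, x)  # now uses the override above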
Code example #6
    def backprop_pool(self, activation, relevance, ksize, strides, pooling_type, padding='SAME'):

        if pooling_type.lower() == 'avg':  # average pooling
            z = nn_ops.avg_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides, padding)
            return activation * c
        else:
            z = nn_ops.max_pool(activation, ksize, strides, padding) + 1e-10
            s = relevance / z
            c = gen_nn_ops._max_pool_grad(activation, z, s, ksize, strides, padding)
            return activation * c
Code example #7
 def testDirectUseOverlapping(self):
     # Checks that _fractional_avg_pool_grad with overlapping=True matches
     # _avg_pool_grad when the hand-built row/col sequences describe the
     # same windows: size w with stride w - 1 in each spatial dimension.
     for num_batches in [1, 3]:
         for row_window_size in [2, 5]:
             for col_window_size in [2, 4]:
                 num_rows = (row_window_size - 1) * 5 + 1
                 num_cols = (col_window_size - 1) * 7 + 1
                 for num_channels in [1, 2]:
                     input_shape = (num_batches, num_rows, num_cols,
                                    num_channels)
                     with self.test_session() as _:
                         input_tensor = tf.constant(
                             self._GenerateRandomInputTensor(
                                 input_shape).astype(np.float32))
                         window_size = [
                             1, row_window_size, col_window_size, 1
                         ]
                         stride_size = [
                             1, row_window_size - 1, col_window_size - 1, 1
                         ]
                         padding = "VALID"
                         output_tensor = tf.nn.avg_pool(
                             input_tensor, window_size, stride_size,
                             padding)
                         output_data = output_tensor.eval()
                         num_elements = 1
                         for dim_size in output_data.shape:
                             num_elements *= dim_size
                         output_backprop = (self._PRNG.rand(num_elements) *
                                            1000).reshape(output_data.shape)
                         input_backprop_tensor = gen_nn_ops._avg_pool_grad(
                             input_tensor.get_shape(), output_backprop,
                             window_size, stride_size, padding)
                         input_backprop = input_backprop_tensor.eval()
                         row_seq = list(
                             range(0, num_rows, row_window_size - 1))
                         col_seq = list(
                             range(0, num_cols, col_window_size - 1))
                         row_seq[-1] += 1
                         col_seq[-1] += 1
                         fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
                             input_tensor.get_shape(),
                             output_backprop,
                             row_seq,
                             col_seq,
                             overlapping=True)
                          fap_input_backprop = fap_input_backprop_tensor.eval()
                         self.assertShapeEqual(input_backprop,
                                               fap_input_backprop_tensor)
                         self.assertAllClose(input_backprop,
                                             fap_input_backprop)
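
For orientation (a TF 2.x sketch of ours, separate from the test): the forward op whose gradient is exercised above also returns the row and column pooling sequences that _fractional_avg_pool_grad consumes; the test instead builds those sequences by hand so the fractional regions line up with plain avg-pool windows of size w and stride w - 1.

import tensorflow as tf

x = tf.random.normal([1, 6, 8, 1])
# pooling_ratio must be 1.0 in the batch and depth dimensions.
out, row_seq, col_seq = tf.nn.fractional_avg_pool(
    x, pooling_ratio=[1.0, 1.44, 1.73, 1.0], overlapping=True)
print(out.shape, row_seq.numpy(), col_seq.numpy())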
Code example #8
def backprop_pool(activation,
                  relevance,
                  ksize,
                  strides,
                  pooling_type,
                  padding='VALID'):
    if pooling_type.lower() == 'avg':  # avg pooling
        z = nn_ops.avg_pool(activation, ksize, strides, padding) + 1e-10
        s = relevance / z
        c = gen_nn_ops._avg_pool_grad(tf.shape(activation), s, ksize, strides,
                                      padding)
        return activation * c
    else:  # max pooling
        z = nn_ops.max_pool(activation, ksize, strides, padding) + 1e-10
        s = relevance / z
        c = gen_nn_ops.max_pool_grad(activation, z, s, ksize, strides, padding)
        return activation * c
Code example #9
 def testDirectUseOverlapping(self):
   # Same comparison as example #7, here written against the internal
   # constant_op / nn_ops modules instead of the public tf namespace.
   for num_batches in [1, 3]:
     for row_window_size in [2, 5]:
       for col_window_size in [2, 4]:
         num_rows = (row_window_size - 1) * 5 + 1
         num_cols = (col_window_size - 1) * 7 + 1
         for num_channels in [1, 2]:
           input_shape = (num_batches, num_rows, num_cols, num_channels)
           with self.test_session() as _:
             input_tensor = constant_op.constant(
                 self._GenerateRandomInputTensor(input_shape).astype(
                     np.float32))
             window_size = [1, row_window_size, col_window_size, 1]
             stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
             padding = "VALID"
             output_tensor = nn_ops.avg_pool(input_tensor, window_size,
                                             stride_size, padding)
             output_data = output_tensor.eval()
             num_elements = 1
             for dim_size in output_data.shape:
               num_elements *= dim_size
             output_backprop = (self._PRNG.rand(num_elements) *
                                1000).reshape(output_data.shape)
             input_backprop_tensor = gen_nn_ops._avg_pool_grad(
                 input_tensor.get_shape(), output_backprop, window_size,
                 stride_size, padding)
             input_backprop = input_backprop_tensor.eval()
             row_seq = list(range(0, num_rows, row_window_size - 1))
             col_seq = list(range(0, num_cols, col_window_size - 1))
             row_seq[-1] += 1
             col_seq[-1] += 1
             fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
                 input_tensor.get_shape(),
                 output_backprop,
                 row_seq,
                 col_seq,
                 overlapping=True)
             fap_input_backprop = fap_input_backprop_tensor.eval()
             self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
             self.assertAllClose(input_backprop, fap_input_backprop)