Example #1
def factorized_pool(input_tensor,
                    window_shape,
                    pooling_type,
                    strides,
                    padding,
                    name=None):
  """Performs m x n pooling through a combination of 1xm and 1xn pooling.

  Args:
    input_tensor: Input tensor. Must be rank 2
    window_shape: Pooling window shape
    pooling_type: Either 'MAX' or 'AVG'
    strides: The stride of the pooling window
    padding: 'SAME' or 'VALID'.
    name: Name of the op

  Returns:
    A rank 2 tensor containing the pooled output

  Raises:
    ValueError: if the input tensor is not rank 2
  """
  if input_tensor.get_shape().ndims != 2:
    raise ValueError('factorized_pool() accepts tensors of rank 2 only')

  [height, width] = input_tensor.get_shape()
  with ops.name_scope(name, 'factorized_pool'):
    input_tensor_aligned = array_ops.reshape(
        input_tensor, [1, 1, height, width],
        name=input_tensor.op.name + '_aligned')

    height_pooling = nn_ops.pool(
        input_tensor_aligned,
        window_shape=[1, window_shape[0]],
        pooling_type=pooling_type,
        strides=[1, strides[0]],
        padding=padding)
    swap_height_width = array_ops.transpose(height_pooling, perm=[0, 1, 3, 2])

    width_pooling = nn_ops.pool(
        swap_height_width,
        window_shape=[1, window_shape[1]],
        pooling_type=pooling_type,
        strides=[1, strides[1]],
        padding=padding)

  return array_ops.squeeze(
      array_ops.transpose(width_pooling, perm=[0, 1, 3, 2]), axis=[0, 1])
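A quick way to see what factorized_pool computes is to compare it against a single 2-D nn_ops.pool over the same matrix. The following sketch is illustrative only and assumes a TF 1.x graph/session environment with the same array_ops/nn_ops imports the function itself uses:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops

weights = tf.constant(np.arange(16, dtype=np.float32).reshape(4, 4))
factorized = factorized_pool(weights,
                             window_shape=[2, 2],
                             pooling_type='MAX',
                             strides=[2, 2],
                             padding='SAME')
# Direct 2-D pooling of the same matrix, reshaped to NHWC, for comparison.
direct = array_ops.squeeze(
    nn_ops.pool(array_ops.reshape(weights, [1, 4, 4, 1]),
                window_shape=[2, 2],
                pooling_type='MAX',
                strides=[2, 2],
                padding='SAME'),
    axis=[0, 3])

with tf.Session() as sess:
    print(sess.run([factorized, direct]))  # both yield the same 2x2 result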
Example #2
def _test_pooling(input_shape, **kwargs):
    """ One iteration of pool operation with given shapes and attributes """

    x = -np.arange(np.prod(input_shape),
                   dtype=np.float32).reshape(input_shape) - 1

    with tf.Graph().as_default():
        in_data = constant_op.constant(x, shape=input_shape, dtype='float32')
        # pylint: disable=unused-variable
        pool = nn_ops.pool(in_data, **kwargs)
        # pylint: enable=unused-variable

        if kwargs['pooling_type'] == 'MAX':
            out_node = 'max_pool'
            out_name = 'max_pool:0'
        else:
            out_node = 'avg_pool'
            out_name = 'avg_pool:0'

        with tf.Session() as sess:
            graph_def = tf.graph_util.convert_variables_to_constants(
                sess,
                sess.graph.as_graph_def(add_shapes=True),
                [out_node],
            )

            tf_output = run_tf_graph(sess, x, 'Const:0', out_name)
            tvm_output = run_tvm_graph(graph_def, x.astype('float32'), "Const",
                                       tf_output.shape, 'float32')
            np.testing.assert_allclose(tf_output,
                                       tvm_output,
                                       atol=1e-3,
                                       rtol=1e-3)
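For reference, a typical invocation of this helper passes the nn_ops.pool keyword arguments straight through; the concrete shapes below are illustrative, not taken from the original test suite:

_test_pooling(input_shape=[1, 8, 8, 1],
              window_shape=[2, 2],
              pooling_type='MAX',
              padding='SAME',
              dilation_rate=[1, 1],
              strides=[2, 2])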
Example #3
def _test_pooling_iteration(input_shape, **kwargs):
    """ One iteration of pool operation with given shapes and attributes """

    x = -np.arange(
        np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype='float32')
        nn_ops.pool(in_data, **kwargs)

        if kwargs['pooling_type'] == 'MAX':
            out_name = 'max_pool:0'
        else:
            out_name = 'avg_pool:0'

        compare_tf_with_tvm(x, 'Placeholder:0', out_name)
Example #4
    def call(self, inputs, **kwargs):
        if type(inputs) is list:
            features = inputs[0]
            mask = inputs[1]
        else:
            # if no mask is provided, derive it from the features
            features = inputs
            mask = tf.expand_dims(tf.reduce_sum(features, axis=-1), axis=-1)
            mask = tf.where(tf.equal(mask, 0), tf.zeros_like(mask), tf.ones_like(mask)) 
            
        # Zero out masked positions, then convolve features and mask separately.
        features = tf.multiply(features, mask)
        features = nn_ops.convolution(features, self.kernel, self.padding.upper(), self.strides, self.dilation_rate)

        # A kernel of ones counts the valid (unmasked) inputs in each window.
        kernel = tf.ones([*self.kernel_size, 1, 1])
        norm = nn_ops.convolution(mask, kernel, self.padding.upper(), self.strides, self.dilation_rate)
        
        if self.binary:
            mask = nn_ops.pool(mask, self.kernel_size, 'MAX', self.padding.upper(), self.dilation_rate, self.strides)
        else:
            mask = norm / np.prod(self.kernel_size)
        
        # Avoid division by zero where the pooled mask is empty.
        norm = tf.where(tf.equal(norm, 0), tf.zeros_like(norm), tf.reciprocal(norm))

        features = tf.multiply(features, norm)
        if self.use_bias:
            features = tf.add(features, self.bias)
        
        return [features, mask]
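This call method clearly belongs to a Keras-style partial-convolution layer, but the class definition is not part of the excerpt. Purely as a sketch of how the attributes it relies on (kernel, kernel_size, strides, dilation_rate, padding, binary, use_bias, bias) might be wired up, assuming a hypothetical PartialConv2D class name:

import tensorflow as tf

class PartialConv2D(tf.keras.layers.Layer):
    # Hypothetical scaffold; only the attribute wiring is shown and the real
    # class may differ. The call() body is the one listed above.
    def __init__(self, filters, kernel_size, strides=(1, 1),
                 dilation_rate=(1, 1), padding='same',
                 binary=True, use_bias=True, **kwargs):
        super(PartialConv2D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = tuple(kernel_size)
        self.strides = tuple(strides)
        self.dilation_rate = tuple(dilation_rate)
        self.padding = padding
        self.binary = binary
        self.use_bias = use_bias

    def build(self, input_shape):
        shape = input_shape[0] if isinstance(input_shape, list) else input_shape
        in_channels = int(shape[-1])
        self.kernel = self.add_weight(
            name='kernel',
            shape=self.kernel_size + (in_channels, self.filters))
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias', shape=(self.filters,), initializer='zeros')
        super(PartialConv2D, self).build(input_shape)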
Example #5
 def _test(self, input_shape, **kwargs):
   # Use negative numbers to make sure there isn't any zero padding getting
   # used.
   x = -np.arange(
       np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
   y1 = pool_direct(input=x, **kwargs)
   y2 = nn_ops.pool(input=x, **kwargs)
   self.assertAllClose(y1, self.evaluate(y2), rtol=1e-2, atol=1e-2)
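pool_direct is the NumPy reference implementation this test compares against; it is not shown in the excerpt. A simplified sketch of what such a reference looks like, restricted to NHWC input, 'VALID' padding, and dilation 1 (the real helper is more general):

import numpy as np

def pool_direct_simple(input, window_shape, pooling_type, padding,
                       dilation_rate, strides):
    # Naive NHWC pooling; supports only VALID padding and dilation 1.
    assert padding == 'VALID' and tuple(dilation_rate) == (1, 1)
    n, h, w, c = input.shape
    wh, ww = window_shape
    sh, sw = strides
    oh = (h - wh) // sh + 1
    ow = (w - ww) // sw + 1
    reduce_fn = np.max if pooling_type == 'MAX' else np.mean
    out = np.empty((n, oh, ow, c), dtype=input.dtype)
    for i in range(oh):
        for j in range(ow):
            window = input[:, i * sh:i * sh + wh, j * sw:j * sw + ww, :]
            out[:, i, j, :] = reduce_fn(window, axis=(1, 2))
    return out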
Example #6
  def _maybe_update_block_mask(self, weights, threshold):
    """Performs block-granular masking of the weights.

    Block pruning occurs only if the block_height or block_width is > 1 and
    if the weight tensor has ndims = 2. Otherwise, elementwise pruning occurs.
    Args:
      weights: The weight tensor that needs to be masked.
      threshold: The current threshold value. The function will compute a new
        threshold and return the exponential moving average using the current
        value of threshold

    Returns:
      new_threshold: The new value of the threshold based on weights, and
        sparsity at the current global_step
      new_mask: A numpy array of the same size and shape as weights containing
        0 or 1 to indicate which of the values in weights falls below
        the threshold

    Raises:
      ValueError: if block pooling function is not AVG or MAX
    """
    if weights.get_shape().ndims != 2 or self._block_dim == [1, 1]:
      return self._update_mask(weights, threshold)

    if self._block_pooling_function not in ['AVG', 'MAX']:
      raise ValueError('Unknown pooling function for block sparsity: %s' %
                       self._block_pooling_function)

    with ops.name_scope(weights.op.name + '_pruning_ops'):
      abs_weights = math_ops.abs(
          array_ops.reshape(
              weights, [1, weights.get_shape()[0],
                        weights.get_shape()[1], 1]))
      pool_window = [self._block_dim[0], self._block_dim[1]]
      pooled_weights = nn_ops.pool(
          abs_weights,
          window_shape=pool_window,
          pooling_type=self._block_pooling_function,
          strides=pool_window,
          padding='SAME',
          name=weights.op.name + '_pooled')

      smoothed_threshold, new_mask = self._update_mask(pooled_weights,
                                                       threshold)

      reshaped_mask = array_ops.reshape(
          new_mask,
          [pooled_weights.get_shape()[1],
           pooled_weights.get_shape()[2]])
      updated_mask = _kronecker_product(reshaped_mask,
                                        array_ops.ones(self._block_dim))
      sliced_mask = array_ops.slice(
          updated_mask, [0, 0],
          [weights.get_shape()[0],
           weights.get_shape()[1]])
    return smoothed_threshold, sliced_mask
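The _kronecker_product helper used above to expand the pooled mask back to the full weight shape is not included in this excerpt. A minimal sketch of a rank-2 Kronecker product in the same TF 1.x ops style (this follows a common implementation and may differ from the original):

from tensorflow.python.ops import array_ops

def _kronecker_product(mat1, mat2):
    """Computes the Kronecker product of two rank-2 tensors.

    Each entry of mat1 is expanded into a block scaled by mat2, so an
    [m, n] mask and an [h, w] block of ones yield an [m*h, n*w] mask.
    """
    m1, n1 = mat1.get_shape().as_list()
    mat1_rsh = array_ops.reshape(mat1, [m1, 1, n1, 1])
    m2, n2 = mat2.get_shape().as_list()
    mat2_rsh = array_ops.reshape(mat2, [1, m2, 1, n2])
    return array_ops.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])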
Example #7
 def _test_gradient(self, input_shape, **kwargs):
   x_val = -np.arange(
       np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
   x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
   output = nn_ops.pool(input=x, **kwargs)
   y_shape = output.get_shape().as_list()
   err = gradient_checker.compute_gradient_error(
       [x], [input_shape], output, y_shape, x_init_value=[x_val])
   err_tolerance = 1e-2
   self.assertLess(err, err_tolerance)
Example #8
def _test_pooling_iteration(input_shape, **kwargs):
    """ One iteration of pool operation with given shapes and attributes """

    x = -np.arange(np.prod(input_shape),
                   dtype=np.float32).reshape(input_shape) - 1

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype='float32')
        out = nn_ops.pool(in_data, **kwargs)

        compare_tflite_with_tvm(x, 'Placeholder:0', [in_data], [out])
Example #9
 def _compare_pooling_methods(self, weights, pooling_kwargs):
   with self.test_session():
     variables.global_variables_initializer().run()
     pooled_weights_tf = array_ops.squeeze(
         nn_ops.pool(
             array_ops.reshape(
                 weights,
                 [1, weights.get_shape()[0],
                  weights.get_shape()[1], 1]), **pooling_kwargs))
     pooled_weights_factorized_pool = pruning_utils.factorized_pool(
         weights, **pooling_kwargs)
     self.assertAllClose(pooled_weights_tf.eval(),
                         pooled_weights_factorized_pool.eval())
Example #10
def _test_pooling_iteration(input_shape, **kwargs):
    """ One iteration of pool operation with given shapes and attributes """

    x = -np.arange(
        np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    # TVM expects NCHW layout, so transpose the NHWC input accordingly.
    tvm_data = np.transpose(x, axes=(0, 3, 1, 2))

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype='float32')
        out = nn_ops.pool(in_data, **kwargs)

        compare_tflite_with_tvm(x, tvm_data, 'Placeholder:0', [in_data], [out],
                                output_need_transpose=True)
Example #11
def pool_forward_tf(mats, fsize, stride=1, padding="VALID", method="MAX"):
    """Pools mats with a square fsize x fsize window using nn_ops.pool."""
    strd = [stride, stride]
    poolshape = [fsize, fsize]
    out = nn_ops.pool(mats.astype(np.float64),
                      window_shape=poolshape,
                      pooling_type=method,
                      padding=padding,
                      strides=strd)

    # Converting with np.array assumes eager execution; under graph mode the
    # op would need to be evaluated in a session instead.
    return np.array(out)
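A small usage sketch, assuming TF eager execution so that np.array can read the pooled tensor directly:

mats = np.arange(16, dtype=np.float64).reshape(1, 4, 4, 1)
pooled = pool_forward_tf(mats, fsize=2, stride=2)  # 1x2x2x1 MAX-pooled output
print(pooled.shape)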