def testDirectNotUseOverlapping(self):
   for num_batches in [1, 3]:
     for row_window_size in [2, 5]:
       for col_window_size in [2, 4]:
         num_rows = row_window_size * 5
         num_cols = col_window_size * 7
         for num_channels in [1, 2]:
           input_shape = (num_batches, num_rows, num_cols, num_channels)
           with self.cached_session() as _:
             input_tensor = constant_op.constant(
                 self._GenerateUniqueRandomInputTensor(input_shape))
             window_size = [1, row_window_size, col_window_size, 1]
             stride_size = [1, row_window_size, col_window_size, 1]
             padding = "VALID"
             output_tensor = nn_ops.max_pool(input_tensor, window_size,
                                             stride_size, padding)
             output_data = self.evaluate(output_tensor)
             output_backprop = self._PRNG.randint(100, size=output_data.shape)
             input_backprop_tensor = gen_nn_ops.max_pool_grad(
                 input_tensor, output_tensor, output_backprop, window_size,
                 stride_size, padding)
             input_backprop = self.evaluate(input_backprop_tensor)
             row_seq = list(range(0, num_rows + 1, row_window_size))
             col_seq = list(range(0, num_cols + 1, col_window_size))
             fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
                 input_tensor,
                 output_tensor,
                 output_backprop,
                 row_seq,
                 col_seq,
                 overlapping=False)
             fmp_input_backprop = self.evaluate(fmp_input_backprop_tensor)
             self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
             self.assertAllClose(input_backprop, fmp_input_backprop)
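For reference, the gradient routing these direct tests exercise can be written out in a few lines of NumPy. This is an illustrative single-image, single-channel sketch, not TensorFlow's actual kernel, and the name fractional_max_pool_grad_ref is made up here. Each pooling region, delimited by consecutive entries of row_seq/col_seq, forwards its incoming gradient to the position of the region's maximum:

import numpy as np

def fractional_max_pool_grad_ref(inp, out_grad, row_seq, col_seq,
                                 overlapping=False):
  # inp: (rows, cols) array; out_grad: (len(row_seq)-1, len(col_seq)-1).
  grad = np.zeros_like(inp)
  for i in range(len(row_seq) - 1):
    # With overlapping=True a region also includes its end boundary
    # row/column, clamped to the input size.
    r1 = min(row_seq[i + 1] + 1, inp.shape[0]) if overlapping else row_seq[i + 1]
    for j in range(len(col_seq) - 1):
      c1 = min(col_seq[j + 1] + 1, inp.shape[1]) if overlapping else col_seq[j + 1]
      region = inp[row_seq[i]:r1, col_seq[j]:c1]
      # np.argmax scans row-major, matching the kernel's row-by-row scan.
      r, c = np.unravel_index(np.argmax(region), region.shape)
      grad[row_seq[i] + r, col_seq[j] + c] += out_grad[i, j]
  return grad

On the 4x4 input of the repeated-max test further below, this sketch reproduces both expected backprop tensors, including the 39.0 that four overlapping regions accumulate at a single position.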
Example #2
def testDirectUseOverlapping(self):
  for num_batches in [1, 3]:
    for row_window_size in [2, 5]:
      for col_window_size in [2, 4]:
        num_rows = (row_window_size - 1) * 5 + 1
        num_cols = (col_window_size - 1) * 7 + 1
        for num_channels in [1, 2]:
          input_shape = (num_batches, num_rows, num_cols, num_channels)
          with self.cached_session() as _:
            input_tensor = constant_op.constant(
                self._GenerateUniqueRandomInputTensor(input_shape))
            window_size = [1, row_window_size, col_window_size, 1]
            stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
            padding = "VALID"
            output_tensor = nn_ops.max_pool(input_tensor, window_size,
                                            stride_size, padding)
            output_data = self.evaluate(output_tensor)
            output_backprop = self._PRNG.randint(100, size=output_data.shape)
            input_backprop_tensor = gen_nn_ops.max_pool_grad(
                input_tensor, output_tensor, output_backprop, window_size,
                stride_size, padding)
            input_backprop = self.evaluate(input_backprop_tensor)
            row_seq = list(range(0, num_rows, row_window_size - 1))
            col_seq = list(range(0, num_cols, col_window_size - 1))
            row_seq[-1] += 1
            col_seq[-1] += 1
            fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
                input_tensor,
                output_tensor,
                output_backprop,
                row_seq,
                col_seq,
                overlapping=True)
            fmp_input_backprop = self.evaluate(fmp_input_backprop_tensor)
            self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
            self.assertAllClose(input_backprop, fmp_input_backprop)
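To make the overlapping sequence construction above concrete, here is the arithmetic for the smallest case, worked by hand:

# row_window_size = 2: the stride is window - 1 = 1, so adjacent windows
# overlap by exactly one row.
num_rows = (2 - 1) * 5 + 1                 # 6
row_seq = list(range(0, 6, 2 - 1))         # [0, 1, 2, 3, 4, 5]
row_seq[-1] += 1                           # [0, 1, 2, 3, 4, 6]
# With overlapping=True, region i spans rows row_seq[i]..row_seq[i+1]
# inclusive (clamped at the input edge): rows [0,1], [1,2], [2,3],
# [3,4], [4,5] -- exactly the five windows max_pool produces with a
# 2-row window and stride 1 on 6 rows, which is why the two gradients
# must agree.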
Example #3
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalMaxPool.

  Since FractionalMaxPool has three outputs, three gradients are passed in,
  one for each output. Only the first one is useful; the other two are
  empty.

  Args:
    op: The FractionalMaxPoolOp.
    grad_0: Gradient with respect to op.outputs[0].
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalMaxPool op.
  """
  return gen_nn_ops.fractional_max_pool_grad(
      op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2],
      op.get_attr("overlapping"))
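This function is registered (in nn_grad.py) as the gradient of the FractionalMaxPool op, so it fires automatically during backprop. Below is a minimal TF2 sketch of that, with the input shape and pooling_ratio being arbitrary choices for illustration:

import tensorflow as tf

x = tf.random.uniform([1, 8, 8, 1])
with tf.GradientTape() as tape:
  tape.watch(x)
  # fractional_max_pool returns (output, row_pooling_sequence,
  # col_pooling_sequence); only the first output carries a useful gradient.
  output, row_seq, col_seq = tf.nn.fractional_max_pool(
      x, pooling_ratio=[1.0, 1.5, 1.5, 1.0], overlapping=True)
  loss = tf.reduce_sum(output)
# Backprop invokes the registered gradient above, which calls
# fractional_max_pool_grad with x, output, d(loss)/d(output), and the two
# pooling sequences; row_seq and col_seq themselves receive no gradient.
dx = tape.gradient(loss, x)
print(dx.shape)  # (1, 8, 8, 1)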
Example #4
def testWhenRepeatedMaxValueInPoolingRegion(self):
  """Test when there is a repeated max value in a pooling region.

  There is no formal definition of what the gradient should be when a
  pooling cell contains multiple max values, such as
      | 1 5 |
      | 5 3 |
  The expected result depends heavily on the implementation; if someone
  swaps the order of the nested for loops that walk the tensor, the result
  would be very different.

  The goal of this test is to raise an alert when someone changes the
  implementation. The current implementation scans row by row.
  """
  input_data = [5.0, 4.0, 6.0, 7.0,
                3.0, 5.0, 9.0, 6.0,
                8.0, 8.0, 9.0, 5.0,
                7.0, 4.0, 0.0, 0.0]  # pyformat: disable
  input_size = [1, 4, 4, 1]
  output_backprop = [12.0, 15.0,
                     17.0, -5.0,
                     6.0, 21.0]  # pyformat: disable
  row_seq = [0, 1, 3, 4]
  col_seq = [0, 2, 4]
  output_data_not_overlapping = [5.0, 7.0,
                                 8.0, 9.0,
                                 7.0, 0.0]  # pyformat: disable
  output_data_overlapping = [9.0, 9.0,
                             9.0, 9.0,
                             7.0, 0.0]  # pyformat: disable
  output_size = [1, 3, 2, 1]
  expected_input_backprop_not_overlapping = np.reshape(
      [12.0, 0.0, 0.0, 15.0,
       0.0, 0.0, -5.0, 0.0,
       17.0, 0.0, 0.0, 0.0,
       6.0, 0.0, 21.0, 0.0],
      input_size)  # pyformat: disable
  expected_input_backprop_overlapping = np.reshape(
      [0.0, 0.0, 0.0, 0.0,
       0.0, 0.0, 39.0, 0.0,
       0.0, 0.0, 0.0, 0.0,
       6.0, 0.0, 21.0, 0.0],
      input_size)  # pyformat: disable
  with self.cached_session() as _:
    # Test when overlapping is False.
    input_tensor = constant_op.constant(input_data, shape=input_size)
    output_tensor = constant_op.constant(
        output_data_not_overlapping, shape=output_size)
    grad = constant_op.constant(output_backprop, shape=output_size)
    r = gen_nn_ops.fractional_max_pool_grad(
        input_tensor,
        output_tensor,
        grad,
        row_seq,
        col_seq,
        overlapping=False)
    input_backprop_not_overlapping = self.evaluate(r)
    self.assertShapeEqual(
        np.reshape(expected_input_backprop_not_overlapping, input_size), r)
    self.assertAllClose(expected_input_backprop_not_overlapping,
                        input_backprop_not_overlapping)
    # Test when overlapping is True.
    output_tensor = constant_op.constant(
        output_data_overlapping, shape=output_size)
    r = gen_nn_ops.fractional_max_pool_grad(
        input_tensor, output_tensor, grad, row_seq, col_seq, overlapping=True)
    input_backprop_overlapping = self.evaluate(r)
    self.assertShapeEqual(
        np.reshape(expected_input_backprop_overlapping, input_size), r)
    self.assertAllClose(expected_input_backprop_overlapping,
                        input_backprop_overlapping)
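The row-by-row tie-breaking the docstring describes can be checked directly against NumPy, whose argmax scans in the same row-major order; the 2x2 cell below is the one from the docstring:

import numpy as np

cell = np.array([[1.0, 5.0],
                 [5.0, 3.0]])
# np.argmax returns the first maximum in row-major order, so the tie
# between the two 5s resolves to position (0, 1), not (1, 0).
print(np.unravel_index(np.argmax(cell), cell.shape))  # (0, 1)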