def testMinBaseDepth(self):
   resnets = [
       resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,
       resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200
   ]
   resnet_names = [
       'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'
   ]
   for resnet, resnet_name in zip(resnets, resnet_names):
     min_base_depth = 5
     global_pool = True
     num_classes = 10
     inputs = create_test_input(2, 224, 224, 3)
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
       _, end_points = resnet(
           inputs,
           num_classes,
           global_pool=global_pool,
           min_base_depth=min_base_depth,
           depth_multiplier=0,
           scope=resnet_name)
       for block in ['block1', 'block2', 'block3', 'block4']:
         block_name = resnet_name + '/' + block
         self.assertTrue(block_name in end_points)
         self.assertEqual(
             len(end_points[block_name].get_shape().as_list()), 4)
         # The output depth is 4 times base_depth.
         depth_expected = min_base_depth * 4
         self.assertEqual(
             end_points[block_name].get_shape().as_list()[3], depth_expected)
 def testAtrousFullyConvolutionalValues(self):
   """Verify dense feature extraction with atrous convolution."""
   nominal_stride = 32
   for output_stride in [4, 8, 16, 32, None]:
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
       with tf.Graph().as_default():
         with self.test_session() as sess:
           tf.set_random_seed(0)
           inputs = create_test_input(2, 81, 81, 3)
           # Dense feature extraction followed by subsampling.
           output, _ = self._resnet_small(inputs, None, is_training=False,
                                          global_pool=False,
                                          output_stride=output_stride)
           if output_stride is None:
             factor = 1
           else:
             factor = nominal_stride // output_stride
           output = resnet_utils.subsample(output, factor)
           # Make the two networks use the same weights.
           tf.get_variable_scope().reuse_variables()
           # Feature extraction at the nominal network rate.
           expected, _ = self._resnet_small(inputs, None, is_training=False,
                                            global_pool=False)
           sess.run(tf.global_variables_initializer())
           self.assertAllClose(output.eval(), expected.eval(),
                               atol=1e-4, rtol=1e-4)
 def testEndPointsV1(self):
   """Test the end points of a tiny v1 bottleneck network."""
   blocks = [
       resnet_v1.resnet_v1_block(
           'block1', base_depth=1, num_units=2, stride=2),
       resnet_v1.resnet_v1_block(
           'block2', base_depth=2, num_units=2, stride=1),
   ]
   inputs = create_test_input(2, 32, 16, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
   expected = [
       'tiny/block1/unit_1/bottleneck_v1/shortcut',
       'tiny/block1/unit_1/bottleneck_v1/conv1',
       'tiny/block1/unit_1/bottleneck_v1/conv2',
       'tiny/block1/unit_1/bottleneck_v1/conv3',
       'tiny/block1/unit_2/bottleneck_v1/conv1',
       'tiny/block1/unit_2/bottleneck_v1/conv2',
       'tiny/block1/unit_2/bottleneck_v1/conv3',
       'tiny/block2/unit_1/bottleneck_v1/shortcut',
       'tiny/block2/unit_1/bottleneck_v1/conv1',
       'tiny/block2/unit_1/bottleneck_v1/conv2',
       'tiny/block2/unit_1/bottleneck_v1/conv3',
       'tiny/block2/unit_2/bottleneck_v1/conv1',
       'tiny/block2/unit_2/bottleneck_v1/conv2',
       'tiny/block2/unit_2/bottleneck_v1/conv3']
   self.assertItemsEqual(expected, list(end_points.keys()))
 def testEndpointNames(self):
     # Like ResnetUtilsTest.testEndPointsV2(), but for the public API.
     global_pool = True
     num_classes = 10
     inputs = create_test_input(2, 224, 224, 3)
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
         _, end_points = self._resnet_small(inputs,
                                            num_classes,
                                            global_pool=global_pool,
                                            scope='resnet')
     expected = ['resnet/conv1']
     for block in range(1, 5):
         for unit in range(1, 4 if block < 4 else 3):
             for conv in range(1, 4):
                 expected.append(
                     'resnet/block%d/unit_%d/bottleneck_v2/conv%d' %
                     (block, unit, conv))
             expected.append('resnet/block%d/unit_%d/bottleneck_v2' %
                             (block, unit))
         expected.append('resnet/block%d/unit_1/bottleneck_v2/shortcut' %
                         block)
         expected.append('resnet/block%d' % block)
     expected.extend([
         'global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
         'predictions'
     ])
     self.assertItemsEqual(list(end_points.keys()), expected)
 def testFullyConvolutionalUnknownHeightWidth(self):
   batch = 2
   height, width = 65, 65
   global_pool = False
   inputs = create_test_input(batch, None, None, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
   self.assertListEqual(output.get_shape().as_list(),
                        [batch, None, None, 32])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(tf.global_variables_initializer())
     output = sess.run(output, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 3, 3, 32))
 def testClassificationShapes(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(inputs, num_classes,
                                        global_pool=global_pool,
                                        scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 28, 28, 4],
         'resnet/block2': [2, 14, 14, 8],
         'resnet/block3': [2, 7, 7, 16],
         'resnet/block4': [2, 7, 7, 32]}
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
 def testFullyConvolutionalEndpointShapes(self):
   global_pool = False
   num_classes = 10
   inputs = create_test_input(2, 321, 321, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(inputs, num_classes,
                                        global_pool=global_pool,
                                        spatial_squeeze=False,
                                        scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 41, 41, 4],
         'resnet/block2': [2, 21, 21, 8],
         'resnet/block3': [2, 11, 11, 16],
         'resnet/block4': [2, 11, 11, 32]}
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
 def testClassificationEndPoints(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     logits, end_points = self._resnet_small(inputs, num_classes,
                                             global_pool=global_pool,
                                             spatial_squeeze=False,
                                             scope='resnet')
   self.assertTrue(logits.op.name.startswith('resnet/logits'))
   self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
   self.assertTrue('predictions' in end_points)
   self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                        [2, 1, 1, num_classes])
   self.assertTrue('global_pool' in end_points)
   self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
                        [2, 1, 1, 32])
    def testAtrousValuesBottleneck(self):
        """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
        block = resnet_v2.resnet_v2_block
        blocks = [
            block('block1', base_depth=1, num_units=2, stride=2),
            block('block2', base_depth=2, num_units=2, stride=2),
            block('block3', base_depth=4, num_units=2, stride=2),
            block('block4', base_depth=8, num_units=2, stride=1),
        ]
        nominal_stride = 8

        # Test both odd and even input dimensions.
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with slim.arg_scope([slim.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with tf.Graph().as_default():
                        with self.test_session() as sess:
                            tf.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            # Dense feature extraction followed by subsampling.
                            output = resnet_utils.stack_blocks_dense(
                                inputs, blocks, output_stride)
                            if output_stride is None:
                                factor = 1
                            else:
                                factor = nominal_stride // output_stride

                            output = resnet_utils.subsample(output, factor)
                            # Make the two networks use the same weights.
                            tf.get_variable_scope().reuse_variables()
                            # Feature extraction at the nominal network rate.
                            expected = self._stack_blocks_nondense(
                                inputs, blocks)
                            sess.run(tf.global_variables_initializer())
                            output, expected = sess.run([output, expected])
                            self.assertAllClose(output,
                                                expected,
                                                atol=1e-4,
                                                rtol=1e-4)
  # Example #10 (0)  [listing artifact from the source code-sharing site]
 def testDepthMultiplier(self):
   resnets = [
       resnet_v1.resnet_v1_50, resnet_v1.resnet_v1_101,
       resnet_v1.resnet_v1_152, resnet_v1.resnet_v1_200
   ]
   resnet_names = [
       'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200'
   ]
   for resnet, resnet_name in zip(resnets, resnet_names):
     depth_multiplier = 0.25
     global_pool = True
     num_classes = 10
     inputs = create_test_input(2, 224, 224, 3)
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
       scope_base = resnet_name + '_base'
       _, end_points_base = resnet(
           inputs,
           num_classes,
           global_pool=global_pool,
           min_base_depth=1,
           scope=scope_base)
       scope_test = resnet_name + '_test'
       _, end_points_test = resnet(
           inputs,
           num_classes,
           global_pool=global_pool,
           min_base_depth=1,
           depth_multiplier=depth_multiplier,
           scope=scope_test)
       for block in ['block1', 'block2', 'block3', 'block4']:
         block_name_base = scope_base + '/' + block
         block_name_test = scope_test + '/' + block
         self.assertTrue(block_name_base in end_points_base)
         self.assertTrue(block_name_test in end_points_test)
         self.assertEqual(
             len(end_points_base[block_name_base].get_shape().as_list()), 4)
         self.assertEqual(
             len(end_points_test[block_name_test].get_shape().as_list()), 4)
         self.assertListEqual(
             end_points_base[block_name_base].get_shape().as_list()[:3],
             end_points_test[block_name_test].get_shape().as_list()[:3])
         self.assertEqual(
             int(depth_multiplier *
                 end_points_base[block_name_base].get_shape().as_list()[3]),
             end_points_test[block_name_test].get_shape().as_list()[3])
  # Example #11 (0)  [listing artifact from the source code-sharing site]
 def testUnknownBatchSize(self):
   batch = 2
   height, width = 65, 65
   global_pool = True
   num_classes = 10
   inputs = create_test_input(None, height, width, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     logits, _ = self._resnet_small(inputs, num_classes,
                                    global_pool=global_pool,
                                    spatial_squeeze=False,
                                    scope='resnet')
   self.assertTrue(logits.op.name.startswith('resnet/logits'))
   self.assertListEqual(logits.get_shape().as_list(),
                        [None, 1, 1, num_classes])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(tf.global_variables_initializer())
     output = sess.run(logits, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 1, 1, num_classes))
  # Example #12 (0)  [listing artifact from the source code-sharing site]
  def testStridingLastUnitVsSubsampleBlockEnd(self):
    """Compares subsampling at the block's last unit or block's end.

    Makes sure that the final output is the same when we use a stride at the
    last unit of a block vs. we subsample activations at the end of a block.
    """
    block = resnet_v1.resnet_v1_block

    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]

    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with slim.arg_scope([slim.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          # Each output_stride is tested in its own graph and session.
          with tf.Graph().as_default():
            with self.test_session() as sess:
              tf.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)

              # Subsampling at the last unit of the block.  Block outputs are
              # collected into the 'output' collection for later comparison.
              output = resnet_utils.stack_blocks_dense(
                  inputs, blocks, output_stride,
                  store_non_strided_activations=False,
                  outputs_collections='output')
              output_end_points = slim.utils.convert_collection_to_dict(
                  'output')

              # Make the two networks use the same weights.
              tf.get_variable_scope().reuse_variables()

              # Subsample activations at the end of the blocks.
              expected = resnet_utils.stack_blocks_dense(
                  inputs, blocks, output_stride,
                  store_non_strided_activations=True,
                  outputs_collections='expected')
              expected_end_points = slim.utils.convert_collection_to_dict(
                  'expected')

              sess.run(tf.global_variables_initializer())

              # Make sure that the final output is the same.
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)

              # Make sure that intermediate block activations in
              # output_end_points are subsampled versions of the corresponding
              # ones in expected_end_points.
              for i, block in enumerate(blocks[:-1:]):
                output = output_end_points[block.scope]
                expected = expected_end_points[block.scope]
                # NOTE(review): 2 ** i appears to be block i's cumulative
                # output stride; once it reaches output_stride, atrous mode
                # is active and the stored activations are not subsampled —
                # confirm against stack_blocks_dense().
                atrous_activated = (output_stride is not None and
                                    2 ** i >= output_stride)
                if not atrous_activated:
                  expected = resnet_utils.subsample(expected, 2)
                output, expected = sess.run([output, expected])
                self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)