Example #1
File: resnet_v1.py Project: JayYip/GLN
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):
    """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(inputs,
                                   depth, [1, 1],
                                   stride=stride,
                                   activation_fn=None,
                                   scope='shortcut')

        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               activation_fn=None,
                               scope='conv3')

        output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
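Every example on this page feeds its tensors through resnet_utils.subsample. For orientation, here is a minimal sketch of what that utility does, mirroring the TF-slim implementation (a 1x1 max pool with the given stride; treat this as an illustration rather than the library source):

def subsample(inputs, factor, scope=None):
    # factor == 1 is a no-op; otherwise keep the top-left pixel of every
    # factor x factor cell via a 1x1 max pool with stride `factor`.
    if factor == 1:
        return inputs
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)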
Example #2
    def testConv2DSameEven(self):
        n, n2 = 4, 2

        # Input image.
        x = create_test_input(1, n, n, 1)

        # Convolution kernel.
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])

        tf.get_variable("Conv/weights", initializer=w)
        tf.get_variable("Conv/biases", initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()

        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope="Conv")
        y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
                                   [43, 66, 84, 46], [26, 37, 46, 22]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43], [43, 84]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope="Conv")
        y3_expected = y2_expected

        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope="Conv")
        y4_expected = tf.to_float([[48, 37], [37, 22]])
        y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])

        with self.test_session() as sess:
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
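The interesting contrast in this test is y3 versus y4: at stride 2, slim.conv2d with SAME padding (y4) samples a different grid on even-sized inputs than resnet_utils.conv2d_same (y3). A sketch of how conv2d_same achieves input-size-independent sampling, closely following the TF-slim version (assume tf and slim are imported as in the tests):

def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           rate=rate, padding='SAME', scope=scope)
    # Pad explicitly so the sampled grid does not depend on the input size,
    # then convolve with VALID padding.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs,
                    [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                       rate=rate, padding='VALID', scope=scope)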
Example #3
 def testAtrousFullyConvolutionalValues(self):
   """Verify dense feature extraction with atrous convolution."""
   nominal_stride = 32
   for output_stride in [4, 8, 16, 32, None]:
     with slim.arg_scope(xception.xception_arg_scope()):
       with tf.Graph().as_default():
         with self.test_session() as sess:
           tf.set_random_seed(0)
           inputs = create_test_input(2, 96, 97, 3)
           # Dense feature extraction followed by subsampling.
           output, _ = self._xception_small(
               inputs,
               None,
               is_training=False,
               global_pool=False,
               output_stride=output_stride)
           if output_stride is None:
             factor = 1
           else:
             factor = nominal_stride // output_stride
           output = resnet_utils.subsample(output, factor)
           # Make the two networks use the same weights.
           tf.get_variable_scope().reuse_variables()
           # Feature extraction at the nominal network rate.
           expected, _ = self._xception_small(
               inputs,
               None,
               is_training=False,
               global_pool=False)
           sess.run(tf.global_variables_initializer())
           self.assertAllClose(output.eval(), expected.eval(),
                               atol=1e-5, rtol=1e-5)
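The factor arithmetic above can be checked by hand. Assuming SAME-style layers where each spatial dimension comes out as ceil(n / stride), subsampling the dense grid by factor recovers the nominal grid exactly; a quick numeric check for this test's 96x97 input:

import math

nominal_stride, output_stride = 32, 8
height, width = 96, 97                          # the test input size
factor = nominal_stride // output_stride        # 4
dense = (math.ceil(height / output_stride),     # (12, 13) at stride 8
         math.ceil(width / output_stride))
nominal = (math.ceil(height / nominal_stride),  # (3, 4) at stride 32
           math.ceil(width / nominal_stride))
assert tuple(math.ceil(d / factor) for d in dense) == nominal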
Example #4
 def testAtrousFullyConvolutionalValues(self):
     """Verify dense feature extraction with atrous convolution."""
     nominal_stride = 32
     for output_stride in [4, 8, 16, 32, None]:
         with slim.arg_scope(resnet_utils.resnet_arg_scope()):
             with tf.Graph().as_default():
                 with self.test_session() as sess:
                     tf.set_random_seed(0)
                     inputs = create_test_input(2, 81, 81, 3)
                     # Dense feature extraction followed by subsampling.
                     output, _ = self._resnet_small(
                         inputs,
                         None,
                         is_training=False,
                         global_pool=False,
                         output_stride=output_stride)
                     if output_stride is None:
                         factor = 1
                     else:
                         factor = nominal_stride // output_stride
                     output = resnet_utils.subsample(output, factor)
                     # Make the two networks use the same weights.
                     tf.get_variable_scope().reuse_variables()
                     # Feature extraction at the nominal network rate.
                     expected, _ = self._resnet_small(inputs,
                                                      None,
                                                      is_training=False,
                                                      global_pool=False)
                     sess.run(tf.global_variables_initializer())
                     self.assertAllClose(output.eval(),
                                         expected.eval(),
                                         atol=1e-4,
                                         rtol=1e-4)
Example #5
def basic_block(inputs,
                depth,
                depth_bottleneck,
                stride,
                unit_rate=1,
                rate=1,
                outputs_collections=None,
                data_format="NHWC",
                scope=None):
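    """Pre-activation ("v2"-style) basic residual unit.

    BN+ReLU pre-activation feeds the residual branch (two 3x3 convolutions)
    and, when the depth changes, a 1x1 projection shortcut; otherwise the raw
    input is subsampled for the shortcut. The sum passes through a final ReLU.
    """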

    # tf.logging.info(inputs)
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = channel_dimension(inputs.get_shape(),
                                     data_format,
                                     min_rank=4)
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        if depth_bottleneck == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = layers_lib.conv2d(preact,
                                         depth_bottleneck, [1, 1],
                                         stride=stride,
                                         normalizer_fn=None,
                                         activation_fn=None,
                                         scope='shortcut')

        residual = layers_lib.conv2d(preact, depth_bottleneck, 3, stride=1)

        residual = resnet_utils.subsample(residual, stride, scope='conv1')

        residual = layers_lib.conv2d(residual,
                                     depth_bottleneck,
                                     3,
                                     stride=1,
                                     rate=rate * unit_rate,
                                     scope='conv2',
                                     normalizer_fn=None,
                                     activation_fn=None)

        output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
Example #6
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               unit_rate=1,
               rate=1,
               outputs_collections=None,
               scope=None):
  """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the unit's output compared to its input.
    unit_rate: An integer, unit rate for atrous convolution.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(
          inputs,
          depth,
          [1, 1],
          stride=stride,
          activation_fn=None,
          scope='shortcut')

    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate*unit_rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           activation_fn=None, scope='conv3')
    output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)
Example #7
    def testSeparableConv2DSameWithInputEvenSize(self):
        n, n2 = 4, 2

        # Input image.
        x = create_test_input(1, n, n, 1)

        # Convolution kernel.
        dw = create_test_input(1, 3, 3, 1)
        dw = tf.reshape(dw, [3, 3, 1, 1])

        tf.get_variable('Conv/depthwise_weights', initializer=dw)
        tf.get_variable('Conv/pointwise_weights',
                        initializer=tf.ones([1, 1, 1, 1]))
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()

        y1 = slim.separable_conv2d(x,
                                   1, [3, 3],
                                   depth_multiplier=1,
                                   stride=1,
                                   scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
                                   [43, 66, 84, 46], [26, 37, 46, 22]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43], [43, 84]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

        y3 = xception.separable_conv2d_same(x,
                                            1,
                                            3,
                                            depth_multiplier=1,
                                            regularize_depthwise=True,
                                            stride=2,
                                            scope='Conv')
        y3_expected = y2_expected

        y4 = slim.separable_conv2d(x,
                                   1, [3, 3],
                                   depth_multiplier=1,
                                   stride=2,
                                   scope='Conv')
        y4_expected = tf.to_float([[48, 37], [37, 22]])
        y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
            print("y1:%s, y1_expected:%s " % (y1.eval(), y1_expected.eval()))
            print("y2:%s, y2_expected:%s " % (y2.eval(), y2_expected.eval()))
            print("y3:%s, y3_expected:%s " % (y3.eval(), y3_expected.eval()))
            print("y4:%s, y4_expected:%s " % (y4.eval(), y4_expected.eval()))
Example #8
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               unit_rate=1,
               rate=1,
               outputs_collections=None,
               scope=None):
    """卷积后BN的瓶颈残余单元变体。
    请注意,我们在这里使用瓶颈变体,它具有额外的瓶颈层。
    将两个连续的ResNet块放在一起使用时,应该在第一个块的最后一个单元中使用stride = 2。
    Args:
    :param inputs:tensor,[batch, height, width, channels]
    :param depth:ResNet单元的输出深度
    :param depth_bottleneck:瓶颈层的深度
    :param stride:ResNet单元的步长.确定与输入相比的单位输出的下采样量。
    :param unit_rate:Integer,用于atrous 卷积的单元率
    :param rate:Integer.atrous卷积率
    :param outputs_collections:用于添加ResNet单元输出的集合。
    :param scope:可选变量范围

    :return:ResNet单元的输出
    """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(inputs,
                                   depth, [1, 1],
                                   stride=stride,
                                   activation_fn=None,
                                   scope='shortcut')
        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')

        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate * unit_rate,
                                            scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               activation_fn=None,
                               scope='conv3')

        output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
Example #9
    def testSeparableConv2DSameWithInputOddSize(self):
        n, n2 = 5, 3

        # Input image.
        x = create_test_input(1, n, n, 1)

        # Convolution kernel.
        dw = create_test_input(1, 3, 3, 1)
        dw = tf.reshape(dw, [3, 3, 1, 1])

        tf.get_variable('Conv/depthwise_weights', initializer=dw)
        tf.get_variable('Conv/pointwise_weights',
                        initializer=tf.ones([1, 1, 1, 1]))
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.compat.v1.get_variable_scope().reuse_variables()

        y1 = slim.separable_conv2d(x,
                                   1, [3, 3],
                                   depth_multiplier=1,
                                   stride=1,
                                   scope='Conv')
        y1_expected = tf.cast(
            [[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55],
             [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]], tf.float32)
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.cast([[14, 43, 34], [43, 84, 55], [34, 55, 30]],
                              tf.float32)
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

        y3 = xception.separable_conv2d_same(x,
                                            1,
                                            3,
                                            depth_multiplier=1,
                                            regularize_depthwise=True,
                                            stride=2,
                                            scope='Conv')
        y3_expected = y2_expected

        y4 = slim.separable_conv2d(x,
                                   1, [3, 3],
                                   depth_multiplier=1,
                                   stride=2,
                                   scope='Conv')
        y4_expected = y2_expected

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
Example #10
def bottleneck_features(inputT,
                        depth,
                        depth_bottleneck,
                        stride,
                        rate=1,
                        outputs_collections=None,
                        scope=None):
  """
  simplified bottleneck_v1 with features

  Args:
    inputT: A tensor of size [batch, height, width, channel]
    depth: output depth
    depth_bottleneck: depth of bottleneck
    stride: Amount of downsampling of the output to input
    rate: An integer, rate for atrous convolution
    outputs_collections: Collection to add the output
    scope: optional variable scope
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputT]) as sc:
    depth_in = slim.utils.last_dimension(inputT.get_shape(), min_rank=4)
    
    # skip-connection part
    if depth == depth_in:
      shortcut = subsample(inputT, stride, scope='shortcut')
    else:
      shortcut = slim.conv2d(
          inputT,
          depth, [1, 1],
          stride=stride,
          activation_fn=None,
          scope='shortcut')

    # residual part
    residual = slim.conv2d(inputT, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = conv2d_same(residual, depth_bottleneck, 3, stride,
                           rate=rate, scope='conv2')
    conv_3 = slim.conv2d(residual, depth, [1, 1], stride=1,
                         activation_fn=None, normalizer_fn=None, scope='conv3')
    residual = slim.batch_norm(conv_3)

    output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)
Example #11
  def testSeparableConv2DSameWithInputOddSize(self):
    n, n2 = 5, 3

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    dw = create_test_input(1, 3, 3, 1)
    dw = tf.reshape(dw, [3, 3, 1, 1])

    tf.get_variable('Conv/depthwise_weights', initializer=dw)
    tf.get_variable('Conv/pointwise_weights',
                    initializer=tf.ones([1, 1, 1, 1]))
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()

    y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1,
                               stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34],
                               [28, 48, 66, 84, 46],
                               [43, 66, 84, 102, 55],
                               [58, 84, 102, 120, 64],
                               [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34],
                               [43, 84, 55],
                               [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

    y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1,
                                        regularize_depthwise=True,
                                        stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1,
                               stride=2, scope='Conv')
    y4_expected = y2_expected

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
Example #12
    def _atrousValues(self, bottleneck):
        """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.

    Args:
      bottleneck: The bottleneck function.
    """
        blocks = [
            resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
            resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
            resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
            resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
        ]
        nominal_stride = 8

        # Test both odd and even input dimensions.
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
            for output_stride in [1, 2, 4, 8, None]:
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(1, height, width, 3)
                        # Dense feature extraction followed by subsampling.
                        output = resnet_utils.stack_blocks_dense(
                            inputs, blocks, output_stride)
                        if output_stride is None:
                            factor = 1
                        else:
                            factor = nominal_stride // output_stride

                        output = resnet_utils.subsample(output, factor)
                        # Make the two networks use the same weights.
                        tf.get_variable_scope().reuse_variables()
                        # Feature extraction at the nominal network rate.
                        expected = self._stack_blocks_nondense(inputs, blocks)
                        sess.run(tf.initialize_all_variables())
                        output, expected = sess.run([output, expected])
                        self.assertAllClose(output,
                                            expected,
                                            atol=1e-4,
                                            rtol=1e-4)
Example #13
    def testAtrousFullyConvolutionValue(self):
        """Verify dense feature extraction with atrous convolution."""
        # This test shows that after atrous convolution is added, each output
        # pixel keeps its value at the corresponding position; only the
        # resolution of the output grid grows.
        nominal_stride = 32
        for output_stride in [4, 8, 16, 32, None]:
            with slim.arg_scope(resnet_utils.resnet_arg_scope()):
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(2, 81, 81, 3)
                        # Dense feature extraction followed by subsampling.
                        output, _ = self._resnet_small(
                            inputs,
                            None,
                            is_training=False,
                            global_pool=False,
                            output_stride=output_stride)
                        if output_stride is None:
                            factor = 1
                        else:
                            # With atrous convolution we get a dense output,
                            # whereas a normal convolution's output would be
                            # much smaller.
                            factor = nominal_stride // output_stride
                        output = resnet_utils.subsample(output, factor)
                        # Make the two networks use the same weights.
                        tf.get_variable_scope().reuse_variables()
                        # Feature extraction at the nominal network rate.
                        expected, _ = self._resnet_small(inputs,
                                                         None,
                                                         is_training=False,
                                                         global_pool=False)
                        # Give every variable its initial value in the model.
                        sess.run(tf.global_variables_initializer())
                        # If you have a Tensor t, calling t.eval() is
                        # equivalent to tf.get_default_session().run(t).
                        # From the TF docs: calling this method will execute
                        # all preceding operations that produce the inputs
                        # needed for the operation that produces this tensor.
                        # N.B. Before invoking Tensor.eval(), its graph must
                        # have been launched in a session, and either a default
                        # session must be available, or the session must be
                        # specified explicitly.
                        self.assertAllClose(output.eval(),
                                            expected.eval(),
                                            atol=1e-4,
                                            rtol=1e-4)
Example #14
  def _atrousValues(self, bottleneck):
    """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.

    Args:
      bottleneck: The bottleneck function.
    """
    blocks = [
        resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
        resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
        resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
        resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
    ]
    nominal_stride = 8

    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      for output_stride in [1, 2, 4, 8, None]:
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(1, height, width, 3)
            # Dense feature extraction followed by subsampling.
            output = resnet_utils.stack_blocks_dense(inputs,
                                                     blocks,
                                                     output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride

            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected = self._stack_blocks_nondense(inputs, blocks)
            sess.run(tf.global_variables_initializer())
            output, expected = sess.run([output, expected])
            self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
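What these _atrousValues tests verify is the bookkeeping inside resnet_utils.stack_blocks_dense: once the accumulated stride hits the requested output_stride, later units run with stride 1 and grow the atrous rate instead. A schematic sketch (variable scopes and error checks omitted; the unit args are the (depth, depth_bottleneck, stride) tuples used in the blocks above):

def stack_blocks_dense_sketch(net, blocks, output_stride=None):
    current_stride, rate = 1, 1
    for block in blocks:
        for depth, depth_bottleneck, unit_stride in block.args:
            if output_stride is not None and current_stride == output_stride:
                # Target density reached: stop striding, dilate instead.
                net = block.unit_fn(net, depth=depth,
                                    depth_bottleneck=depth_bottleneck,
                                    stride=1, rate=rate)
                rate *= unit_stride
            else:
                net = block.unit_fn(net, depth=depth,
                                    depth_bottleneck=depth_bottleneck,
                                    stride=unit_stride, rate=1)
                current_stride *= unit_stride
    return net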
Example #15
  def testConv2DSameOdd(self):
    n, n2 = 5, 3

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])

    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()

    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34],
                               [28, 48, 66, 84, 46],
                               [43, 66, 84, 102, 55],
                               [58, 84, 102, 120, 64],
                               [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34],
                               [43, 84, 55],
                               [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
Example #16
  def testConv2DSameEven(self):
    n, n2 = 4, 2

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])

    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()

    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 26],
                               [28, 48, 66, 37],
                               [43, 66, 84, 46],
                               [26, 37, 46, 22]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43],
                               [43, 84]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = tf.to_float([[48, 37],
                               [37, 22]])
    y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])

    with self.test_session() as sess:
      sess.run(tf.initialize_all_variables())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
Example #17
 def testSubsampleFourByFour(self):
     x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
     x = resnet_utils.subsample(x, 2)
     expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
     with self.test_session():
         self.assertAllClose(x.eval(), expected.eval())
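Since subsample with a 1x1 pooling window just keeps the top-left pixel of every stride x stride cell, the expected values amount to strided slicing; a quick numpy cross-check (assuming that top-left-pixel behavior):

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
print(x[:, ::2, ::2, :].reshape(2, 2))   # [[ 0.  2.] [ 8. 10.]]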
Example #18
 def testSubsampleFourByFour(self):
   x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
   x = resnet_utils.subsample(x, 2)
   expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
   with self.test_session():
     self.assertAllClose(x.eval(), expected.eval())
Example #19
 def testSubsampleThreeByThree(self):
   x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
   x = resnet_utils.subsample(x, 2)
   expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
   with self.test_session():
     self.assertAllClose(x.eval(), expected.eval())
Example #20
    def testSeparableConv2DSameWithInputEvenSize(self):
        n, n2 = 4, 2

        # Input image.
        x = create_test_input(1, n, n, 1)

        # Convolution kernel =======================================
        dw = create_test_input(1, 3, 3, 1)
        dw = tf.reshape(dw, [3, 3, 1, 1])

        # tf variables for kernel in slim.separable_conv2d()
        # enable reuse of tf variables in slim.separable_conv2d()
        tf.get_variable('Conv/depthwise_weights', initializer=dw)
        tf.get_variable('Conv/pointwise_weights',
                        initializer=tf.ones([1, 1, 1, 1]))
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()

        #-------------------------------------------------------
        # test 1: separable conv 2d of 4x4 input
        y1 = slim.separable_conv2d(x,
                                   1, [3, 3],
                                   depth_multiplier=1,
                                   stride=1,
                                   scope='Conv')

        # the expected result for n=4 from separable_conv2d()
        y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
                                   [43, 66, 84, 46], [26, 37, 46, 22]])

        # reshape from [n, n] to [1, n, n, 1]
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

        #-------------------------------------------------------
        # test 2: subsampling from 4x4 to 2x2

        # subsample the test-1 result y1 from 4x4 to 2x2
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43], [43, 84]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

        #-------------------------------------------------------
        # test 3: separable conv 2d of 4x4 input with stride 2 and explicit 'same' padding
        y3 = xception.separable_conv2d_same(x,
                                            1,
                                            3,
                                            depth_multiplier=1,
                                            regularize_depthwise=True,
                                            stride=2,
                                            scope='Conv')
        y3_expected = y2_expected

        #-------------------------------------------------------
        # test 4: separable conv 2d of 4x4 input with stride 2
        y4 = slim.separable_conv2d(x,
                                   1, [3, 3],
                                   depth_multiplier=1,
                                   stride=2,
                                   scope='Conv')
        y4_expected = tf.to_float([[48, 37], [37, 22]])
        y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])

        # run the tests in a tf session
        with self.test_session() as sess:
            print('[tf.Test] run testSeparableConv2DSameWithInputEvenSize()')
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
Example #21
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               residual_mask=None,
               scope=None):
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        flops = 0

        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut, current_flops = flopsometer.conv2d(preact,
                                                         depth, [1, 1],
                                                         stride=stride,
                                                         normalizer_fn=None,
                                                         activation_fn=None,
                                                         scope='shortcut')
            flops += current_flops

        if residual_mask is not None:
            # Max-pooling trick only works correctly when stride is 1.
            # We assume that stride=2 happens in the first layer where
            # residual_mask is None.
            assert stride == 1
            diluted_residual_mask = slim.max_pool2d(residual_mask, [3, 3],
                                                    stride=1,
                                                    padding='SAME')
        else:
            diluted_residual_mask = None

        residual, current_flops = flopsometer.conv2d(
            preact,
            depth_bottleneck, [1, 1],
            stride=1,
            output_mask=diluted_residual_mask,
            scope='conv1')
        flops += current_flops

        residual, current_flops = flopsometer.conv2d_same(
            residual,
            depth_bottleneck,
            3,
            stride,
            rate=rate,
            output_mask=residual_mask,
            scope='conv2')
        flops += current_flops

        residual, current_flops = flopsometer.conv2d(residual,
                                                     depth, [1, 1],
                                                     stride=1,
                                                     normalizer_fn=None,
                                                     activation_fn=None,
                                                     output_mask=residual_mask,
                                                     scope='conv3')
        flops += current_flops

        if residual_mask is not None:
            residual *= residual_mask

        outputs = shortcut + residual

        return outputs, flops
Example #22
 def testSubsampleThreeByThree(self):
     x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
     x = resnet_utils.subsample(x, 2)
     expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
     with self.test_session():
         self.assertAllClose(x.eval(), expected.eval())