Code example #1
 def testAtrousFullyConvolutionalValues(self):
     """Verify dense feature extraction with atrous convolution."""
     nominal_stride = 32
     for output_stride in [4, 8, 16, 32, None]:
         with slim.arg_scope(resnet_utils.resnet_arg_scope()):
             with tf.Graph().as_default():
                 with self.test_session() as sess:
                     tf.set_random_seed(0)
                     inputs = create_test_input(2, 81, 81, 3)
                     # Dense feature extraction followed by subsampling.
                     output, _ = self._resnet_small(
                         inputs,
                         None,
                         is_training=False,
                         global_pool=False,
                         output_stride=output_stride)
                     if output_stride is None:
                         factor = 1
                     else:
                         factor = nominal_stride // output_stride
                     output = resnet_utils.subsample(output, factor)
                     # Make the two networks use the same weights.
                     tf.get_variable_scope().reuse_variables()
                     # Feature extraction at the nominal network rate.
                     expected, _ = self._resnet_small(inputs,
                                                      None,
                                                      is_training=False,
                                                      global_pool=False)
                     sess.run(tf.global_variables_initializer())
                     self.assertAllClose(output.eval(),
                                         expected.eval(),
                                         atol=1e-4,
                                         rtol=1e-4)
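The tests above (and several below) rely on a create_test_input helper that is not shown in this section. In the slim resnet tests it is plausibly a deterministic ramp image, so that both networks see identical inputs; a sketch:

import numpy as np
import tensorflow as tf

def create_test_input(batch_size, height, width, channels):
    """Deterministic test input; a placeholder when any dimension is None."""
    if None in [batch_size, height, width, channels]:
        return tf.placeholder(tf.float32, (batch_size, height, width, channels))
    # Each pixel holds row_index + col_index, tiled over batch and channels.
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels]))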
Code example #2
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            # Stride the projection shortcut as well; with stride=1 here the
            # addition below would fail for stride > 1.
            shortcut = slim.conv2d(inputs,
                                   depth, [1, 1],
                                   stride=stride,
                                   activation_fn=None,
                                   scope='shortcut')

        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(
            residual, depth_bottleneck, 3, stride=stride, rate=rate,
            scope='conv2')  # the unit's stride is applied here, in conv2
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               activation_fn=None,
                               scope='conv3')

        output = tf.nn.relu(residual + shortcut)

        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.original_name_scope, output)
Code example #3
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):
    """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(inputs,
                                   depth, [1, 1],
                                   stride=stride,
                                   activation_fn=None,
                                   scope='shortcut')

        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               activation_fn=None,
                               scope='conv3')

        output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.original_name_scope, output)
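Per the docstring, a block that downsamples should stride only in its last unit. A minimal usage sketch, assuming the same slim/resnet_utils setup and the create_test_input helper used by the tests in this section:

with slim.arg_scope(resnet_utils.resnet_arg_scope()):
    net = create_test_input(1, 32, 32, 256)
    with tf.variable_scope('block1'):
        # stride=1 in all units except the last one, which uses stride=2.
        net = bottleneck(net, depth=256, depth_bottleneck=64, stride=1,
                         scope='unit_1')
        net = bottleneck(net, depth=256, depth_bottleneck=64, stride=2,
                         scope='unit_2')
    # net now has spatial size 16x16 and depth 256.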
Code example #4
def basic_sep(inputs,
              depth,
              stride,
              rate=1,
              use_batch_norm=True,
              outputs_collections=None,
              scope=None):
    """Builds a JITNet encoder or decoder block with separable convolutions."""
    with tf.variable_scope(scope, 'basic_sep_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if use_batch_norm:
            preact = slim.batch_norm(inputs,
                                     activation_fn=tf.nn.relu,
                                     scope='preact')
        else:
            preact = tf.contrib.layers.layer_norm(inputs,
                                                  activation_fn=tf.nn.relu,
                                                  scope='preact')

        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact,
                                   depth, [1, 1],
                                   stride=stride,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='shortcut')

        residual = separable_conv2d(preact,
                                    None, [3, 3],
                                    depth_multiplier=1,
                                    scope='conv1_depthwise')

        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               rate=rate,
                               stride=stride,
                               scope='conv1')

        residual = separable_conv2d(residual,
                                    None, [3, 3],
                                    depth_multiplier=1,
                                    stride=1,
                                    scope='conv2_depthwise')

        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               normalizer_fn=None,
                               activation_fn=None,
                               scope='conv2')

        output = shortcut + residual

        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.original_name_scope, output)
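In slim, separable_conv2d with num_outputs=None runs only the depthwise stage, so each convN_depthwise/convN pair above forms one depthwise-separable convolution. A rough equivalence sketch, assuming a 4-D NHWC tensor x and an output width depth:

# Depthwise 3x3 only; num_outputs=None skips the pointwise stage.
depthwise = slim.separable_conv2d(x, None, [3, 3], depth_multiplier=1)
# The following 1x1 convolution then acts as the pointwise stage.
pointwise = slim.conv2d(depthwise, depth, [1, 1])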
Code example #5
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1, scope=None):
    """Bottleneck residual unit variant with BN before convolutions
    When putting together 2 consecutive ResNet blocks that use this unit,
    one should use stride =2 in the last unit of first block

    NOTE: This scripts refer to keras resnet50
    Args:
        inputs: A tensor of size [batchsize, height, width, channels] (after BN)
        depth: The depth of the ResNet unit output
        depth_bottleneck: The depth of bottleneck layers
        stride: the ResNet unit's stride. Determines the amount of downsampling of
            the units output compared to its input
        scope: Optional variable_scope

    Returns:
        The ResNet unit output
    """
    with tf.variable_scope(scope, 'bottleneck', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        # shortcut
        if depth == depth_in:
            # identity block with no conv layer at shortcut
            shortcut = ru.subsample(inputs, stride, 'shortcut')
        else:
            # Stride the projection shortcut so it matches the strided conv2.
            shortcut = slim.conv2d(preact,
                                   depth, [1, 1],
                                   stride=stride,
                                   scope='shortcut')
        # layer1
        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1',
                               normalizer_fn=None,
                               activation_fn=None)
        # layer 2
        residual = ru.conv2d_same(residual,
                                  depth_bottleneck,
                                  3,
                                  stride=stride,
                                  rate=rate,
                                  scope='conv2')
        # layer 3
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               scope='conv3',
                               normalizer_fn=None,
                               activation_fn=None)

        output = shortcut + residual
        return output
Code example #6
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):

    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact,
                                   depth, [1, 1],
                                   stride=stride,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='shortcut')

        residual = slim.conv2d(preact,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')

        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               normalizer_fn=None,
                               activation_fn=None,
                               scope='conv3')

        output = shortcut + residual

        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.original_name_scope, output)
Code example #7
    def _atrousValues(self, bottleneck):
        """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.

    Args:
      bottleneck: The bottleneck function.
    """
        blocks = [
            resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
            resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
            resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
            resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
        ]
        nominal_stride = 8

        # Test both odd and even input dimensions.
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with slim.arg_scope([slim.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with tf.Graph().as_default():
                        with self.test_session() as sess:
                            tf.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            # Dense feature extraction followed by subsampling.
                            output = resnet_utils.stack_blocks_dense(
                                inputs, blocks, output_stride)
                            if output_stride is None:
                                factor = 1
                            else:
                                factor = nominal_stride // output_stride

                            output = resnet_utils.subsample(output, factor)
                            # Make the two networks use the same weights.
                            tf.get_variable_scope().reuse_variables()
                            # Feature extraction at the nominal network rate.
                            expected = self._stack_blocks_nondense(
                                inputs, blocks)
                            sess.run(tf.global_variables_initializer())
                            output, expected = sess.run([output, expected])
                            self.assertAllClose(output,
                                                expected,
                                                atol=1e-4,
                                                rtol=1e-4)
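The baseline self._stack_blocks_nondense() referenced above is not shown here. In the slim tests it is plausibly a straightforward stacker that always applies the nominal strides and never dilates, along these lines:

def _stack_blocks_nondense(self, net, blocks):
    """Sketch of the test helper: stack blocks without output_stride control."""
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]):
            for i, unit in enumerate(block.args):
                depth, depth_bottleneck, stride = unit
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    net = block.unit_fn(net,
                                        depth=depth,
                                        depth_bottleneck=depth_bottleneck,
                                        stride=stride,
                                        rate=1)
    return net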
Code example #8
File: resnet_v1.py Project: heidongxianhau/blitznet
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,
                             activation_fn=None, scope='shortcut')

    # BlitzNet variant: the stride is applied in conv1 rather than in conv2
    # (upstream slim keeps conv1 at stride 1 and strides in conv2 via
    # conv2d_same).

    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=stride,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, 1,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           activation_fn=None, scope='conv3')

    output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                            output)
Code example #9
File: resnet_v1.py Project: luke2997/hover_net
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None,
               use_bounded_activations=False):

    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(
                inputs,
                depth, [1, 1],
                stride=stride,
                activation_fn=tf.nn.relu6 if use_bounded_activations else None,
                scope='shortcut')

        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               activation_fn=None,
                               scope='conv3')

        if use_bounded_activations:
            # Use clip_by_value to simulate bandpass activation.
            residual = tf.clip_by_value(residual, -6.0, 6.0)
            output = tf.nn.relu6(shortcut + residual)
        else:
            output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
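tf.nn.relu6 computes min(max(x, 0), 6); clipping the residual to [-6, 6] before the sum keeps every activation in a fixed range, which is what makes this variant friendly to quantized inference. A quick standalone check:

import tensorflow as tf

x = tf.constant([-8.0, -2.0, 0.0, 3.0, 9.0])
with tf.Session() as sess:
    print(sess.run(tf.nn.relu6(x)))  # [0. 0. 0. 3. 6.]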
Code example #10
  def testConv2DSameOdd(self):
    n, n2 = 5, 3

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])

    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()

    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34],
                               [28, 48, 66, 84, 46],
                               [43, 66, 84, 102, 55],
                               [58, 84, 102, 120, 64],
                               [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34],
                               [43, 84, 55],
                               [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
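conv2d_same only differs from slim.conv2d(..., padding='SAME') when stride > 1 on even-sized inputs; the test uses an odd size (n = 5) precisely so that y3 and y4 agree. Upstream, resnet_utils.conv2d_same is essentially explicit padding followed by a VALID convolution, roughly:

def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D convolution with 'SAME'-style output independent of size."""
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           rate=rate, padding='SAME', scope=scope)
    # Pad explicitly so the output does not depend on the input's parity.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                             [pad_beg, pad_end], [0, 0]])
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                       rate=rate, padding='VALID', scope=scope)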
Code example #11
 def testSubsampleFourByFour(self):
     x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
     x = resnet_utils.subsample(x, 2)
     expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
     with self.test_session():
         self.assertAllClose(x.eval(), expected.eval())
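resnet_utils.subsample keeps the top-left activation of every factor x factor cell, which is why the expected values above are [0, 2, 8, 10]. Upstream it is just a 1x1 max pool, roughly:

def subsample(inputs, factor, scope=None):
    """Identity when factor == 1, else a 1x1 max pool with stride factor."""
    if factor == 1:
        return inputs
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)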
Code example #12
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               lambda_decay=LAMBDA_DECAY,
               rate=1,
               outputs_collections=None,
               scope=None):
    """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
    with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = layers.batch_norm(inputs,
                                   activation_fn=nn_ops.relu,
                                   scope='preact')
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = layers_lib.conv2d(preact,
                                         depth, [1, 1],
                                         stride=stride,
                                         normalizer_fn=None,
                                         activation_fn=None,
                                         scope='shortcut')

        residual = layers_lib.conv2d(preact,
                                     depth_bottleneck, [1, 1],
                                     stride=1,
                                     scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')
        residual = layers_lib.conv2d(residual,
                                     depth, [1, 1],
                                     stride=1,
                                     normalizer_fn=None,
                                     activation_fn=None,
                                     scope='conv3')

        # Per-channel scaling factors with an L1 sparsity penalty (network
        # slimming style); lambda_decay controls the penalty strength.
        lambda_channel = tf.get_variable(
            name='lambda_channel',
            shape=[1, 1, 1, depth],
            dtype=tf.float32,
            initializer=tf.ones_initializer(),
            regularizer=tf.contrib.layers.l1_regularizer(lambda_decay))
        tf.add_to_collection('lambda_channel', lambda_channel)

        # Broadcasting scales each channel of the residual.
        residual = residual * lambda_channel
        output = shortcut + residual
    return utils.collect_named_outputs(outputs_collections, sc.name, output)
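The l1_regularizer attached to lambda_channel only registers a penalty in the graph; it still has to be added to the training loss. A minimal sketch, assuming a standard TF1/slim training setup with an existing task_loss tensor (the name task_loss is illustrative):

# Collect the penalties registered via tf.get_variable(..., regularizer=...).
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = task_loss + tf.add_n(reg_losses)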
Code example #13
File: resnet_v2.py Project: MisterTab/TensorFlow
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):
    """Bottleneck residual unit variant with BN before convolutions.
       在卷积之前进行BN的瓶颈残差单元变体

    This is the full preactivation residual unit variant proposed in [2]. See
    Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
    variant which has an extra bottleneck layer.

    When putting together two consecutive ResNet blocks that use this unit, one
    should use stride = 2 in the last unit of the first block.

    Args:
      inputs: A tensor of size [batch, height, width, channels].
              输入张量
      depth: The depth of the ResNet unit output.
             残差网络单元输出的深度
      depth_bottleneck: The depth of the bottleneck layers.
                        瓶颈的深度
      stride: The ResNet unit's stride. Determines the amount of downsampling of
              残差网络单元的步长
        the units output compared to its input.
      rate: An integer, rate for atrous convolution.
            带洞卷积的比例
      outputs_collections: Collection to add the ResNet unit output.
                           收集网络中单元的输出
      scope: Optional variable_scope.
             可选的变量作用域

    Returns:
      The ResNet unit's output.
    """
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact,
                                   depth, [1, 1],
                                   stride=stride,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='shortcut')

        residual = slim.conv2d(preact,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               normalizer_fn=None,
                               activation_fn=None,
                               scope='conv3')

        output = shortcut + residual

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
Code example #14
 def testSubsampleThreeByThree(self):
     x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
     x = resnet_utils.subsample(x, 2)
     expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
     with self.test_session():
         self.assertAllClose(x.eval(), expected.eval())
Code example #15
    def bottleneck(self,
                   inputs,
                   depth,
                   depth_bottleneck,
                   stride,
                   rate=1,
                   deformable=None,
                   attention_option=None,
                   outputs_collections=None,
                   scope=None):
        """Bottleneck residual unit variant with BN before convolutions.
    
        This is the full preactivation residual unit variant proposed in [2]. See
        lim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')ig. 1(b) of [2] for its definition. Note that we use here the bottleneck
        variant which has an extra bottleneck layer.

        When putting together two consecutive ResNet blocks that use this unit, one
        should use stride = 2 in the last unit of the first block.

        Args:
            inputs: A tensor of size [batch, height, width, channels].
            depth: The depth of the ResNet unit output.
            depth_bottleneck: The depth of the bottleneck layers.
            stride: The ResNet unit's stride. Determines the amount of downsampling of
                the units output compared to its input.
            rate: An integer, rate for atrous convolution.
            outputs_collections: Collection to add the ResNet unit output.
            scope: Optional variable_scope.

        Returns:
            The ResNet unit's output.
        """
        with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
            depth_in = slim.utils.last_dimension(inputs.get_shape(),
                                                 min_rank=4)
            # preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
            preact = tf.nn.relu(inputs)
            if depth == depth_in:
                shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
            else:
                shortcut = slim.conv2d(preact,
                                       depth, [1, 1],
                                       stride=stride,
                                       normalizer_fn=None,
                                       activation_fn=None,
                                       scope='shortcut')

            residual = slim.conv2d(preact,
                                   depth_bottleneck, [1, 1],
                                   stride=1,
                                   scope='conv1')
            if stride == 1:
                # Deformable blocks
                if deformable is not None and deformable == '1':
                    end_point = 'Deformation'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Deform'):
                            residual_feature = slim.conv2d(residual,
                                                           depth_bottleneck,
                                                           3,
                                                           stride,
                                                           rate=rate,
                                                           padding='SAME',
                                                           scope='feature')
                            residual_shape = residual_feature.get_shape(
                            ).as_list()
                            b = residual_shape[0]
                            h = residual_shape[1]
                            w = residual_shape[2]
                            c = residual_shape[3]
                            residual_offset = slim.conv2d(inputs,
                                                          2 * depth_bottleneck,
                                                          3,
                                                          stride,
                                                          rate=rate,
                                                          padding='SAME',
                                                          scope='offset')
                            residual = df._to_b_h_w_c(
                                df.tf_batch_map_offsets(
                                    df._to_bc_h_w(residual_feature,
                                                  residual_shape),
                                    df._to_bc_h_w_2(residual_offset,
                                                    residual_shape)),
                                residual_shape)
                else:
                    residual = slim.conv2d(residual,
                                           depth_bottleneck,
                                           3,
                                           stride,
                                           rate=rate,
                                           padding='SAME',
                                           scope='conv2')
                # Attention blocks
                if attention_option is not None and attention_option[0] == '1':
                    end_point = 'Attention_S'
                    with tf.variable_scope(end_point):
                        residual_shape = residual.get_shape().as_list()
                        b = residual_shape[0]
                        h = residual_shape[1]
                        w = residual_shape[2]
                        c = residual_shape[3]
                        with tf.variable_scope('Spatial'):
                            attention_map = slim.conv2d(
                                residual,
                                c,
                                3,
                                stride=1,
                                rate=rate,
                                scope='attention_s_kernel')
                            attention_map = df._to_b_h_w_c(
                                tf.nn.softmax(
                                    df._to_bc_hw(attention_map,
                                                 residual_shape)),
                                residual_shape)
                        residual = residual * attention_map

                if attention_option is not None and attention_option[1] == '1':
                    end_point = 'Attention_C'
                    with tf.variable_scope(end_point):
                        residual_shape = residual.get_shape().as_list()
                        b = residual_shape[0]
                        h = residual_shape[1]
                        w = residual_shape[2]
                        c = residual_shape[3]
                        with tf.variable_scope('Channel'):
                            attention_map = slim.conv2d(
                                residual,
                                c,
                                3,
                                stride=1,
                                rate=rate,
                                scope='attention_c_kernel')
                            attention_map = tf.nn.softmax(
                                tf.reduce_mean(tf.reshape(
                                    attention_map, [b * h * w, c]),
                                               axis=0))
                        residual = residual * attention_map

                if attention_option is not None and attention_option[2] == '1':
                    # NOTE: this second spatial-attention block reuses the
                    # 'Attention_S' scope and will collide with the first one
                    # if attention_option[0] is also '1'.
                    end_point = 'Attention_S'
                    with tf.variable_scope(end_point):
                        residual_shape = residual.get_shape().as_list()
                        b = residual_shape[0]
                        h = residual_shape[1]
                        w = residual_shape[2]
                        c = residual_shape[3]
                        with tf.variable_scope('Spatial'):
                            attention_map = slim.conv2d(
                                residual,
                                c,
                                3,
                                stride=1,
                                rate=rate,
                                scope='attention_s_kernel')
                            attention_map = df._to_b_h_w_c(
                                tf.nn.softmax(
                                    df._to_bc_hw(attention_map,
                                                 residual_shape)),
                                residual_shape)
                        residual = residual * attention_map

                if attention_option is not None and attention_option[3] == '1':
                    end_point = 'Attention_M'
                    with tf.variable_scope(end_point):
                        residual_shape = residual.get_shape().as_list()
                        b = residual_shape[0]
                        h = residual_shape[1]
                        w = residual_shape[2]
                        c = residual_shape[3]
                        with tf.variable_scope('Modulation'):
                            attention_map = slim.conv2d(
                                residual,
                                c,
                                3,
                                stride=1,
                                rate=rate,
                                scope='attention_m_kernel')
                            attention_map = tf.clip_by_value(
                                attention_map, 0, 1)
                        residual = residual * attention_map

            else:
                residual = resnet_utils.conv2d_same(residual,
                                                    depth_bottleneck,
                                                    3,
                                                    stride,
                                                    rate=rate,
                                                    scope='conv2')

            residual = slim.conv2d(residual,
                                   depth, [1, 1],
                                   stride=1,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='conv3')

            output = shortcut + residual

            return slim.utils.collect_named_outputs(outputs_collections,
                                                    sc.name, output)
Code example #16
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None,
               use_bounded_activations=False):
    """Bottleneck residual unit variant with BN after convolutions.

    This is the original residual unit proposed in [1]. See Fig. 1(a) of [2]
    for its definition. Note that we use here the bottleneck variant which has
    an extra bottleneck layer.

    When putting together two consecutive ResNet blocks that use this unit, one
    should use stride = 2 in the last unit of the first block.

    Args:
      inputs: A tensor of size [batch, height, width, channels]
        (here [batch, 256, 30, 1]).
      depth: The depth of the ResNet unit output.
      depth_bottleneck: The depth of the bottleneck layers.
      stride: The ResNet unit's stride. Determines the amount of downsampling
        of the units output compared to its input.
      rate: An integer, rate for atrous convolution.
      outputs_collections: Collection to add the ResNet unit output.
      scope: Optional variable_scope.
      use_bounded_activations: Whether or not to use bounded activations.
        Bounded activations better lend themselves to quantized inference.

    Returns:
      The ResNet unit's output.
    """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        # Channel count of the input; min_rank=4 asserts an NHWC tensor.
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Shortcut branch.
        if depth == depth_in:
            # Input and output channel counts match: just subsample the input
            # so its spatial size matches the residual output.
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            # Otherwise project with a strided 1x1 convolution to match both
            # the channel count and the spatial size.
            shortcut = slim.conv2d(
                inputs,
                depth, [1, 1],
                stride=stride,
                activation_fn=tf.nn.relu6 if use_bounded_activations else None,
                scope='shortcut')

        # Residual branch.
        residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3,
                                            stride, rate=rate, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                               activation_fn=None, scope='conv3')

        if use_bounded_activations:
            # Use clip_by_value to simulate bandpass activation.
            residual = tf.clip_by_value(residual, -6.0, 6.0)
            output = tf.nn.relu6(shortcut + residual)
        else:
            output = tf.nn.relu(shortcut + residual)

        # The unit's output is the sum of the shortcut and residual branches.
        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.name,
                                                output)
Code example #17
    def bottle_x_neck(self,
                      inputs,
                      depth,
                      depth_bottleneck,
                      stride,
                      rate=1,
                      deformable=None,
                      attention_option=None,
                      outputs_collections=None,
                      scope=None):
        """
        ResNext
        """
        with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
            depth_in = slim.utils.last_dimension(inputs.get_shape(),
                                                 min_rank=4)
            # preact = slim.batch_norm(inputs, scope='preact')
            preact = tf.nn.relu(inputs)
            if depth == depth_in:
                shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
            else:
                shortcut = slim.conv2d(preact,
                                       depth, [1, 1],
                                       stride=stride,
                                       normalizer_fn=None,
                                       activation_fn=None,
                                       scope='shortcut')
            """
            # ResNet
            residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
            residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
            residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                                   normalizer_fn=None, activation_fn=None,
                                   scope='conv3')
            """
            depth_bottleneck_per = depth_bottleneck // 32  # integer channels per branch
            residual_split = []
            for i in range(32):
                net = slim.conv2d(preact,
                                  depth_bottleneck_per, [1, 1],
                                  stride=1,
                                  scope='conv1_%d' % i)
                net = resnet_utils.conv2d_same(net,
                                               depth_bottleneck_per,
                                               3,
                                               stride,
                                               rate=rate,
                                               scope='conv2_%d' % i)
                residual_split.append(net)
            residual = tf.concat(residual_split, axis=3)
            residual = slim.conv2d(residual,
                                   depth, [1, 1],
                                   stride=1,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='conv3')

            output = shortcut + residual

            return slim.utils.collect_named_outputs(outputs_collections,
                                                    sc.name, output)
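The 32-way split-transform-concat above is the "cardinality" idea from ResNeXt: 32 parallel bottleneck branches whose outputs are concatenated, each depth_bottleneck // 32 channels wide. A quick sanity check on the widths (illustrative values, not taken from the code above):

cardinality = 32
depth_bottleneck = 64
assert depth_bottleneck % cardinality == 0
per_branch = depth_bottleneck // cardinality
print(per_branch)  # 2 channels per branch before the 3x3 convolutions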
Code example #18
File: resnet_v1.py Project: MisterTab/TensorFlow
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None,
               use_bounded_activations=False):
    """Bottleneck residual unit variant with BN after convolutions.
       卷积之后进行BN的瓶颈残差单元变体

    This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
    its definition. Note that we use here the bottleneck variant which has an
    extra bottleneck layer.

    When putting together two consecutive ResNet blocks that use this unit, one
    should use stride = 2 in the last unit of the first block.

    Args:
      inputs: A tensor of size [batch, height, width, channels].
              输入张量
      depth: The depth of the ResNet unit output.
             网络输出的深度
      depth_bottleneck: The depth of the bottleneck layers.
                        瓶颈层的深度
      stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input.
              残差单元的步长
      rate: An integer, rate for atrous convolution.
            带洞卷积的比例
      outputs_collections: Collection to add the ResNet unit output.
                           收集残差单元的输出
      scope: Optional variable_scope.
             可选的变量作用域
      use_bounded_activations: Whether or not to use bounded activations. Bounded
                               是否使用bounded activations
        activations better lend themselves to quantized inference.

    Returns:
      The ResNet unit's output.
      返回残差单元的输出
    """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        if depth == depth_in:
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(
                inputs,
                depth, [1, 1],
                stride=stride,
                activation_fn=tf.nn.relu6 if use_bounded_activations else None,
                scope='shortcut')

        residual = slim.conv2d(inputs,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = resnet_utils.conv2d_same(residual,
                                            depth_bottleneck,
                                            3,
                                            stride,
                                            rate=rate,
                                            scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               activation_fn=None,
                               scope='conv3')

        if use_bounded_activations:
            # Use clip_by_value to simulate bandpass activation.
            residual = tf.clip_by_value(residual, -6.0, 6.0)
            output = tf.nn.relu6(shortcut + residual)
        else:
            output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
Code example #19
  def testStridingLastUnitVsSubsampleBlockEnd(self):
    """Compares subsampling at the block's last unit or block's end.
    Makes sure that the final output is the same when we use a stride at the
    last unit of a block vs. we subsample activations at the end of a block.
    """
    block = resnet_v1.resnet_v1_block

    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]

    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with slim.arg_scope([slim.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          with tf.Graph().as_default():
            with self.test_session() as sess:
              tf.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)

              # Subsampling at the last unit of the block.
              output = resnet_utils.stack_blocks_dense(
                  inputs, blocks, output_stride,
                  store_non_strided_activations=False,
                  outputs_collections='output')
              output_end_points = slim.utils.convert_collection_to_dict(
                  'output')

              # Make the two networks use the same weights.
              tf.get_variable_scope().reuse_variables()

              # Subsample activations at the end of the blocks.
              expected = resnet_utils.stack_blocks_dense(
                  inputs, blocks, output_stride,
                  store_non_strided_activations=True,
                  outputs_collections='expected')
              expected_end_points = slim.utils.convert_collection_to_dict(
                  'expected')

              sess.run(tf.global_variables_initializer())

              # Make sure that the final output is the same.
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)

              # Make sure that intermediate block activations in
              # output_end_points are subsampled versions of the corresponding
              # ones in expected_end_points.
              for i, block in enumerate(blocks[:-1]):
                output = output_end_points[block.scope]
                expected = expected_end_points[block.scope]
                atrous_activated = (output_stride is not None and
                                    2 ** i >= output_stride)
                if not atrous_activated:
                  expected = resnet_utils.subsample(expected, 2)
                output, expected = sess.run([output, expected])
                self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
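The 2 ** i >= output_stride check mirrors the bookkeeping inside stack_blocks_dense: each block strides until the cumulative stride reaches output_stride, after which further strides are converted into dilation. A hypothetical helper (names illustrative, not from the library) showing that rule:

def cumulative_strides(block_strides, output_stride):
    """Return (effective stride, atrous rate) per block under the
    stack_blocks_dense rule: stride until output_stride, then dilate."""
    current_stride, rate, out = 1, 1, []
    for s in block_strides:
        if output_stride is not None and current_stride == output_stride:
            rate *= s  # the stride is converted into dilation
            out.append((1, rate))
        else:
            current_stride *= s
            out.append((s, 1))
    return out

print(cumulative_strides([2, 2, 2, 1], output_stride=4))
# [(2, 1), (2, 1), (1, 2), (1, 2)]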