Example #1
0
 def testMobilenetBase(self):
     """mobilenet_base should expose the pre-pooling feature map."""
     tf.reset_default_graph()
     # Enforce a floor of 32 channels despite the very small depth multiplier.
     with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
         images = tf.placeholder(tf.float32, (10, 224, 224, 16))
         features, _ = mobilenet_v2.mobilenet_base(
             images, conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
         self.assertEqual(features.get_shape().as_list(), [10, 7, 7, 128])
Example #2
0
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
        # The feature generators below assume a minimum spatial size.
        preprocessed_inputs = shape_utils.check_min_image_dim(
            33, preprocessed_inputs)

        # Pyramid layout: the first two maps come straight from MobilenetV2
        # endpoints; '' entries are newly created layers. layer_depth -1 keeps
        # the endpoint's native depth.
        feature_map_layout = {
            'from_layer':
            ['layer_15/expansion_output', 'layer_19', '', '', '',
             ''][:self._num_layers],
            'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
            'use_depthwise':
            self._use_depthwise,
            'use_explicit_padding':
            self._use_explicit_padding,
        }

        with tf.variable_scope('MobilenetV2',
                               reuse=self._reuse_weights) as scope:
            # is_training=None defers the batch-norm training mode to the
            # surrounding graph; bn_decay is overridden to 0.9997.
            with slim.arg_scope(
                mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \
                slim.arg_scope(
                    [mobilenet.depth_multiplier], min_depth=self._min_depth):
                # Optionally apply the detector's conv hyperparams to the
                # backbone as well.
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams else
                      context_manager.IdentityContextManager()):
                    _, image_features = mobilenet_v2.mobilenet_base(
                        ops.pad_to_multiple(preprocessed_inputs,
                                            self._pad_to_multiple),
                        final_endpoint='layer_19',
                        depth_multiplier=self._depth_multiplier,
                        use_explicit_padding=self._use_explicit_padding,
                        scope=scope)
                with slim.arg_scope(self._conv_hyperparams_fn()):
                    feature_maps = feature_map_generators.multi_resolution_feature_maps(
                        feature_map_layout=feature_map_layout,
                        depth_multiplier=self._depth_multiplier,
                        min_depth=self._min_depth,
                        insert_1x1_conv=True,
                        image_features=image_features)

        # multi_resolution_feature_maps returns an ordered mapping; under
        # Python 3 `.values()` is a non-indexable view, so materialize a list
        # to match the documented return type.
        return list(feature_maps.values())
Example #3
0
def _mobilenet_v2(net,
                  depth_multiplier,
                  output_stride,
                  conv_defs=None,
                  divisible_by=None,
                  reuse=None,
                  scope=None,
                  final_endpoint=None):
    """Wrap mobilenet_v2.mobilenet_base to add support for 'reuse'.

  Args:
    net: Input tensor of shape [batch_size, height, width, channels].
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. Must be greater than zero; values in (0, 1)
      reduce the parameter count / computation cost of the model.
    output_stride: Requested ratio of input to output spatial resolution.
      If not None, atrous convolution is used as needed to keep the
      activation maps at this resolution. Allowed values are 8 (accurate
      fully convolutional mode), 16 (fast fully convolutional mode), and
      32 (classification mode).
    conv_defs: MobileNet conv definitions.
    divisible_by: None (use the default) or an integer that every layer's
      channel count must be divisible by. Used in MobileNet.
    reuse: Reuse model variables.
    scope: Optional variable scope.
    final_endpoint: The endpoint to construct the network up to.

  Returns:
    Features extracted by MobileNetv2.
  """
    is_unit_multiplier = depth_multiplier == 1.0
    if divisible_by is None:
        # Default MobileNet-V2 uses multiples of 8; a fractional multiplier
        # drops the constraint so small depths are representable.
        divisible_by = 8 if is_unit_multiplier else 1
    if conv_defs is None:
        conv_defs = mobilenet_v2.V2_DEF
    endpoint = final_endpoint or _MOBILENET_V2_FINAL_ENDPOINT
    with tf.variable_scope(scope, 'MobilenetV2', [net], reuse=reuse) as sc:
        return mobilenet_v2.mobilenet_base(
            net,
            conv_defs=conv_defs,
            depth_multiplier=depth_multiplier,
            min_depth=8 if is_unit_multiplier else 1,
            divisible_by=divisible_by,
            final_endpoint=endpoint,
            output_stride=output_stride,
            scope=sc)
Example #4
0
    def encode(self, input_tensor, name):
        """Encode the input tensor with the MobileNet-V2 backbone.

        :param input_tensor: input image tensor to encode
        :param name: name for the op (currently unused in this body)
        :return: OrderedDict mapping endpoint names to
            {'data': tensor, 'shape': static shape list}
        """
        with slim.arg_scope(mobilenet_v2.training_scope(is_training=self._is_training)):
            _, endpoints = mobilenet_v2.mobilenet_base(
                input_tensor=input_tensor,
                is_training=self._is_training)

            ret = OrderedDict()
            # Expose the same three intermediate endpoints the decoder
            # consumes, each with its tensor and static shape.
            for endpoint_name in ('layer_7', 'layer_14', 'layer_18'):
                endpoint_tensor = endpoints[endpoint_name]
                entry = dict()
                entry['data'] = endpoint_tensor
                entry['shape'] = endpoint_tensor.get_shape().as_list()
                ret[endpoint_name] = entry

        return ret
Example #5
0
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
        # Downstream feature generators assume a minimum spatial size.
        preprocessed_inputs = shape_utils.check_min_image_dim(
            33, preprocessed_inputs)

        with tf.variable_scope('MobilenetV2',
                               reuse=self._reuse_weights) as scope:
            # is_training=None defers the batch-norm training mode to the
            # surrounding graph; bn_decay is overridden to 0.9997.
            with slim.arg_scope(
                mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \
                slim.arg_scope(
                    [mobilenet.depth_multiplier], min_depth=self._min_depth):
                # Optionally apply the detector's conv hyperparams to the
                # backbone as well.
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams else
                      context_manager.IdentityContextManager()):
                    _, image_features = mobilenet_v2.mobilenet_base(
                        ops.pad_to_multiple(preprocessed_inputs,
                                            self._pad_to_multiple),
                        final_endpoint='layer_19',
                        depth_multiplier=self._depth_multiplier,
                        conv_defs=self._conv_defs,
                        use_explicit_padding=self._use_explicit_padding,
                        scope=scope)
            # Scale a nominal depth by the multiplier, flooring at
            # self._min_depth.
            depth_fn = lambda d: max(int(d * self._depth_multiplier), self.
                                     _min_depth)
            with slim.arg_scope(self._conv_hyperparams_fn()):
                with tf.variable_scope('fpn', reuse=self._reuse_weights):
                    # Backbone endpoints feeding the FPN; index i here
                    # corresponds to pyramid level i + 2 (see the `level - 2`
                    # indexing below).
                    feature_blocks = [
                        'layer_4', 'layer_7', 'layer_14', 'layer_19'
                    ]
                    # The top-down FPN only covers levels up to 5; any coarser
                    # requested levels are synthesized afterwards with
                    # stride-2 convolutions.
                    base_fpn_max_level = min(self._fpn_max_level, 5)
                    feature_block_list = []
                    for level in range(self._fpn_min_level,
                                       base_fpn_max_level + 1):
                        feature_block_list.append(feature_blocks[level - 2])
                    fpn_features = feature_map_generators.fpn_top_down_feature_maps(
                        [(key, image_features[key])
                         for key in feature_block_list],
                        depth=depth_fn(self._additional_layer_depth),
                        use_depthwise=self._use_depthwise,
                        use_explicit_padding=self._use_explicit_padding)
                    feature_maps = []
                    for level in range(self._fpn_min_level,
                                       base_fpn_max_level + 1):
                        feature_maps.append(fpn_features['top_down_{}'.format(
                            feature_blocks[level - 2])])
                    # Coarser maps below are built by repeatedly downsampling
                    # the last (coarsest) FPN output.
                    last_feature_map = fpn_features['top_down_{}'.format(
                        feature_blocks[base_fpn_max_level - 2])]
                    # Construct coarse features
                    padding = 'VALID' if self._use_explicit_padding else 'SAME'
                    kernel_size = 3
                    for i in range(base_fpn_max_level + 1,
                                   self._fpn_max_level + 1):
                        if self._use_depthwise:
                            conv_op = functools.partial(slim.separable_conv2d,
                                                        depth_multiplier=1)
                        else:
                            conv_op = slim.conv2d
                        if self._use_explicit_padding:
                            # Pad explicitly so the VALID conv below behaves
                            # like SAME with deterministic zero placement.
                            last_feature_map = ops.fixed_padding(
                                last_feature_map, kernel_size)
                        last_feature_map = conv_op(
                            last_feature_map,
                            num_outputs=depth_fn(self._additional_layer_depth),
                            kernel_size=[kernel_size, kernel_size],
                            stride=2,
                            padding=padding,
                            scope='bottom_up_Conv2d_{}'.format(
                                i - base_fpn_max_level + 19))
                        feature_maps.append(last_feature_map)
        return feature_maps