def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)

  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      _, image_features = self._resnet_base_fn(
          inputs=ops.pad_to_multiple(preprocessed_inputs,
                                     self._pad_to_multiple),
          num_classes=None,
          is_training=self._is_training and self._batch_norm_trainable,
          global_pool=False,
          output_stride=None,
          store_non_strided_activations=True,
          scope=scope)
    image_features = self._filter_features(image_features)
    last_feature_map = image_features['block4']
  with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams):
      for i in range(5, 7):
        last_feature_map = slim.conv2d(
            last_feature_map,
            num_outputs=256,
            kernel_size=[3, 3],
            stride=2,
            padding='SAME',
            scope='block{}'.format(i))
        image_features['bottomup_{}'.format(i)] = last_feature_map
      feature_maps = feature_map_generators.fpn_top_down_feature_maps(
          [image_features[key] for key in
           ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']],
          depth=256,
          scope='top_down_features')
  return list(feature_maps.values())


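# Illustrative-only sketch (not part of the object_detection codebase): the
# rough top-down merge pattern that fpn_top_down_feature_maps is built
# around -- lateral 1x1 projections, nearest-neighbor upsampling and
# elementwise addition, as in the FPN paper. The function name, the use of
# tf.layers, and the 3x3 smoothing convolutions are assumptions made for
# illustration, not the library's actual implementation.
import tensorflow as tf


def toy_top_down_feature_maps(image_features, depth):
  # image_features: list of tensors ordered from high to low resolution,
  # e.g. [block2, block3, block4]. Returns merged maps in the same order.
  top_down = tf.layers.conv2d(image_features[-1], depth, 1, padding='same')
  output_maps = [top_down]
  for features in reversed(image_features[:-1]):
    lateral = tf.layers.conv2d(features, depth, 1, padding='same')
    # Upsample the coarser map to the lateral map's resolution and add.
    top_down = tf.image.resize_nearest_neighbor(
        top_down, tf.shape(lateral)[1:3]) + lateral
    # 3x3 convolution to smooth the merged map.
    output_maps.append(tf.layers.conv2d(top_down, depth, 3, padding='same'))
  return list(reversed(output_maps))

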
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
      'use_depthwise': self._use_depthwise,
      'use_explicit_padding': self._use_explicit_padding,
  }

  with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:
    with slim.arg_scope(
        mobilenet_v2.training_scope(
            is_training=(self._is_training and self._batch_norm_trainable),
            bn_decay=0.9997)), \
        slim.arg_scope(
            [mobilenet.depth_multiplier], min_depth=self._min_depth):
      # TODO(b/68150321): Enable fused batch norm once quantization
      # supports it.
      with slim.arg_scope([slim.batch_norm], fused=False):
        _, image_features = mobilenet_v2.mobilenet_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='layer_19',
            depth_multiplier=self._depth_multiplier,
            use_explicit_padding=self._use_explicit_padding,
            scope=scope)
    with slim.arg_scope(self._conv_hyperparams):
      # TODO(b/68150321): Enable fused batch norm once quantization
      # supports it.
      with slim.arg_scope([slim.batch_norm], fused=False):
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)
  return list(feature_maps.values())


def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '',
                     ''][:self._num_layers],
      'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
      'use_depthwise': self._use_depthwise,
      'use_explicit_padding': self._use_explicit_padding,
  }

  with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:
    with slim.arg_scope(
        mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \
        slim.arg_scope(
            [mobilenet.depth_multiplier], min_depth=self._min_depth):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = mobilenet_v2.mobilenet_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='layer_19',
            depth_multiplier=self._depth_multiplier,
            use_explicit_padding=self._use_explicit_padding,
            scope=scope)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)
  return list(feature_maps.values())


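# Quick standalone illustration of the layout slicing above. The value 4 for
# num_layers is a made-up example, not taken from any real config; the lists
# are copied from the feature_map_layout in the function above.
num_layers = 4
from_layer = ['layer_15/expansion_output', 'layer_19', '', '', '', '']
layer_depth = [-1, -1, 512, 256, 256, 128]
assert from_layer[:num_layers] == ['layer_15/expansion_output', 'layer_19',
                                   '', '']
assert layer_depth[:num_layers] == [-1, -1, 512, 256]
# With fewer layers requested, only the first num_layers feature maps are
# generated instead of six.

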
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                     '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(self._conv_hyperparams):
    # TODO: Enable fused batch norm once quantization supports it.
    with slim.arg_scope([slim.batch_norm], fused=False):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)
  return list(feature_maps.values())


def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  feature_map_layout = {
      'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(self._conv_hyperparams):
    with tf.variable_scope('InceptionV2',
                           reuse=self._reuse_weights) as scope:
      _, image_features = inception_v2.inception_v2_base(
          ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
          final_endpoint='Mixed_5c',
          min_depth=self._min_depth,
          depth_multiplier=self._depth_multiplier,
          scope=scope)
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)
  return list(feature_maps.values())


def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
                                  min_depth, insert_1x1_conv, image_features):
  """Generates multi-resolution feature maps from input image features.

  Generates multi-scale feature maps for detection as in the SSD papers by
  Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.

  More specifically, it performs the following two tasks:
  1) If a layer name is provided in the configuration, returns that layer as
     a feature map.
  2) If a layer name is left as an empty string, constructs a new feature map
     based on the spatial shape and depth configuration. Note that the
     current implementation only supports generating new layers using
     convolutions of stride 2, resulting in a spatial resolution reduction by
     a factor of 2.

  An example of the configuration for Inception V3:
  {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128],
    'anchor_strides': [16, 32, 64, -1, -1, -1]
  }

  Args:
    feature_map_layout: Dictionary of specifications for the feature map
      layouts in the following format (Inception V2/V3 respectively):
      {
        'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128],
        'anchor_strides': [16, 32, 64, -1, -1, -1]
      }
      or
      {
        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128],
        'anchor_strides': [16, 32, 64, -1, -1, -1]
      }
      If 'from_layer' is specified, the specified feature map is directly
      used as a box predictor layer, and the layer_depth is directly inferred
      from the feature map (instead of using the provided 'layer_depth'
      parameter). In this case, our convention is to set 'layer_depth' to -1
      for clarity. Otherwise, if 'from_layer' is an empty string, then the
      box predictor layer will be built from the previous layer using
      convolution operations. Note that the current implementation only
      supports generating new layers using convolutions of stride 2
      (resulting in a spatial resolution reduction by a factor of 2), and
      will be extended to a more flexible design. Finally, the optional
      'anchor_strides' can be used to specify the anchor stride at each layer
      where 'from_layer' is specified. Our convention is to set
      'anchor_strides' to -1 at the positions where 'from_layer' is an empty
      string; the anchor strides at these layers will be inferred from the
      previous layer's anchor stride and the current layer's stride length.
      In the case where 'anchor_strides' is not specified, the anchor strides
      will default to the image width and height divided by the number of
      anchors.
    depth_multiplier: Depth multiplier for convolutional layers.
    min_depth: Minimum depth for convolutional layers.
    insert_1x1_conv: A boolean indicating whether an additional 1x1
      convolution should be inserted before shrinking the feature map.
    image_features: A dictionary of handles to activation tensors from the
      base feature extractor.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to tensors
      where each tensor has shape [batch, height_i, width_i, depth_i].

  Raises:
    ValueError: if the number of entries in 'from_layer' and 'layer_depth' do
      not match.
    ValueError: if the generated layer does not have the same resolution as
      specified.
  """
  depth_fn = get_depth_fn(depth_multiplier, min_depth)

  feature_map_keys = []
  feature_maps = []
  base_from_layer = ''
  feature_map_strides = None
  use_depthwise = False
  if 'anchor_strides' in feature_map_layout:
    feature_map_strides = feature_map_layout['anchor_strides']
  if 'use_depthwise' in feature_map_layout:
    use_depthwise = feature_map_layout['use_depthwise']
  for index, (from_layer, layer_depth) in enumerate(
      zip(feature_map_layout['from_layer'],
          feature_map_layout['layer_depth'])):
    if from_layer:
      feature_map = image_features[from_layer]
      base_from_layer = from_layer
      feature_map_keys.append(from_layer)
    else:
      pre_layer = feature_maps[-1]
      intermediate_layer = pre_layer
      if insert_1x1_conv:
        layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
            base_from_layer, index, depth_fn(layer_depth / 2))
        intermediate_layer = slim.conv2d(
            pre_layer,
            depth_fn(layer_depth / 2), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
      stride = 2
      layer_name = '{}_2_Conv2d_{}_3x3_s2_{}'.format(
          base_from_layer, index, depth_fn(layer_depth))
      if use_depthwise:
        feature_map = slim.separable_conv2d(
            ops.pad_to_multiple(intermediate_layer, stride),
            None, [3, 3],
            depth_multiplier=1,
            padding='SAME',
            stride=stride,
            scope=layer_name + '_depthwise')
        feature_map = slim.conv2d(
            feature_map,
            depth_fn(layer_depth), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
      else:
        feature_map = slim.conv2d(
            ops.pad_to_multiple(intermediate_layer, stride),
            depth_fn(layer_depth), [3, 3],
            padding='SAME',
            stride=stride,
            scope=layer_name)
      if (index > 0 and feature_map_strides and
          feature_map_strides[index - 1] > 0):
        feature_map_strides[index] = stride * feature_map_strides[index - 1]
      feature_map_keys.append(layer_name)
    feature_maps.append(feature_map)
  return collections.OrderedDict(
      [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])


def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)

  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=None,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            min_base_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
    image_features = self._filter_features(image_features)
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope(self._fpn_scope_name,
                             reuse=self._reuse_weights):
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_block_list.append('block{}'.format(level - 1))
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(self._additional_layer_depth),
            use_native_resize_op=self._use_native_resize_op)
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(
              fpn_features['top_down_block{}'.format(level - 1)])
        last_feature_map = fpn_features['top_down_block{}'.format(
            base_fpn_max_level - 1)]
        # Construct coarse features
        for i in range(base_fpn_max_level, self._fpn_max_level):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=depth_fn(self._additional_layer_depth),
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='bottom_up_block{}'.format(i))
          feature_maps.append(last_feature_map)
  return feature_maps


def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)

  with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
    with slim.arg_scope(
        mobilenet_v1.mobilenet_v1_arg_scope(
            is_training=None, regularize_depthwise=True)):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            conv_defs=self._conv_defs,
            use_explicit_padding=self._use_explicit_padding,
            scope=scope)
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope('fpn', reuse=self._reuse_weights):
        feature_blocks = [
            'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
            'Conv2d_13_pointwise'
        ]
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_block_list.append(feature_blocks[level - 2])
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(self._additional_layer_depth),
            use_depthwise=self._use_depthwise,
            use_explicit_padding=self._use_explicit_padding,
            use_native_resize_op=self._use_native_resize_op)
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(fpn_features['top_down_{}'.format(
              feature_blocks[level - 2])])
        last_feature_map = fpn_features['top_down_{}'.format(
            feature_blocks[base_fpn_max_level - 2])]
        # Construct coarse features
        padding = 'VALID' if self._use_explicit_padding else 'SAME'
        kernel_size = 3
        for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
          if self._use_depthwise:
            conv_op = functools.partial(
                slim.separable_conv2d, depth_multiplier=1)
          else:
            conv_op = slim.conv2d
          if self._use_explicit_padding:
            last_feature_map = ops.fixed_padding(
                last_feature_map, kernel_size)
          last_feature_map = conv_op(
              last_feature_map,
              num_outputs=depth_fn(self._additional_layer_depth),
              kernel_size=[kernel_size, kernel_size],
              stride=2,
              padding=padding,
              scope='bottom_up_Conv2d_{}'.format(
                  i - base_fpn_max_level + 13))
          feature_maps.append(last_feature_map)
  return feature_maps


def test_padding(self):
  tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
  padded_tensor = ops.pad_to_multiple(tensor, 4)
  with self.test_session() as sess:
    padded_tensor_out = sess.run(padded_tensor)
    self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape)
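

# A complementary sketch (not in the original test file, name is
# hypothetical): when the spatial dimensions are already a multiple of the
# requested value, pad_to_multiple is expected to leave the shape unchanged.
def test_padding_is_noop_when_already_multiple(self):
  tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]])
  padded_tensor = ops.pad_to_multiple(tensor, 2)
  with self.test_session() as sess:
    padded_tensor_out = sess.run(padded_tensor)
    self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape)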