def _build_feature_map_generator(self,
                                 image_features,
                                 depth,
                                 use_keras,
                                 use_bounded_activations=False,
                                 use_native_resize_op=False,
                                 use_explicit_padding=False,
                                 use_depthwise=False):
  """Builds an FPN top-down feature-map generator.

  Args:
    image_features: list of backbone feature maps; its length fixes the
      number of FPN levels in the Keras case.
    depth: depth (number of channels) of the output feature maps.
    use_keras: if True, return a `KerasFpnTopDownFeatureMaps` layer;
      otherwise return a closure over `fpn_top_down_feature_maps`.
    use_bounded_activations: forwarded to the generator.
    use_native_resize_op: forwarded to the generator.
    use_explicit_padding: forwarded to the generator.
    use_depthwise: forwarded to the generator.

  Returns:
    Either a Keras layer or a callable taking `image_features`, each
    producing FPN top-down feature maps.
  """
  # Options shared by both the Keras and the functional generator.
  shared_options = dict(
      depth=depth,
      use_depthwise=use_depthwise,
      use_explicit_padding=use_explicit_padding,
      use_bounded_activations=use_bounded_activations,
      use_native_resize_op=use_native_resize_op)
  if use_keras:
    return feature_map_generators.KerasFpnTopDownFeatureMaps(
        num_levels=len(image_features),
        is_training=True,
        conv_hyperparams=self._build_conv_hyperparams(),
        freeze_batchnorm=False,
        scope=None,
        name='FeatureMaps',
        **shared_options)

  def _functional_generator(image_features):
    # Thin wrapper so callers get the same callable signature either way.
    return feature_map_generators.fpn_top_down_feature_maps(
        image_features=image_features, **shared_options)

  return _functional_generator
def build(self, input_shape):
  """Builds the MobileNet-v2 backbone, FPN generator and coarse layers.

  Creates a truncated MobileNet-v2 model exposing the intermediate block
  outputs needed by the FPN, instantiates the top-down feature-map
  generator, and constructs extra stride-2 conv blocks for feature levels
  beyond the backbone's deepest output.

  Args:
    input_shape: unused here; part of the Keras `build` contract.
  """
  full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(
      batchnorm_training=(self._is_training and not self._freeze_batchnorm),
      conv_hyperparams=(self._conv_hyperparams
                        if self._override_base_feature_extractor_hyperparams
                        else None),
      weights=None,
      use_explicit_padding=self._use_explicit_padding,
      alpha=self._depth_multiplier,
      min_depth=self._min_depth,
      include_top=False)
  layer_names = [layer.name for layer in full_mobilenet_v2.layers]
  outputs = []
  for layer_idx in [4, 7, 14]:
    # Prefer the residual-add output of each block; some blocks (e.g. with
    # stride 2) have no add layer, so fall back to the projection BN output.
    add_name = 'block_{}_add'.format(layer_idx - 2)
    project_name = 'block_{}_project_BN'.format(layer_idx - 2)
    output_layer_name = add_name if add_name in layer_names else project_name
    outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output)
  layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output
  outputs.append(layer_19)
  self._mobilenet_v2 = tf.keras.Model(
      inputs=full_mobilenet_v2.inputs, outputs=outputs)
  # pylint:disable=g-long-lambda
  # Scales a channel count by the depth multiplier, clamped to min depth.
  self._depth_fn = lambda d: max(
      int(d * self._depth_multiplier), self._min_depth)
  # The backbone natively provides levels up to 5; deeper levels are
  # synthesized by the coarse feature layers below.
  self._base_fpn_max_level = min(self._fpn_max_level, 5)
  self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
  self._fpn_features_generator = (
      feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=self._num_levels,
          depth=self._depth_fn(self._additional_layer_depth),
          use_depthwise=self._use_depthwise,
          use_explicit_padding=self._use_explicit_padding,
          use_native_resize_op=self._use_native_resize_op,
          is_training=self._is_training,
          conv_hyperparams=self._conv_hyperparams,
          freeze_batchnorm=self._freeze_batchnorm,
          name='FeatureMaps'))
  # Construct coarse feature layers
  padding = 'VALID' if self._use_explicit_padding else 'SAME'
  kernel_size = 3
  stride = 2
  for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):
    coarse_feature_layers = []
    if self._use_explicit_padding:
      # kernel_size is bound as a default argument so the closure does not
      # depend on the loop variable's final value.
      def fixed_padding(features, kernel_size=kernel_size):
        return ops.fixed_padding(features, kernel_size)
      # NOTE(review): the Lambda layer reuses the name 'fixed_padding' on
      # every iteration — presumably tolerated here, but verify if these
      # layers are ever placed in a single serialized model.
      coarse_feature_layers.append(tf.keras.layers.Lambda(
          fixed_padding, name='fixed_padding'))
    # NUM_LAYERS is a module-level constant offsetting the conv layer index
    # past the backbone's own layers — confirm its value at module scope.
    layer_name = 'bottom_up_Conv2d_{}'.format(
        i - self._base_fpn_max_level + NUM_LAYERS)
    conv_block = feature_map_generators.create_conv_block(
        self._use_depthwise, kernel_size, padding, stride, layer_name,
        self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
        self._depth_fn(self._additional_layer_depth))
    coarse_feature_layers.extend(conv_block)
    self._coarse_feature_layers.append(coarse_feature_layers)
  self.built = True
def build(self, input_shape):
  """Builds the ResNet-v1 backbone, FPN generator and coarse layers.

  Args:
    input_shape: unused here; part of the Keras `build` contract.
  """
  # Backbone classification network, truncated to the block outputs that
  # feed the FPN.
  backbone = self._resnet_v1_base_model(
      batchnorm_training=(self._is_training and not self._freeze_batchnorm),
      conv_hyperparams=(self._conv_hyperparams
                        if self._override_base_feature_extractor_hyperparams
                        else None),
      depth_multiplier=self._depth_multiplier,
      min_depth=self._min_depth,
      classes=None,
      weights=None,
      include_top=False)
  block_layer_names = _RESNET_MODEL_OUTPUT_LAYERS[
      self._resnet_v1_base_model_name]
  block_outputs = [
      backbone.get_layer(layer_name).output
      for layer_name in block_layer_names
  ]
  self._resnet_v1 = tf.keras.Model(inputs=backbone.inputs,
                                   outputs=block_outputs)

  # pylint:disable=g-long-lambda
  def _scaled_depth(d):
    # Apply the depth multiplier, never dropping below the minimum depth.
    return max(int(d * self._depth_multiplier), self._min_depth)

  self._depth_fn = _scaled_depth
  # The backbone natively provides levels up to 5; deeper levels come from
  # the extra stride-2 blocks constructed below.
  self._base_fpn_max_level = min(self._fpn_max_level, 5)
  self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
  self._fpn_features_generator = (
      feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=self._num_levels,
          depth=self._depth_fn(self._additional_layer_depth),
          is_training=self._is_training,
          conv_hyperparams=self._conv_hyperparams,
          freeze_batchnorm=self._freeze_batchnorm,
          name='FeatureMaps'))
  # Construct coarse feature layers: one conv/batchnorm/activation block
  # per feature level above the backbone's deepest output.
  coarse_depth = self._depth_fn(self._additional_layer_depth)
  for level in range(self._base_fpn_max_level, self._fpn_max_level):
    block_name = 'bottom_up_block{}'.format(level)
    block = [
        tf.keras.layers.Conv2D(coarse_depth, [3, 3],
                               padding='SAME',
                               strides=2,
                               name=block_name + '_conv',
                               **self._conv_hyperparams.params()),
        self._conv_hyperparams.build_batch_norm(
            training=(self._is_training and not self._freeze_batchnorm),
            name=block_name + '_batchnorm'),
        self._conv_hyperparams.build_activation_layer(name=block_name),
    ]
    self._coarse_feature_layers.append(block)
  self.built = True
def build(self, input_shape):
  """Builds the backbone network, FPN generator and coarse feature layers.

  Creates a Keras model from the configured backbone exposing the outputs
  named in `self._used_nodes`, instantiates the FPN top-down feature-map
  generator, and constructs extra stride-2 conv blocks for feature levels
  beyond the backbone's deepest output.

  Args:
    input_shape: full input shape including the batch dimension; only
      `input_shape[1:]` is forwarded to the backbone builder.
  """
  model = build_model_base_keras_model(input_shape[1:], self._network_name,
                                       self._is_training)
  inputs = model.inputs
  outputs = [model.get_layer(x).output for x in self._used_nodes if x]
  self.net = tf.keras.Model(inputs=inputs, outputs=outputs)
  # FPN feature map generator.
  # pylint:disable=g-long-lambda
  self._depth_fn = lambda d: max(int(d * self._depth_multiplier),
                                 self._min_depth)
  # The backbone natively provides levels up to 5; deeper levels are
  # synthesized by the coarse feature layers below.
  self._base_fpn_max_level = min(self._max_feature_level, 5)
  self._num_levels = self._base_fpn_max_level + 1 - self._min_feature_level
  self._feature_map_generator = (
      feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=self._num_levels,
          depth=self._depth_fn(self._additional_layer_depth),
          use_depthwise=self._use_depthwise,
          use_explicit_padding=self._use_explicit_padding,
          is_training=self._is_training,
          conv_hyperparams=self._conv_hyperparams,
          freeze_batchnorm=self._freeze_batchnorm,
          name='FeatureMaps'))
  # Construct coarse feature layers.
  padding = 'VALID' if self._use_explicit_padding else 'SAME'
  kernel_size = 3
  stride = 2

  # Defined once outside the loop (kernel_size is a loop-invariant
  # constant), and as a named function rather than a lambda (PEP 8).
  def _fixed_padding_fn(features):
    return ops.fixed_padding(features, kernel_size=kernel_size)

  for i in range(self._base_fpn_max_level + 1, self._max_feature_level + 1):
    coarse_feature_layers = []
    if self._use_explicit_padding:
      coarse_feature_layers.append(l.Lambda(_fixed_padding_fn))
    layer_name = 'bottom_up_Conv2d_{}'.format(
        i - self._base_fpn_max_level + self._backbone_layers)
    conv_block = feature_map_generators.create_conv_block(
        self._use_depthwise, kernel_size, padding, stride, layer_name,
        self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
        self._depth_fn(self._additional_layer_depth))
    coarse_feature_layers.extend(conv_block)
    self._coarse_feature_layers.append(coarse_feature_layers)
  self.built = True
def build(self, input_shape):
  """Builds the MobileNet-v1 backbone, FPN generator and coarse layers.

  Creates a truncated MobileNet-v1 model exposing the pointwise-conv relu
  outputs needed by the FPN, instantiates the top-down feature-map
  generator, and constructs extra stride-2 conv blocks for feature levels
  beyond the backbone's deepest output.

  Args:
    input_shape: unused here; part of the Keras `build` contract.
  """
  full_mobilenet_v1 = mobilenet_v1.mobilenet_v1(
      batchnorm_training=(self._is_training and not self._freeze_batchnorm),
      conv_hyperparams=(self._conv_hyperparams
                        if self._override_base_feature_extractor_hyperparams
                        else None),
      weights=None,
      use_explicit_padding=self._use_explicit_padding,
      alpha=self._depth_multiplier,
      min_depth=self._min_depth,
      conv_defs=self._conv_defs,
      include_top=False)
  # Intermediate pointwise-conv activations feeding the FPN levels.
  conv2d_3_pointwise = full_mobilenet_v1.get_layer(
      name='conv_pw_3_relu').output
  conv2d_5_pointwise = full_mobilenet_v1.get_layer(
      name='conv_pw_5_relu').output
  conv2d_11_pointwise = full_mobilenet_v1.get_layer(
      name='conv_pw_11_relu').output
  conv2d_13_pointwise = full_mobilenet_v1.get_layer(
      name='conv_pw_13_relu').output
  self._mobilenet_v1 = tf.keras.Model(inputs=full_mobilenet_v1.inputs,
                                      outputs=[
                                          conv2d_3_pointwise,
                                          conv2d_5_pointwise,
                                          conv2d_11_pointwise,
                                          conv2d_13_pointwise
                                      ])
  # pylint:disable=g-long-lambda
  # Scales a channel count by the depth multiplier, clamped to min depth.
  self._depth_fn = lambda d: max(int(d * self._depth_multiplier), self.
                                 _min_depth)
  # The backbone natively provides levels up to 5; deeper levels are
  # synthesized by the coarse feature layers below.
  self._base_fpn_max_level = min(self._fpn_max_level, 5)
  self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
  self._fpn_features_generator = (
      feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=self._num_levels,
          depth=self._depth_fn(self._additional_layer_depth),
          use_depthwise=self._use_depthwise,
          use_explicit_padding=self._use_explicit_padding,
          use_native_resize_op=self._use_native_resize_op,
          is_training=self._is_training,
          conv_hyperparams=self._conv_hyperparams,
          freeze_batchnorm=self._freeze_batchnorm,
          name='FeatureMaps'))
  # Construct coarse feature layers
  padding = 'VALID' if self._use_explicit_padding else 'SAME'
  kernel_size = 3
  stride = 2
  for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):
    coarse_feature_layers = []
    if self._use_explicit_padding:
      # kernel_size is bound as a default argument so the closure does not
      # depend on the loop variable's final value.
      def fixed_padding(features, kernel_size=kernel_size):
        return ops.fixed_padding(features, kernel_size)
      coarse_feature_layers.append(
          tf.keras.layers.Lambda(fixed_padding, name='fixed_padding'))
    # 13 offsets the conv index past MobileNet-v1's 13 depthwise blocks.
    layer_name = 'bottom_up_Conv2d_{}'.format(
        i - self._base_fpn_max_level + 13)
    conv_block = feature_map_generators.create_conv_block(
        self._use_depthwise, kernel_size, padding, stride, layer_name,
        self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
        self._depth_fn(self._additional_layer_depth))
    coarse_feature_layers.extend(conv_block)
    self._coarse_feature_layers.append(coarse_feature_layers)
  self.built = True
def get_proposal_feature_extractor_model(self, name=None):
  """Returns a model that extracts first stage RPN features.

  Extracts features using the Resnet v1 FPN network.

  Args:
    name: A scope name to construct all variables within.

  Returns:
    A Keras model that takes preprocessed_inputs:
      A [batch, height, width, channels] float32 tensor
      representing a batch of images.

    And returns rpn_feature_map:
      A list of tensors with shape [batch, height, width, depth]
  """
  with tf.name_scope(name):
    with tf.name_scope('ResnetV1FPN'):
      # Backbone classification network, truncated to the block outputs
      # that feed the FPN.
      full_resnet_v1_model = self._resnet_v1_base_model(
          batchnorm_training=self._train_batch_norm,
          conv_hyperparams=(
              self._conv_hyperparams
              if self._override_base_feature_extractor_hyperparams else None),
          classes=None,
          weights=None,
          include_top=False)
      output_layers = _RESNET_MODEL_OUTPUT_LAYERS[
          self._resnet_v1_base_model_name]
      outputs = [
          full_resnet_v1_model.get_layer(output_layer_name).output
          for output_layer_name in output_layers
      ]
      self.classification_backbone = tf.keras.Model(
          inputs=full_resnet_v1_model.inputs,
          outputs=outputs)
      backbone_outputs = self.classification_backbone(
          full_resnet_v1_model.inputs)

      # construct FPN feature generator
      # The backbone natively provides levels up to 5; deeper levels are
      # built by the coarse feature layers below.
      self._base_fpn_max_level = min(self._fpn_max_level, 5)
      self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
      self._fpn_features_generator = (
          feature_map_generators.KerasFpnTopDownFeatureMaps(
              num_levels=self._num_levels,
              depth=self._additional_layer_depth,
              is_training=self._is_training,
              conv_hyperparams=self._conv_hyperparams,
              freeze_batchnorm=self._freeze_batchnorm,
              name='FeatureMaps'))
      # Map each required FPN level to the corresponding backbone block
      # output (block names are offset by one from the FPN level index).
      feature_block_list = []
      for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
        feature_block_list.append('block{}'.format(level - 1))
      feature_block_map = dict(
          list(zip(self._resnet_block_names, backbone_outputs)))
      fpn_input_image_features = [
          (feature_block, feature_block_map[feature_block])
          for feature_block in feature_block_list
      ]
      fpn_features = self._fpn_features_generator(fpn_input_image_features)

      # Construct coarse feature layers
      # One conv/batchnorm/activation block per feature level above the
      # backbone's deepest output.
      for i in range(self._base_fpn_max_level, self._fpn_max_level):
        layers = []
        layer_name = 'bottom_up_block{}'.format(i)
        layers.append(
            tf.keras.layers.Conv2D(
                self._additional_layer_depth, [3, 3],
                padding='SAME',
                strides=2,
                name=layer_name + '_conv',
                **self._conv_hyperparams.params()))
        layers.append(
            self._conv_hyperparams.build_batch_norm(
                training=(self._is_training and not self._freeze_batchnorm),
                name=layer_name + '_batchnorm'))
        layers.append(
            self._conv_hyperparams.build_activation_layer(name=layer_name))
        self._coarse_feature_layers.append(layers)
      # Collect the top-down FPN outputs, then chain the coarse blocks off
      # the deepest FPN map to produce the remaining levels.
      feature_maps = []
      for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
        feature_maps.append(
            fpn_features['top_down_block{}'.format(level - 1)])
      last_feature_map = fpn_features['top_down_block{}'.format(
          self._base_fpn_max_level - 1)]
      for coarse_feature_layers in self._coarse_feature_layers:
        for layer in coarse_feature_layers:
          last_feature_map = layer(last_feature_map)
      feature_maps.append(last_feature_map)
      feature_extractor_model = tf.keras.models.Model(
          inputs=full_resnet_v1_model.inputs,
          outputs=feature_maps)
      return feature_extractor_model
outputs=outputs) <<<<<<< HEAD backbone_outputs = self.classification_backbone( full_resnet_v1_model.inputs) # construct FPN feature generator ======= >>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36 self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, depth=self._additional_layer_depth, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) <<<<<<< HEAD feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) feature_block_map = dict( list(zip(self._resnet_block_names, backbone_outputs))) fpn_input_image_features = [ (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features)