def testBuildNonExistingLayerLargeModel(self):
  """Tests that the model is built correctly without unnecessary layers."""
  inputs = tf.random_uniform((5, 331, 331, 3))
  tf.train.create_global_step()
  with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    pnasnet.build_pnasnet_large(inputs, 1000)
  vars_names = [x.op.name for x in tf.trainable_variables()]
  self.assertIn('cell_stem_0/1x1/weights', vars_names)
  self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
  batch_size = 5
  height, width = 331, 331
  num_classes = None
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
  self.assertFalse('AuxLogits' in end_points)
  self.assertFalse('Predictions' in end_points)
  self.assertTrue(net.op.name.startswith('final_layer/Mean'))
  self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testOverrideHParamsLargeModel(self):
  batch_size = 5
  height, width = 331, 331
  num_classes = 1000
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  config = pnasnet.large_imagenet_config()
  config.set_hparam('data_format', 'NCHW')
  with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    _, end_points = pnasnet.build_pnasnet_large(
        inputs, num_classes, config=config)
  self.assertListEqual(
      end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def _extract_proposal_features(self, preprocessed_inputs, scope):
  """Extracts first stage RPN features.

  Extracts features using the first half of the PNASNet network.
  We construct the network in `align_feature_maps=True` mode, which means
  that all VALID paddings in the network are changed to SAME padding so that
  the feature maps are aligned.

  Args:
    preprocessed_inputs: A [batch, height, width, channels] float32 tensor
      representing a batch of images.
    scope: A scope name.

  Returns:
    rpn_feature_map: A tensor with shape [batch, height, width, depth]
    end_points: A dictionary mapping feature extractor tensor names to tensors

  Raises:
    ValueError: If the created network is missing the required activation.
  """
  del scope

  if len(preprocessed_inputs.get_shape().as_list()) != 4:
    raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                     'tensor of shape %s' % preprocessed_inputs.get_shape())

  with slim.arg_scope(pnasnet_large_arg_scope_for_detection(
      is_batch_norm_training=self._train_batch_norm)):
    with arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d],
                   reuse=self._reuse_weights):
      _, end_points = pnasnet.build_pnasnet_large(
          preprocessed_inputs, num_classes=None,
          is_training=self._is_training,
          final_endpoint='Cell_7')

  # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160.
  # Cell_7 is the last cell before the second reduction.
  rpn_feature_map = tf.concat([end_points['Cell_6'],
                               end_points['Cell_7']], 3)

  # pnasnet.py does not maintain the batch size in the first dimension.
  # This workaround lets us retain the batch size for the ops below.
  batch = preprocessed_inputs.get_shape().as_list()[0]
  shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]
  rpn_feature_map_shape = [batch] + shape_without_batch
  rpn_feature_map.set_shape(rpn_feature_map_shape)

  return rpn_feature_map, end_points
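# A minimal sketch (not part of the extractor; the helper name is illustrative
# only) of the set_shape workaround used above: when a tensor's static shape
# has lost the batch dimension, it can be re-attached from a tensor whose
# batch size is statically known.
def _restore_static_batch_dim(reference, features):
  """Copies the statically known batch size of `reference` onto `features`."""
  batch = reference.get_shape().as_list()[0]
  features.set_shape([batch] + features.get_shape().as_list()[1:])
  return features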
def testNoAuxHeadLargeModel(self):
  batch_size = 5
  height, width = 331, 331
  num_classes = 1000
  for use_aux_head in (True, False):
    tf.reset_default_graph()
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    config = pnasnet.large_imagenet_config()
    config.set_hparam('use_aux_head', int(use_aux_head))
    with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
      _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
                                                  config=config)
    self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testBuildLogitsLargeModel(self):
  batch_size = 5
  height, width = 331, 331
  num_classes = 1000
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
  auxlogits = end_points['AuxLogits']
  predictions = end_points['Predictions']
  self.assertListEqual(auxlogits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(predictions.get_shape().as_list(),
                       [batch_size, num_classes])
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  feature_map_layout = {
      'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''],
      'layer_depth': [-1, -1, 512, 256, 256, 128],
      'use_explicit_padding': self._use_explicit_padding,
      'use_depthwise': self._use_depthwise,
  }

  with slim.arg_scope(
      pnasnet_large_arg_scope_for_detection(
          is_batch_norm_training=self._is_training)):
    with slim.arg_scope(
        [slim.conv2d, slim.batch_norm, slim.separable_conv2d],
        reuse=self._reuse_weights):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = pnasnet.build_pnasnet_large(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            num_classes=None,
            is_training=self._is_training,
            final_endpoint='Cell_11')
  with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams_fn()):
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)

  return feature_maps.values()
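# Illustrative sketch of how the feature_map_layout above is interpreted (the
# author's reading of feature_map_generators.multi_resolution_feature_maps,
# not part of the extractor): non-empty 'from_layer' entries reuse the named
# PNASNet endpoints, a layer_depth of -1 keeps that endpoint's own depth, and
# each '' entry asks the generator to append an extra, coarser feature map of
# the listed depth on top of the previous map.
illustrative_layout = {
    'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''],  # 2 reused + 4 new
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}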
def testAllEndPointsShapesLargeModel(self):
  batch_size = 5
  height, width = 331, 331
  num_classes = 1000
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
    _, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
  endpoints_shapes = {'Stem': [batch_size, 42, 42, 540],
                      'Cell_0': [batch_size, 42, 42, 1080],
                      'Cell_1': [batch_size, 42, 42, 1080],
                      'Cell_2': [batch_size, 42, 42, 1080],
                      'Cell_3': [batch_size, 42, 42, 1080],
                      'Cell_4': [batch_size, 21, 21, 2160],
                      'Cell_5': [batch_size, 21, 21, 2160],
                      'Cell_6': [batch_size, 21, 21, 2160],
                      'Cell_7': [batch_size, 21, 21, 2160],
                      'Cell_8': [batch_size, 11, 11, 4320],
                      'Cell_9': [batch_size, 11, 11, 4320],
                      'Cell_10': [batch_size, 11, 11, 4320],
                      'Cell_11': [batch_size, 11, 11, 4320],
                      'global_pool': [batch_size, 4320],
                      # Logits and predictions
                      'AuxLogits': [batch_size, 1000],
                      'Predictions': [batch_size, 1000],
                      'Logits': [batch_size, 1000],
                     }
  self.assertEqual(len(end_points), 17)
  self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
  for endpoint_name in endpoints_shapes:
    tf.logging.info('Endpoint name: {}'.format(endpoint_name))
    expected_shape = endpoints_shapes[endpoint_name]
    self.assertIn(endpoint_name, end_points)
    self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                         expected_shape)
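# Reading of the shape table above (derived from the test values themselves,
# not from the model code): for a 331x331 input the spatial resolution halves
# and the channel depth doubles at each reduction step, 42x42x1080
# (Cell_0-Cell_3) -> 21x21x2160 (Cell_4-Cell_7) -> 11x11x4320
# (Cell_8-Cell_11), and global_pool collapses the final 11x11 map to a
# 4320-dimensional vector.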