def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
  sc = mobilenet.training_scope(is_training=False)
  self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
  sc = mobilenet.training_scope(is_training=True)
  self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
  sc = mobilenet.training_scope()
  self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
def training_scope(**kwargs):
  """Defines MobilenetV2 training scope.

  Usage:
    with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
      logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

  Args:
    **kwargs: Passed to mobilenet.training_scope. The following parameters
      are supported:
      weight_decay: The weight decay to use for regularizing the model.
      stddev: Standard deviation for initialization; if negative, uses Xavier.
      dropout_keep_prob: Dropout keep probability.
      bn_decay: Decay for the batch norm moving averages.

  Returns:
    An `arg_scope` to use for the mobilenet v2 model.
  """
  return lib.training_scope(**kwargs)
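# A minimal usage sketch, not part of the original module: `images` is an
# assumed NHWC float tensor and `mobilenet_v2` the slim MobilenetV2 module.
# Passing is_training=None omits the flag from the batch-norm arg scope
# entirely (see the tests in this section), so the caller controls the
# train/eval switch itself.
def _example_build_graph(images):
  with tf.contrib.slim.arg_scope(training_scope(is_training=None)):
    logits, endpoints = mobilenet_v2.mobilenet(images)
  return logits, endpoints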
def model_fn(self, is_training=True, *args, **kwargs):
  x_input = None
  y_input = None
  original_image = None
  if len(args) > 0:
    if is_training:
      x_input, y_input = args[0].dequeue()
    else:
      x_input, original_image = args[0]
      x_input = tf.identity(x_input, 'input_node')
  else:
    x_input = tf.placeholder(
        tf.float32,
        shape=[1, ctx.params.output_size, ctx.params.output_size, 3],
        name='input_node')

  with slim.arg_scope(mobilenet.training_scope(is_training=is_training)):
    _, end_points = self._mobilenet_v2(
        x_input, depth_multiplier=1.0, output_stride=16)

  last_layer = end_points['layer_18/output']  # _ x 8 x 8 x _
  middle_layer = end_points['layer_4/depthwise_output']  # _ x 32 x 32 x _

  with tf.variable_scope(None, 'DECODER', [last_layer, middle_layer],
                         reuse=None):
    logits = self.decoder(last_layer, middle_layer, is_training,
                          [2, 4, 8], 0.00004)

  if is_training:
    # Resize predictions back to the original label resolution.
    logits = tf.image.resize_bilinear(
        logits, [ctx.params.origin_height, ctx.params.origin_width])

    # Per-pixel cross entropy against one-hot labels.
    one_hot_labels = slim.one_hot_encoding(y_input, ctx.params.num_classes)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        labels=one_hot_labels, logits=logits)

    # Foreground probability: second channel of the softmax output.
    softmax_logits = tf.nn.softmax(logits, axis=-1)
    _, logits_1 = tf.split(softmax_logits, num_or_size_splits=2, axis=3)
    logits_1 = tf.reshape(
        logits_1,
        shape=[ctx.params.batch_size, ctx.params.origin_height,
               ctx.params.origin_width])

    # Focal-loss style weighting: (1 - p_t)^2 down-weights easy pixels,
    # where p_t is the probability assigned to the true class; the
    # 5.02 / 4.98 term adds a mild class-balancing factor.
    label_batch_float = tf.cast(y_input, tf.float32)
    weights_pt = tf.pow(
        1 - (logits_1 * label_batch_float +
             (1 - logits_1) * (1 - label_batch_float)), 2)
    weights_total = (logits_1 * 5.02 + (1 - logits_1) * 4.98) * weights_pt
    focal_loss = weights_total * cross_entropy
    loss = tf.reduce_mean(focal_loss)
    tf.losses.add_loss(loss)
    return loss
  else:
    logits = tf.nn.softmax(logits)
    logits = tf.identity(logits, 'output_node')
    _, positive_s = tf.split(logits, 2, 3)
    return positive_s, original_image
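# A standalone sketch of the weighting used in model_fn above, factored out
# for clarity. All names here are illustrative, not from the original code:
# `prob_fg` is the predicted foreground probability and `label` a {0, 1}
# mask; gamma and the alpha values match the constants used above.
def _focal_weights(prob_fg, label, gamma=2.0, alpha_fg=5.02, alpha_bg=4.98):
  # p_t: probability assigned to the true class at each pixel.
  p_t = prob_fg * label + (1.0 - prob_fg) * (1.0 - label)
  # (1 - p_t)^gamma suppresses well-classified pixels; the alpha term
  # rebalances foreground vs. background contributions.
  return (prob_fg * alpha_fg +
          (1.0 - prob_fg) * alpha_bg) * tf.pow(1.0 - p_t, gamma)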
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
  sc = mobilenet.training_scope(is_training=None)
  self.assertNotIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])