Example #1
 def test_bottleneck_block_with_superpixel_layer(self):
   """Test for creating a model with fused bottleneck block arguments."""
   images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)
   global_params = efficientnet_model.GlobalParams(
        1.0,  # batch_norm_momentum
        1.0,  # batch_norm_epsilon
        0,  # dropout_rate
        'channels_last',  # data_format
       num_classes=10,
       batch_norm=utils.TpuBatchNormalization)
   blocks_args = [
       efficientnet_model.BlockArgs(
           kernel_size=3,
           num_repeat=3,
           input_filters=3,
           output_filters=6,
           expand_ratio=6,
           id_skip=True,
           strides=[2, 2],
           conv_type=0,
           fused_conv=0,
           super_pixel=1)
   ]
   model = efficientnet_model.Model(blocks_args, global_params)
   outputs = model(images, training=True)
   self.assertEqual((10, 10), outputs.shape)
Example #2
 def test_variables(self):
   """Test for variables in blocks to be included in `model.variables`."""
   images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)
   global_params = efficientnet_model.GlobalParams(
        1.0,  # batch_norm_momentum
        1.0,  # batch_norm_epsilon
        0,  # dropout_rate
        'channels_last',  # data_format
       num_classes=10,
       batch_norm=utils.TpuBatchNormalization)
   blocks_args = [
       efficientnet_model.BlockArgs(
           kernel_size=3,
           num_repeat=3,
           input_filters=3,
           output_filters=6,
           expand_ratio=6,
           id_skip=False,
           strides=[2, 2],
           se_ratio=0.8,
           conv_type=0,
           fused_conv=0,
           super_pixel=0)
   ]
   model = efficientnet_model.Model(blocks_args, global_params)
   _ = model(images, training=True)
   var_names = {var.name for var in model.variables}
   self.assertIn('model/blocks_0/conv2d/kernel:0', var_names)
Example #3
 def test_reduction_endpoint_with_single_block_without_sp(self):
   """Test reduction point with single block/layer."""
   images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)
   global_params = efficientnet_model.GlobalParams(
        1.0,  # batch_norm_momentum
        1.0,  # batch_norm_epsilon
        0,  # dropout_rate
        'channels_last',  # data_format
       num_classes=10,
       batch_norm=utils.TpuBatchNormalization)
   blocks_args = [
       efficientnet_model.BlockArgs(
           kernel_size=3,
           num_repeat=1,
           input_filters=3,
           output_filters=6,
           expand_ratio=6,
           id_skip=False,
           strides=[2, 2],
           se_ratio=0.8,
           conv_type=0,
           fused_conv=0,
           super_pixel=0)
   ]
   model = efficientnet_model.Model(blocks_args, global_params)
   _ = model(images, training=True)
   self.assertIn('reduction_1', model.endpoints)
   # single block should have one and only one reduction endpoint
   self.assertNotIn('reduction_2', model.endpoints)
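The three test methods above reference self and omit their surrounding module, so they are not runnable as shown. The following is a minimal harness sketch for hosting them; the import paths and the class name are assumptions inferred from the snippets, not part of the examples themselves.

# Sketch of a test harness for Examples #1-#3. The module names
# efficientnet_model and utils are assumed from the snippets above.
import tensorflow.compat.v1 as tf

import efficientnet_model  # assumed to provide Model, GlobalParams, BlockArgs
import utils               # assumed to provide TpuBatchNormalization


class EfficientnetModelTest(tf.test.TestCase):
  pass  # The test_* methods from Examples #1-#3 would be pasted here.


if __name__ == '__main__':
  tf.test.main()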
Example #4
def efficientnet_lite(width_coefficient=None,
                      depth_coefficient=None,
                      dropout_rate=0.2,
                      survival_prob=0.8):
    """Creates a efficientnet model."""
    global_params = efficientnet_model.GlobalParams(
        blocks_args=_DEFAULT_BLOCKS_ARGS,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        survival_prob=survival_prob,
        data_format='channels_last',
        num_classes=1000,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        relu_fn=tf.nn.relu6,  # Relu6 is for easier quantization.
        # The default is TPU-specific batch norm.
        # The alternative is tf.layers.BatchNormalization.
        batch_norm=utils.TpuBatchNormalization,  # TPU-specific requirement.
        clip_projection_output=False,
        fix_head_stem=True,  # Don't scale stem and head.
        local_pooling=True,  # Use local pooling as a workaround for TFLite issues.
        use_se=False)  # SE is not well supported on many lite devices.
    return global_params
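Note that efficientnet_lite only assembles and returns the GlobalParams tuple; it does not build a network. Below is a hedged usage sketch that combines the returned params with an explicit BlockArgs list in the same way as Examples #1-#3; whether the real builder instead decodes _DEFAULT_BLOCKS_ARGS into BlockArgs first is not shown in these snippets.

# Usage sketch only: assumes efficientnet_model (imported as in the harness
# sketch above) and that Model accepts a list of BlockArgs plus these
# GlobalParams, exactly as in the test examples.
import tensorflow.compat.v1 as tf

global_params = efficientnet_lite(width_coefficient=1.0,
                                  depth_coefficient=1.0,
                                  dropout_rate=0.2)
blocks_args = [
    efficientnet_model.BlockArgs(
        kernel_size=3, num_repeat=1, input_filters=3, output_filters=6,
        expand_ratio=6, id_skip=False, strides=[2, 2], se_ratio=None,
        conv_type=0, fused_conv=0, super_pixel=0)
]
model = efficientnet_model.Model(blocks_args, global_params)
logits = model(tf.zeros((1, 224, 224, 3), dtype=tf.float32), training=False)
# With num_classes=1000 in the params above, logits should have shape (1, 1000).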
Example #5
def efficientnet(width_coefficient=None,
                 depth_coefficient=None,
                 dropout_rate=0.2,
                 survival_prob=0.8):
    """Creates a efficientnet model."""
    global_params = efficientnet_model.GlobalParams(
        blocks_args=_DEFAULT_BLOCKS_ARGS,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        survival_prob=survival_prob,
        data_format='channels_last',
        num_classes=1000,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        relu_fn=tf.nn.swish,
        # The default is TPU-specific batch norm.
        # The alternative is tf.layers.BatchNormalization.
        batch_norm=utils.TpuBatchNormalization,  # TPU-specific requirement.
        use_se=True,
        clip_projection_output=False)
    return global_params
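The mixed positional and keyword construction of GlobalParams in these examples suggests it is a collections.namedtuple, although the snippets do not show its definition. Under that assumption, individual fields of the returned params can be overridden after construction with _replace, for example to retarget the classifier head:

# Sketch, assuming GlobalParams is a namedtuple: override a selected field
# of the returned params without rebuilding the whole tuple.
global_params = efficientnet(width_coefficient=1.0,
                             depth_coefficient=1.1,
                             dropout_rate=0.3)
global_params = global_params._replace(num_classes=10)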