def testWithSplits(self):
  spec = copy.deepcopy(mobilenet_v2.V2_DEF)
  spec['overrides'] = {
      (ops.expanded_conv,): dict(split_expansion=2),
  }
  _, _ = mobilenet.mobilenet(
      tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
  num_convs = len(find_ops('Conv2D'))
  # All but 3 ops have 3 conv operators, the remaining 3 have one,
  # and there is one unaccounted for.
  self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
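The 'overrides' key in a conv-def spec maps a tuple of op types to keyword arguments that replace those ops' defaults, which is how the test above splits each expansion into two convolutions. A minimal sketch of the same mechanism (the expansion_size parameter and the ops.expand_input_by_factor helper are assumptions based on the same ops module, not taken from this test):

# Sketch: override the expansion factor of every expanded_conv block.
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
    (ops.expanded_conv,): dict(expansion_size=ops.expand_input_by_factor(4)),
}
logits, endpoints = mobilenet.mobilenet(
    tf.placeholder(tf.float32, (1, 224, 224, 3)), conv_defs=spec)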
def testCreation(self):
  spec = dict(mobilenet_v2.V2_DEF)
  _, ep = mobilenet.mobilenet(
      tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
  num_convs = len(find_ops('Conv2D'))

  # This is mostly a sanity test. No deep reason for these particular
  # constants.
  #
  # All but the first 2 and the last one have two convolutions, and there is
  # one extra conv that is not in the spec (logits).
  self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
  # Check that depthwise outputs are exposed.
  for i in range(2, 17):
    self.assertIn('layer_%d/depthwise_output' % i, ep)
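For reference, a small sketch (not part of the test) that enumerates what the endpoint dict exposes; the layer_N/depthwise_output names checked above are among its entries:

# Sketch: list every endpoint the builder exposes, with its static shape.
_, endpoints = mobilenet.mobilenet(
    tf.placeholder(tf.float32, (1, 224, 224, 3)))
for name in sorted(endpoints):
  print(name, endpoints[name].shape)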
def mobilenetBase(input_tensor,
                  depth_multiplier=1.0,
                  scope='MobilenetV2',
                  conv_defs=None,
                  finegrain_classification_mode=False,
                  min_depth=None,
                  divisible_by=None,
                  activation_fn=None,
                  **kwargs):
  if conv_defs is None:
    conv_defs = V2_DEF
  if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support the generic "multiplier" '
                     'parameter; use "depth_multiplier" instead.')
  if finegrain_classification_mode:
    conv_defs = copy.deepcopy(conv_defs)
    if depth_multiplier < 1:
      conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
  if activation_fn:
    conv_defs = copy.deepcopy(conv_defs)
    defaults = conv_defs['defaults']
    conv_defaults = (
        defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
    conv_defaults['activation_fn'] = activation_fn

  depth_args = {}
  # NB: do not set depth_args unless they are provided to avoid overriding
  # whatever default depth_multiplier might have thanks to arg_scope.
  if min_depth is not None:
    depth_args['min_depth'] = min_depth
  if divisible_by is not None:
    depth_args['divisible_by'] = divisible_by

  with slim.arg_scope((lib.depth_multiplier,), **depth_args):
    return lib.mobilenet(input_tensor,
                         conv_defs=conv_defs,
                         scope=scope,
                         multiplier=depth_multiplier,
                         **kwargs)
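A usage sketch for the base builder (not from the original snippets): the activation_fn branch above deep-copies the conv defs and swaps the default activation, which is assumed to be tf.nn.relu6 per V2_DEF; lib.mobilenet is assumed to return a (net, endpoints) pair, matching the docstring of the mobilenet wrapper below.

# Sketch: half-width network with plain ReLU instead of the default ReLU6.
images = tf.placeholder(tf.float32, (None, 224, 224, 3))
net, endpoints = mobilenetBase(
    images,
    depth_multiplier=0.5,
    activation_fn=tf.nn.relu)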
def mobilenet(input_tensor,
              num_classes=1001,
              depth_multiplier=1.0,
              scope='MobilenetV2',
              conv_defs=None,
              finegrain_classification_mode=False,
              min_depth=None,
              divisible_by=None,
              **kwargs):
  """Creates mobilenet V2 network.
  Inference mode is created by default. To create training use training_scope
  below.
  with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
     logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
  Args:
    input_tensor: The input tensor
    num_classes: number of classes
    depth_multiplier: The multiplier applied to scale number of
    channels in each layer. Note: this is called depth multiplier in the
    paper but the name is kept for consistency with slim's model builder.
    scope: Scope of the operator
    conv_defs: Allows to override default conv def.
    finegrain_classification_mode: When set to True, the model
    will keep the last layer large even for small multipliers. Following
    https://arxiv.org/abs/1801.04381
    suggests that it improves performance for ImageNet-type of problems.
      *Note* ignored if final_endpoint makes the builder exit earlier.
    min_depth: If provided, will ensure that all layers will have that
    many channels after application of depth multiplier.
    divisible_by: If provided will ensure that all layers # channels
    will be divisible by this number.
    **kwargs: passed directly to mobilenet.mobilenet:
      prediction_fn- what prediction function to use.
      reuse-: whether to reuse variables (if reuse set to true, scope
      must be given).
  Returns:
    logits/endpoints pair
  Raises:
    ValueError: On invalid arguments
  """
  if conv_defs is None:
    conv_defs = V2_DEF
  if 'multiplier' in kwargs:
    raise ValueError('mobilenetv2 doesn\'t support the generic "multiplier" '
                     'parameter; use "depth_multiplier" instead.')
  if finegrain_classification_mode:
    conv_defs = copy.deepcopy(conv_defs)
    if depth_multiplier < 1:
      conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier

  depth_args = {}
  # NB: do not set depth_args unless they are provided to avoid overriding
  # whatever default depth_multiplier might have thanks to arg_scope.
  if min_depth is not None:
    depth_args['min_depth'] = min_depth
  if divisible_by is not None:
    depth_args['divisible_by'] = divisible_by

  with slim.arg_scope((lib.depth_multiplier,), **depth_args):
    return lib.mobilenet(
        input_tensor,
        num_classes=num_classes,
        conv_defs=conv_defs,
        scope=scope,
        multiplier=depth_multiplier,
        **kwargs)
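As a quick illustration (a sketch, not taken from the page): combining a small depth_multiplier with finegrain_classification_mode leaves the final feature layer at full width, because the branch above divides its num_outputs by the multiplier before scaling is applied.

# Sketch: 0.35-width model whose last feature layer stays at full width.
logits, endpoints = mobilenet(
    tf.placeholder(tf.float32, (1, 224, 224, 3)),
    depth_multiplier=0.35,
    finegrain_classification_mode=True)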
def testCreationNoClasses(self):
  spec = copy.deepcopy(mobilenet_v2.V2_DEF)
  net, ep = mobilenet.mobilenet(
      tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
      num_classes=None)
  self.assertIs(net, ep['global_pool'])
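The num_classes=None path is useful for feature extraction; a minimal sketch, assuming the global-pool output keeps singleton spatial dimensions as the test's global_pool endpoint suggests:

# Sketch: use the pooled features as an embedding instead of class logits.
features, _ = mobilenet.mobilenet(
    tf.placeholder(tf.float32, (None, 224, 224, 3)), num_classes=None)
embeddings = tf.squeeze(features, [1, 2])  # assumed shape (batch, 1, 1, C)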
def mobilenet2(*args, **kwargs):
  # Thin convenience wrapper that forwards directly to mobilenet.mobilenet.
  return mobilenet.mobilenet(*args, **kwargs)