Example #1
def dense_block(x,
                nb_layers,
                nb_filter,
                growth_rate,
                dropout_rate=None,
                weight_decay=1E-4):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones

    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: tuple of (keras tensor with nb_layers of conv_block appended, updated nb_filter)

    '''

    # Channel axis depends on the backend's image data format
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    concat_feat = x

    for i in range(nb_layers):
        x = conv_block(concat_feat, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x],
                                  axis=concat_axis,
                                  name='concat_' + str(i))
        nb_filter += growth_rate

    return concat_feat, nb_filter
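The snippet above assumes a `conv_block` helper and the Keras imports from its original file. A minimal sketch of such a helper (BN, ReLU, 3x3 convolution, optional dropout, i.e. the usual DenseNet composite layer) might look like this; the names and regularization details are assumptions, not the original implementation:

from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, Dropout, concatenate
from keras.regularizers import l2

def conv_block(x, nb_filter, dropout_rate=None, weight_decay=1E-4):
    # BatchNorm -> ReLU -> 3x3 convolution, followed by optional dropout
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (3, 3), padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x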
Example #2
# Layer-name fragments used by fire_module; in the original SqueezeNet implementation
# they are module-level string constants.
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"

def fire_module(x, fire_id, squeeze=16, expand=64):
    # SqueezeNet fire module: squeeze with 1x1 convolutions, then expand through
    # parallel 1x1 and 3x3 branches concatenated along the channel axis.
    s_id = 'fire' + str(fire_id) + '/'

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x
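A minimal usage sketch (assumed, not part of the original file): stacking fire modules after a stem convolution, with the standard Keras imports the snippet relies on.

from keras import backend as K
from keras.layers import Activation, Convolution2D, Input, MaxPooling2D, concatenate
from keras.models import Model

inputs = Input(shape=(227, 227, 3))
x = Convolution2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(inputs)
x = Activation('relu', name='relu_conv1')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
x = fire_module(x, fire_id=2, squeeze=16, expand=64)
x = fire_module(x, fire_id=3, squeeze=16, expand=64)
model = Model(inputs, x, name='squeezenet_stem')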
Example #3
def InceptionV3(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
  """Instantiates the Inception v3 architecture.

  Optionally loads weights pre-trained
  on ImageNet. Note that when using TensorFlow,
  for best performance you should set
  `image_data_format="channels_last"` in your Keras config
  at ~/.keras/keras.json.
  The model and the weights are compatible with both
  TensorFlow and Theano. The data format
  convention used by the model is the one
  specified in your Keras config file.
  Note that the default input image size for this model is 299x299.

  Arguments:
      include_top: whether to include the fully-connected
          layer at the top of the network.
      weights: one of `None` (random initialization)
          or "imagenet" (pre-training on ImageNet).
      input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
          to use as image input for the model.
      input_shape: optional shape tuple, only to be specified
          if `include_top` is False (otherwise the input shape
          has to be `(299, 299, 3)` (with `channels_last` data format)
          or `(3, 299, 299)` (with `channels_first` data format)).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 139.
          E.g. `(150, 150, 3)` would be one valid value.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model will be
              the 4D tensor output of the
              last convolutional layer.
          - `avg` means that global average pooling
              will be applied to the output of the
              last convolutional layer, and thus
              the output of the model will be a 2D tensor.
          - `max` means that global max pooling will
              be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is True, and
          if no `weights` argument is specified.

  Returns:
      A Keras model instance.

  Raises:
      ValueError: in case of invalid argument for `weights`,
          or invalid input shape.
  """
  if weights not in {'imagenet', None}:
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization) or `imagenet` '
                     '(pre-training on ImageNet).')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as imagenet with `include_top`'
                     ' as true, `classes` should be 1000')

  # Determine proper input shape
  input_shape = _obtain_input_shape(
      input_shape,
      default_size=299,
      min_size=139,
      data_format=K.image_data_format(),
      require_flatten=False,
      weights=weights)

  if input_tensor is None:
    img_input = Input(shape=input_shape)
  else:
    img_input = Input(tensor=input_tensor, shape=input_shape)

  if K.image_data_format() == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = 3

  x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
  x = conv2d_bn(x, 32, 3, 3, padding='valid')
  x = conv2d_bn(x, 64, 3, 3)
  x = MaxPooling2D((3, 3), strides=(2, 2))(x)

  x = conv2d_bn(x, 80, 1, 1, padding='valid')
  x = conv2d_bn(x, 192, 3, 3, padding='valid')
  x = MaxPooling2D((3, 3), strides=(2, 2))(x)

  # mixed 0, 1, 2: 35 x 35 x 256
  branch1x1 = conv2d_bn(x, 64, 1, 1)

  branch5x5 = conv2d_bn(x, 48, 1, 1)
  branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch5x5, branch3x3dbl, branch_pool],
      axis=channel_axis,
      name='mixed0')

  # mixed 1: 35 x 35 x 256
  branch1x1 = conv2d_bn(x, 64, 1, 1)

  branch5x5 = conv2d_bn(x, 48, 1, 1)
  branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch5x5, branch3x3dbl, branch_pool],
      axis=channel_axis,
      name='mixed1')

  # mixed 2: 35 x 35 x 256
  branch1x1 = conv2d_bn(x, 64, 1, 1)

  branch5x5 = conv2d_bn(x, 48, 1, 1)
  branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch5x5, branch3x3dbl, branch_pool],
      axis=channel_axis,
      name='mixed2')

  # mixed 3: 17 x 17 x 768
  branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(
      branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')

  branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
  x = layers.concatenate(
      [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')

  # mixed 4: 17 x 17 x 768
  branch1x1 = conv2d_bn(x, 192, 1, 1)

  branch7x7 = conv2d_bn(x, 128, 1, 1)
  branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
  branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

  branch7x7dbl = conv2d_bn(x, 128, 1, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch7x7, branch7x7dbl, branch_pool],
      axis=channel_axis,
      name='mixed4')

  # mixed 5, 6: 17 x 17 x 768
  for i in range(2):
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 160, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 160, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name='mixed' + str(5 + i))

  # mixed 7: 17 x 17 x 768
  branch1x1 = conv2d_bn(x, 192, 1, 1)

  branch7x7 = conv2d_bn(x, 192, 1, 1)
  branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
  branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

  branch7x7dbl = conv2d_bn(x, 192, 1, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch7x7, branch7x7dbl, branch_pool],
      axis=channel_axis,
      name='mixed7')

  # mixed 8: 8 x 8 x 1280
  branch3x3 = conv2d_bn(x, 192, 1, 1)
  branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')

  branch7x7x3 = conv2d_bn(x, 192, 1, 1)
  branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
  branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
  branch7x7x3 = conv2d_bn(
      branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')

  branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
  x = layers.concatenate(
      [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')

  # mixed 9: 8 x 8 x 2048
  for i in range(2):
    branch1x1 = conv2d_bn(x, 320, 1, 1)

    branch3x3 = conv2d_bn(x, 384, 1, 1)
    branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
    branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
    branch3x3 = layers.concatenate(
        [branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))

    branch3x3dbl = conv2d_bn(x, 448, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
    branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
    branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
    branch3x3dbl = layers.concatenate(
        [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch3x3, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed' + str(9 + i))
  if include_top:
    # Classification block
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input
  # Create model.
  model = Model(inputs, x, name='inception_v3')

  # load weights
  if weights == 'imagenet':
    if include_top:
      weights_path = get_file(
          'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
          WEIGHTS_PATH,
          cache_subdir='models',
          file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
    else:
      weights_path = get_file(
          'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
          WEIGHTS_PATH_NO_TOP,
          cache_subdir='models',
          file_hash='bcbd6486424b2319ff4ef7d526e38f63')
    model.load_weights(weights_path)
  return model
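A short usage sketch (assumed, not part of the snippet): the classifier with the ImageNet top, and a headless feature extractor with global average pooling.

# 1000-class ImageNet classifier; inputs are 299x299 RGB images scaled to [-1, 1].
model = InceptionV3(weights='imagenet', include_top=True)

# Headless feature extractor: outputs one 2048-dimensional vector per image.
feature_extractor = InceptionV3(weights='imagenet', include_top=False,
                                pooling='avg', input_shape=(299, 299, 3))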
Example #4
def InceptionV3(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
  """Instantiates the Inception v3 architecture.

  Optionally loads weights pre-trained
  on ImageNet. Note that when using TensorFlow,
  for best performance you should set
  `image_data_format='channels_last'` in your Keras config
  at ~/.keras/keras.json.
  The model and the weights are compatible with both
  TensorFlow and Theano. The data format
  convention used by the model is the one
  specified in your Keras config file.
  Note that the default input image size for this model is 299x299.

  Arguments:
      include_top: whether to include the fully-connected
          layer at the top of the network.
      weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
      input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
          to use as image input for the model.
      input_shape: optional shape tuple, only to be specified
          if `include_top` is False (otherwise the input shape
          has to be `(299, 299, 3)` (with `channels_last` data format)
          or `(3, 299, 299)` (with `channels_first` data format)).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 139.
          E.g. `(150, 150, 3)` would be one valid value.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model will be
              the 4D tensor output of the
              last convolutional layer.
          - `avg` means that global average pooling
              will be applied to the output of the
              last convolutional layer, and thus
              the output of the model will be a 2D tensor.
          - `max` means that global max pooling will
              be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is True, and
          if no `weights` argument is specified.

  Returns:
      A Keras model instance.

  Raises:
      ValueError: in case of invalid argument for `weights`,
          or invalid input shape.
  """
  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as imagenet with `include_top`'
                     ' as true, `classes` should be 1000')

  # Determine proper input shape
  input_shape = _obtain_input_shape(
      input_shape,
      default_size=299,
      min_size=139,
      data_format=K.image_data_format(),
      require_flatten=False,
      weights=weights)

  if input_tensor is None:
    img_input = Input(shape=input_shape)
  else:
    if not K.is_keras_tensor(input_tensor):
      img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  if K.image_data_format() == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = 3

  x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
  x = conv2d_bn(x, 32, 3, 3, padding='valid')
  x = conv2d_bn(x, 64, 3, 3)
  x = MaxPooling2D((3, 3), strides=(2, 2))(x)

  x = conv2d_bn(x, 80, 1, 1, padding='valid')
  x = conv2d_bn(x, 192, 3, 3, padding='valid')
  x = MaxPooling2D((3, 3), strides=(2, 2))(x)

  # mixed 0, 1, 2: 35 x 35 x 256
  branch1x1 = conv2d_bn(x, 64, 1, 1)

  branch5x5 = conv2d_bn(x, 48, 1, 1)
  branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch5x5, branch3x3dbl, branch_pool],
      axis=channel_axis,
      name='mixed0')

  # mixed 1: 35 x 35 x 256
  branch1x1 = conv2d_bn(x, 64, 1, 1)

  branch5x5 = conv2d_bn(x, 48, 1, 1)
  branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch5x5, branch3x3dbl, branch_pool],
      axis=channel_axis,
      name='mixed1')

  # mixed 2: 35 x 35 x 256
  branch1x1 = conv2d_bn(x, 64, 1, 1)

  branch5x5 = conv2d_bn(x, 48, 1, 1)
  branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch5x5, branch3x3dbl, branch_pool],
      axis=channel_axis,
      name='mixed2')

  # mixed 3: 17 x 17 x 768
  branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

  branch3x3dbl = conv2d_bn(x, 64, 1, 1)
  branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
  branch3x3dbl = conv2d_bn(
      branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')

  branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
  x = layers.concatenate(
      [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')

  # mixed 4: 17 x 17 x 768
  branch1x1 = conv2d_bn(x, 192, 1, 1)

  branch7x7 = conv2d_bn(x, 128, 1, 1)
  branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
  branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

  branch7x7dbl = conv2d_bn(x, 128, 1, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch7x7, branch7x7dbl, branch_pool],
      axis=channel_axis,
      name='mixed4')

  # mixed 5, 6: 17 x 17 x 768
  for i in range(2):
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 160, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 160, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name='mixed' + str(5 + i))

  # mixed 7: 17 x 17 x 768
  branch1x1 = conv2d_bn(x, 192, 1, 1)

  branch7x7 = conv2d_bn(x, 192, 1, 1)
  branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
  branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

  branch7x7dbl = conv2d_bn(x, 192, 1, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
  branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

  branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
  x = layers.concatenate(
      [branch1x1, branch7x7, branch7x7dbl, branch_pool],
      axis=channel_axis,
      name='mixed7')

  # mixed 8: 8 x 8 x 1280
  branch3x3 = conv2d_bn(x, 192, 1, 1)
  branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')

  branch7x7x3 = conv2d_bn(x, 192, 1, 1)
  branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
  branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
  branch7x7x3 = conv2d_bn(
      branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')

  branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
  x = layers.concatenate(
      [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')

  # mixed 9: 8 x 8 x 2048
  for i in range(2):
    branch1x1 = conv2d_bn(x, 320, 1, 1)

    branch3x3 = conv2d_bn(x, 384, 1, 1)
    branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
    branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
    branch3x3 = layers.concatenate(
        [branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))

    branch3x3dbl = conv2d_bn(x, 448, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
    branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
    branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
    branch3x3dbl = layers.concatenate(
        [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch3x3, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed' + str(9 + i))
  if include_top:
    # Classification block
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input
  # Create model.
  model = Model(inputs, x, name='inception_v3')

  # load weights
  if weights == 'imagenet':
    if include_top:
      weights_path = get_file(
          'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
          WEIGHTS_PATH,
          cache_subdir='models',
          file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
    else:
      weights_path = get_file(
          'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
          WEIGHTS_PATH_NO_TOP,
          cache_subdir='models',
          file_hash='bcbd6486424b2319ff4ef7d526e38f63')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
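This variant additionally accepts a local weights file and an existing Keras tensor as the model input. A brief sketch (paths and names are illustrative, not from the original file):

# Load weights from a local file instead of downloading the ImageNet weights.
model = InceptionV3(weights='/path/to/local_weights.h5', include_top=False, pooling='max')

# Reuse an existing Keras tensor as the model input.
from keras.layers import Input
image_input = Input(shape=(299, 299, 3))
backbone = InceptionV3(weights=None, include_top=False, input_tensor=image_input)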
Example #5
def _reduction_a_cell(ip, p, filters, block_id=None):
  """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

  Arguments:
      ip: Input tensor `x`
      p: Input tensor `p`
      filters: Number of output filters
      block_id: String block_id

  Returns:
      A tuple of Keras tensors `(x, ip)`: the cell output and the unchanged input
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

  with K.name_scope('reduction_A_block_%s' % block_id):
    p = _adjust_block(p, ip, filters, block_id)

    h = Activation('relu')(ip)
    h = Conv2D(
        filters, (1, 1),
        strides=(1, 1),
        padding='same',
        name='reduction_conv_1_%s' % block_id,
        use_bias=False,
        kernel_initializer='he_normal')(h)
    h = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='reduction_bn_1_%s' % block_id)(h)

    with K.name_scope('block_1'):
      x1_1 = _separable_conv_block(
          h,
          filters, (5, 5),
          strides=(2, 2),
          block_id='reduction_left1_%s' % block_id)
      x1_2 = _separable_conv_block(
          p,
          filters, (7, 7),
          strides=(2, 2),
          block_id='reduction_1_%s' % block_id)
      x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)

    with K.name_scope('block_2'):
      x2_1 = MaxPooling2D(
          (3, 3),
          strides=(2, 2),
          padding='same',
          name='reduction_left2_%s' % block_id)(h)
      x2_2 = _separable_conv_block(
          p,
          filters, (7, 7),
          strides=(2, 2),
          block_id='reduction_right2_%s' % block_id)
      x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)

    with K.name_scope('block_3'):
      x3_1 = AveragePooling2D(
          (3, 3),
          strides=(2, 2),
          padding='same',
          name='reduction_left3_%s' % block_id)(h)
      x3_2 = _separable_conv_block(
          p,
          filters, (5, 5),
          strides=(2, 2),
          block_id='reduction_right3_%s' % block_id)
      x3 = add([x3_1, x3_2], name='reduction_add3_%s' % block_id)

    with K.name_scope('block_4'):
      x4 = AveragePooling2D(
          (3, 3),
          strides=(1, 1),
          padding='same',
          name='reduction_left4_%s' % block_id)(x1)
      x4 = add([x2, x4])

    with K.name_scope('block_5'):
      x5_1 = _separable_conv_block(
          x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)
      x5_2 = MaxPooling2D(
          (3, 3),
          strides=(2, 2),
          padding='same',
          name='reduction_right5_%s' % block_id)(h)
      x5 = add([x5_1, x5_2], name='reduction_add4_%s' % block_id)

    x = concatenate(
        [x2, x3, x4, x5],
        axis=channel_dim,
        name='reduction_concat_%s' % block_id)
    return x, ip
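An illustrative call (assumed, not from the original file), showing the cell reducing spatial resolution on the outputs of two earlier cells; it relies on the `_adjust_block` and `_separable_conv_block` helpers defined alongside it.

from keras.layers import Input

ip = Input(shape=(32, 32, 44))  # output of the current cell stack
p = Input(shape=(32, 32, 44))   # output of the cell before it
x, skip = _reduction_a_cell(ip, p, filters=88, block_id='reduce_1')
# `x` has half the spatial resolution of `ip`; `skip` is `ip`, kept for the next cell.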
Example #6
def _adjust_block(p, ip, filters, block_id=None):
  """Adjusts the input `previous path` to match the shape of the `input`.

  Used in situations where the output number of filters needs to be changed.

  Arguments:
      p: Input tensor which needs to be modified
      ip: Input tensor whose shape needs to be matched
      filters: Number of output filters to be matched
      block_id: String block_id

  Returns:
      Adjusted Keras tensor
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
  img_dim = 2 if K.image_data_format() == 'channels_first' else -2

  ip_shape = K.int_shape(ip)

  if p is not None:
    p_shape = K.int_shape(p)

  with K.name_scope('adjust_block'):
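    # Three cases: no previous path (reuse `ip`), spatial mismatch (factorized stride-2
    # reduction over two offset average-pooling paths), or channel mismatch (1x1 projection).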
    if p is None:
      p = ip

    elif p_shape[img_dim] != ip_shape[img_dim]:
      with K.name_scope('adjust_reduction_block_%s' % block_id):
        p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p)

        p1 = AveragePooling2D(
            (1, 1),
            strides=(2, 2),
            padding='valid',
            name='adjust_avg_pool_1_%s' % block_id)(p)
        p1 = Conv2D(
            filters // 2, (1, 1),
            padding='same',
            use_bias=False,
            name='adjust_conv_1_%s' % block_id,
            kernel_initializer='he_normal')(p1)

        # Shift the map by one pixel (pad bottom/right, crop top/left) so this second
        # stride-2 pooling path samples the pixel grid offset from p1's.
        p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
        p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
        p2 = AveragePooling2D(
            (1, 1),
            strides=(2, 2),
            padding='valid',
            name='adjust_avg_pool_2_%s' % block_id)(p2)
        p2 = Conv2D(
            filters // 2, (1, 1),
            padding='same',
            use_bias=False,
            name='adjust_conv_2_%s' % block_id,
            kernel_initializer='he_normal')(p2)

        p = concatenate([p1, p2], axis=channel_dim)
        p = BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='adjust_bn_%s' % block_id)(p)

    elif p_shape[channel_dim] != filters:
      with K.name_scope('adjust_projection_block_%s' % block_id):
        p = Activation('relu')(p)
        p = Conv2D(
            filters, (1, 1),
            strides=(1, 1),
            padding='same',
            name='adjust_conv_projection_%s' % block_id,
            use_bias=False,
            kernel_initializer='he_normal')(p)
        p = BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='adjust_bn_%s' % block_id)(p)
  return p