def InceptionV3(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000,
                classifier_activation='softmax',
                partition_layer='input'):
    """Instantiates the Inception v3 architecture.

  Reference paper:
  - [Rethinking the Inception Architecture for Computer Vision](
      http://arxiv.org/abs/1512.00567) (CVPR 2016)

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in the `tf.keras.backend.image_data_format()`.

  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.inception_v3.preprocess_input` for an example.

  Arguments:
    include_top: Boolean, whether to include the fully-connected
      layer at the top, as the last layer of the network. Defaults to `True`.
    weights: One of `None` (random initialization),
      `imagenet` (pre-training on ImageNet),
      or the path to the weights file to be loaded. Defaults to `imagenet`.
    input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model. `input_tensor` is useful for sharing
      inputs between multiple different networks. Defaults to `None`.
    input_shape: Optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(299, 299, 3)` (with `channels_last` data format)
      or `(3, 299, 299)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 75.
      E.g. `(150, 150, 3)` would be one valid value.
      `input_shape` will be ignored if the `input_tensor` is provided.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` (default) means that the output of the model will be
          the 4D tensor output of the last convolutional block.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified. Defaults to 1000.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
    partition_layer: A `str`, the name of the layer whose output is fed to this
      model as its input. With the default `'input'` the full network is built;
      any other supported layer name (e.g. `'mixed5'`) builds only the layers
      after that point, which is useful for partitioned/split inference.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
    # NOTE: this partitioned variant bypasses the upstream argument validation
    # and input handling and always builds a fresh `input_start` tensor below.
    # The original (disabled) logic was:
    #
    #   if not (weights in {'imagenet', None} or os.path.exists(weights)):
    #     raise ValueError('The `weights` argument should be either '
    #                      '`None` (random initialization), `imagenet` '
    #                      '(pre-training on ImageNet), '
    #                      'or the path to the weights file to be loaded.')
    #
    #   if weights == 'imagenet' and include_top and classes != 1000:
    #     raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
    #                      ' as true, `classes` should be 1000')
    #
    #   # Determine proper input shape
    #   input_shape = imagenet_utils.obtain_input_shape(
    #       input_shape,
    #       default_size=299,
    #       min_size=75,
    #       data_format=backend.image_data_format(),
    #       require_flatten=include_top,
    #       weights=weights)
    #
    #   if input_tensor is None:
    #     img_input = layers.Input(shape=input_shape)
    #   else:
    #     if not backend.is_keras_tensor(input_tensor):
    #       img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    #     else:
    #       img_input = input_tensor
    #
    #   if backend.image_data_format() == 'channels_first':
    #     channel_axis = 1
    #   else:
    #     channel_axis = 3

    channel_axis = 3  # channels_last is assumed throughout this variant
    img_input = layers.Input(shape=input_shape, name="input_start")

    flag = False
    if partition_layer == 'input':
        flag = True
        x = img_input
        x = conv2d_bn(x, 32, 3, 3, strides=(2, 2), padding='valid', name='1')

    if flag or partition_layer == 'conv2d':
        if partition_layer == 'conv2d':
            x = img_input
            flag = True
        x = conv2d_bn(x, 32, 3, 3, padding='valid', name='2')

    if flag or partition_layer == 'conv2d_1':
        if partition_layer == 'conv2d_1':
            x = img_input
            flag = True
        x = conv2d_bn(x, 64, 3, 3, name='3')

    if flag or partition_layer == 'conv2d_2':
        if partition_layer == 'conv2d_2':
            x = img_input
            flag = True
        x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    if flag or partition_layer == 'max_pooling2d':
        if partition_layer == 'max_pooling2d':
            x = img_input
            flag = True
        x = conv2d_bn(x, 80, 1, 1, padding='valid', name='4')

    if flag or partition_layer == 'conv2d_3':
        if partition_layer == 'conv2d_3':
            x = img_input
            flag = True
        x = conv2d_bn(x, 192, 3, 3, padding='valid', name='5')
    if flag or partition_layer == 'conv2d_4':
        if partition_layer == 'conv2d_4':
            x = img_input
            flag = True
        x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    if flag or partition_layer == 'max_pooling2d_1':
        if partition_layer == 'max_pooling2d_1':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 64, 1, 1, name='6')
        branch5x5 = conv2d_bn(x, 48, 1, 1, name='7')
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5, name='8')

        branch3x3dbl = conv2d_bn(x, 64, 1, 1, name='9')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='10')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='11')

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)

        branch_pool = conv2d_bn(branch_pool, 32, 1, 1, name='12')
        x = layers.concatenate(
            [branch1x1, branch5x5, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed0')

    # mixed 1: 35 x 35 x 288
    if flag or partition_layer == 'mixed0':

        if partition_layer == 'mixed0':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 64, 1, 1, name='13')

        branch5x5 = conv2d_bn(x, 48, 1, 1, name='14')
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5, name='15')

        branch3x3dbl = conv2d_bn(x, 64, 1, 1, name='16')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='17')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='18')

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 64, 1, 1, name='19')
        x = layers.concatenate(
            [branch1x1, branch5x5, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed1')

    # mixed 2: 35 x 35 x 288
    if flag or partition_layer == 'mixed1':
        if partition_layer == 'mixed1':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 64, 1, 1, name='20')

        branch5x5 = conv2d_bn(x, 48, 1, 1, name='21')
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5, name='22')

        branch3x3dbl = conv2d_bn(x, 64, 1, 1, name='23')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='24')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='25')

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)

        branch_pool = conv2d_bn(branch_pool, 64, 1, 1, name='26')
        x = layers.concatenate(
            [branch1x1, branch5x5, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed2')

    # mixed 3: 17 x 17 x 768
    if flag or partition_layer == 'mixed2':
        if partition_layer == 'mixed2':
            x = img_input
            flag = True
        branch3x3 = conv2d_bn(x,
                              384,
                              3,
                              3,
                              strides=(2, 2),
                              padding='valid',
                              name='27')
        branch3x3dbl = conv2d_bn(x, 64, 1, 1, name='28')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, name='29')
        branch3x3dbl = conv2d_bn(branch3x3dbl,
                                 96,
                                 3,
                                 3,
                                 strides=(2, 2),
                                 padding='valid',
                                 name='30')

        branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
                               axis=channel_axis,
                               name='mixed3')

    # mixed 4: 17 x 17 x 768
    if flag or partition_layer == 'mixed3':
        if partition_layer == 'mixed3':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 192, 1, 1, name='31')

        branch7x7 = conv2d_bn(x, 128, 1, 1, name='32')
        branch7x7 = conv2d_bn(branch7x7, 128, 1, 7, name='33')
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1, name='34')

        branch7x7dbl = conv2d_bn(x, 128, 1, 1, name='35')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1, name='36')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7, name='37')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1, name='38')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7, name='39')

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)

        branch_pool = conv2d_bn(branch_pool, 192, 1, 1, name='40')
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    if flag or partition_layer == 'mixed4':
        if partition_layer == 'mixed4':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 192, 1, 1, name='41')
        branch7x7 = conv2d_bn(x, 160, 1, 1, name='42')
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7, name='43')
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1, name='44')

        branch7x7dbl = conv2d_bn(x, 160, 1, 1, name='45')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1, name='46')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7, name='47')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1, name='48')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7, name='49')

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1, name='50')
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed5')
    if flag or partition_layer == 'mixed5':
        if partition_layer == 'mixed5':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 192, 1, 1, name='51')

        branch7x7 = conv2d_bn(x, 160, 1, 1, name='52')
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7, name='53')
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1, name='54')

        branch7x7dbl = conv2d_bn(x, 160, 1, 1, name='55')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1, name='56')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7, name='57')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1, name='58')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7, name='59')
        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1, name='60')
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed6')

    # mixed 7: 17 x 17 x 768
    if flag or partition_layer == 'mixed6':
        if partition_layer == 'mixed6':
            flag = True
            x = img_input
        branch1x1 = conv2d_bn(x, 192, 1, 1, name='61')

        branch7x7 = conv2d_bn(x, 192, 1, 1, name='62')
        branch7x7 = conv2d_bn(branch7x7, 192, 1, 7, name='63')
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1, name='64')

        branch7x7dbl = conv2d_bn(x, 192, 1, 1, name='65')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1, name='66')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7, name='67')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1, name='68')
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7, name='69')

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1, name='70')
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed7')

    # mixed 8: 8 x 8 x 1280
    if flag or partition_layer == 'mixed7':
        if partition_layer == 'mixed7':
            x = img_input
            flag = True
        branch3x3 = conv2d_bn(x, 192, 1, 1, name='71')
        branch3x3 = conv2d_bn(branch3x3,
                              320,
                              3,
                              3,
                              strides=(2, 2),
                              padding='valid',
                              name='72')

        branch7x7x3 = conv2d_bn(x, 192, 1, 1, name='73')
        branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7, name='74')
        branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1, name='75')
        branch7x7x3 = conv2d_bn(branch7x7x3,
                                192,
                                3,
                                3,
                                strides=(2, 2),
                                padding='valid',
                                name='76')

        branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
                               axis=channel_axis,
                               name='mixed8')
    # mixed 9: 8 x 8 x 2048
    if flag or partition_layer == 'mixed8':
        if partition_layer == 'mixed8':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 320, 1, 1, name='77')
        branch3x3 = conv2d_bn(x, 384, 1, 1, name='78')
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3, name='79')
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1, name='80')
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(0))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1, name='81')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3, name='82')
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3, name='83')
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1, name='84')
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)
        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)

        branch_pool = conv2d_bn(branch_pool, 192, 1, 1, name='85')
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed9')
    if flag or partition_layer == 'mixed9':
        if partition_layer == 'mixed9':
            x = img_input
            flag = True
        branch1x1 = conv2d_bn(x, 320, 1, 1, name='86')
        branch3x3 = conv2d_bn(x, 384, 1, 1, name='87')
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3, name='88')
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1, name='89')
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(1))
        branch3x3dbl = conv2d_bn(x, 448, 1, 1, name='90')
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3, name='91')
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3, name='92')
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1, name='93')
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)
        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1, name='94')
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed10')
    if flag or partition_layer == 'mixed10':
        if partition_layer == 'mixed10':
            # Only the pooling/classification head remains past this point.
            x = img_input
            flag = True
    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(classes,
                         activation=classifier_activation,
                         name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = training.Model(inputs, x, name='inception_v3')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = data_utils.get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = data_utils.get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path, by_name=True, skip_mismatch=True)
    elif weights is not None:
        model.load_weights(weights)

    return model
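
A minimal usage sketch of the `partition_layer` mechanism (the `mixed5` split point and the `head`/`tail` names are illustrative, and `tensorflow.keras` is assumed to be importable next to this module): a full build is truncated at `mixed5` to obtain the head, and the tail is rebuilt from that point by passing `partition_layer='mixed5'` together with the head's output shape.

import tensorflow as tf

full = InceptionV3(weights=None, input_shape=(299, 299, 3))
head = tf.keras.Model(full.input, full.get_layer('mixed5').output)
tail = InceptionV3(weights=None,
                   input_shape=head.output_shape[1:],  # (17, 17, 768) with channels_last
                   partition_layer='mixed5')
# features computed on one device can then be handed to the other:
# logits = tail.predict(head.predict(images))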
Example #2
import tensorflow as tf
from tensorflow.keras import layers, models

tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)

# `data`, `img_size_flat`, `img_shape_full`, `activation`, `num_dense_layers`,
# and `num_dense_nodes` are assumed to be defined by the surrounding script.
validation_data = data.validation.images, data.validation.labels

# Model

model = models.Sequential()
model.add(layers.InputLayer(input_shape=(img_size_flat,)))
model.add(layers.Reshape(img_shape_full))
model.add(layers.Conv2D(
    kernel_size=5,
    strides=1,
    filters=16,
    padding='same',
    activation=activation,
    name='layer_conv1'))
model.add(layers.MaxPooling2D(pool_size=2, strides=2))
model.add(layers.Conv2D(
    kernel_size=5,
    strides=1,
    filters=36,
    padding='same',
    activation=activation,
    name='layer_conv2'))
model.add(layers.MaxPooling2D(pool_size=2, strides=2))
model.add(layers.Flatten())
for i in range(num_dense_layers):
    name = 'layer_dense_{0}'.format(i+1)
    model.add(layers.Dense(
        num_dense_nodes,
        activation=activation,
        name=name))
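
The snippet stops before the classification head; a hedged completion (the 10-class softmax output, the Adam optimizer, and the `data.train` split below are assumptions based on the variables already in scope):

# Output layer and training setup (assumed; not part of the original snippet).
model.add(layers.Dense(10, activation='softmax', name='layer_output'))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# `data.train` is assumed to expose the training split the same way
# `data.validation` does above.
model.fit(x=data.train.images,
          y=data.train.labels,
          epochs=3,
          batch_size=128,
          validation_data=validation_data)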
Example #3
def _reduction_a_cell(ip, p, filters, block_id=None):
    channel_dim = -1

    with backend.name_scope('reduction_A_block_%s' % block_id):
        p = _adjust_block(p, ip, filters, block_id)

        h = layers.Activation('relu')(ip)
        h = layers.Conv2D(filters, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          name='reduction_conv_1_%s' % block_id,
                          use_bias=False,
                          kernel_initializer='he_normal')(h)
        h = layers.BatchNormalization(axis=channel_dim,
                                      momentum=0.9997,
                                      epsilon=1e-3,
                                      name='reduction_bn_1_%s' % block_id)(h)
        h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3),
                                  name='reduction_pad_1_%s' % block_id)(h)

        with backend.name_scope('block_1'):
            x1_1 = _separable_conv_block(h,
                                         filters, (5, 5),
                                         strides=(2, 2),
                                         block_id='reduction_left1_%s' %
                                         block_id)
            x1_2 = _separable_conv_block(p,
                                         filters, (7, 7),
                                         strides=(2, 2),
                                         block_id='reduction_right1_%s' %
                                         block_id)
            x1 = layers.add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)

        with backend.name_scope('block_2'):
            x2_1 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding='valid',
                name='reduction_left2_%s' % block_id)(h3)
            x2_2 = _separable_conv_block(p,
                                         filters, (7, 7),
                                         strides=(2, 2),
                                         block_id='reduction_right2_%s' %
                                         block_id)
            x2 = layers.add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)

        with backend.name_scope('block_3'):
            x3_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(2, 2),
                padding='valid',
                name='reduction_left3_%s' % block_id)(h3)
            x3_2 = _separable_conv_block(p,
                                         filters, (5, 5),
                                         strides=(2, 2),
                                         block_id='reduction_right3_%s' %
                                         block_id)
            x3 = layers.add([x3_1, x3_2], name='reduction_add3_%s' % block_id)

        with backend.name_scope('block_4'):
            x4 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding='same',
                name='reduction_left4_%s' % block_id)(x1)
            x4 = layers.add([x2, x4])

        with backend.name_scope('block_5'):
            x5_1 = _separable_conv_block(x1,
                                         filters, (3, 3),
                                         block_id='reduction_left4_%s' %
                                         block_id)
            x5_2 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding='valid',
                name='reduction_right5_%s' % block_id)(h3)
            x5 = layers.add([x5_1, x5_2], name='reduction_add4_%s' % block_id)

        x = layers.concatenate([x2, x3, x4, x5],
                               axis=channel_dim,
                               name='reduction_concat_%s' % block_id)
        return x, ip
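
A small, hedged sketch of where these reduction cells end up in a full model, assuming the stock `tf.keras.applications.NASNetMobile` uses the same `reduction_concat_<block_id>` naming as the helper above:

import tensorflow as tf

model = tf.keras.applications.NASNetMobile(weights=None)
# Each reduction cell ends in a concatenate layer named 'reduction_concat_...'.
for layer in model.layers:
    if 'reduction_concat' in layer.name:
        print(layer.name, layer.output_shape)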
Example #4
def ResNet(stack_fn,
           preact,
           use_bias,
           model_name='resnet',
           include_top=True,
           weights='imagenet',
           input_tensor=None,
           input_shape=None,
           pooling=None,
           classes=1000,
           **kwargs):
    """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Arguments:
    stack_fn: a function that returns output tensor for the
      stacked residual blocks.
    preact: whether to use pre-activation or not
      (True for ResNetV2, False for ResNet and ResNeXt).
    use_bias: whether to use biases for convolutional layers or not
      (True for ResNet and ResNetV2, False for ResNeXt).
    model_name: string, model name.
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor
      (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` (with `channels_last` data format)
      or `(3, 224, 224)` (with `channels_first` data format)).
      It should have exactly 3 input channels.
    pooling: optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional layer.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional layer, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will
          be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.
    **kwargs: For backwards compatibility only.

  Returns:
    A Keras model instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
  """
    if 'layers' in kwargs:
        global layers
        layers = kwargs.pop('layers')
    if kwargs:
        raise ValueError('Unknown argument(s): %s' % (kwargs, ))
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)),
                             name='conv1_pad')(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias,
                      name='conv1_conv')(x)

    if not preact:
        x = layers.BatchNormalization(axis=bn_axis,
                                      epsilon=1.001e-5,
                                      name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)

    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)

    x = stack_fn(x)

    if preact:
        x = layers.BatchNormalization(axis=bn_axis,
                                      epsilon=1.001e-5,
                                      name='post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='probs')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = training.Model(inputs, x, name=model_name)

    # Load weights.
    if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
        if include_top:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = data_utils.get_file(file_name,
                                           BASE_WEIGHTS_PATH + file_name,
                                           cache_subdir='models',
                                           file_hash=file_hash)
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
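
For context, a sketch of how a concrete variant plugs a `stack_fn` into this builder, mirroring the upstream Keras pattern (`stack1` is the residual-stage helper defined alongside it upstream, not shown here):

def ResNet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             **kwargs):
  """ResNet50 built on top of the generic `ResNet` constructor above."""

  def stack_fn(x):
    x = stack1(x, 64, 3, stride1=1, name='conv2')
    x = stack1(x, 128, 4, name='conv3')
    x = stack1(x, 256, 6, name='conv4')
    return stack1(x, 512, 3, name='conv5')

  return ResNet(stack_fn, False, True, 'resnet50', include_top, weights,
                input_tensor, input_shape, pooling, classes, **kwargs)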
Example #5

def ResNet(stack_fn,
           use_bias,
           model_name='resnet',
           include_top=True,
           weights='imagenet',
           input_tensor=None,
           input_shape=None,
           pooling=None,
           deep_stem=False,
           stem_width=None,
           classes=1000,
           classifier_activation='softmax',
           **kwargs):
  """Instantiates the ResNeSt, ResNeXt, and Wide ResNet architecture.

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.resnet.preprocess_input` for an example.

  Arguments:
    stack_fn: a function that returns output tensor for the
      stacked residual blocks.
    use_bias: whether to use biases for convolutional layers or not
      (True for ResNet and ResNetV2, False for ResNeXt).
    model_name: string, model name.
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor
      (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` (with `channels_last` data format)
      or `(3, 224, 224)` (with `channels_first` data format)).
      It should have exactly 3 input channels.
    pooling: optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional layer.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional layer, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will
          be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
    **kwargs: For backwards compatibility only.
  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
  if 'layers' in kwargs:
    global layers
    layers = kwargs.pop('layers')
  if kwargs:
    raise ValueError('Unknown argument(s): %s' % (kwargs,))
  if not (weights in {'imagenet', 'ssl', 'swsl', None} or os.path.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), `ssl` '
                     '(semi-supervised), `swsl` '
                     '(semi-weakly supervised), '
                     'or the path to the weights file to be loaded.')

  if (weights == 'imagenet' or weights == 'ssl' or weights == 'swsl') and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"`, '
                     'or `weights` as `"ssl"`, '
                     'or `weights` as `"swsl"`, '
                     'with `include_top` '
                     ' as true, `classes` should be 1000')

  # Determine proper input shape
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=224,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

  if deep_stem:
    # Deep stem based off of ResNet-D
    x = layers.ZeroPadding2D(
      padding=((1, 1), (1, 1)), name='conv1_0_pad')(img_input)
    x = layers.Conv2D(stem_width, 3, strides=2, use_bias=use_bias, name='conv1_0_conv')(x)
    x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name='conv1_0_bn')(x)
    x = layers.Activation('relu', name='conv1_0_relu')(x)

    x = layers.ZeroPadding2D(
      padding=((1, 1), (1, 1)), name='conv1_1_pad')(x)
    x = layers.Conv2D(stem_width, 3, strides=1, use_bias=use_bias, name='conv1_1_conv')(x)
    x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name='conv1_1_bn')(x)
    x = layers.Activation('relu', name='conv1_1_relu')(x)

    x = layers.ZeroPadding2D(
      padding=((1, 1), (1, 1)), name='conv1_2_pad')(x)
    x = layers.Conv2D(stem_width * 2, 3, strides=1, use_bias=use_bias, name='conv1_2_conv')(x)
    x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name='conv1_2_bn')(x)
    x = layers.Activation('relu', name='conv1_2_relu')(x)
  else:
    x = layers.ZeroPadding2D(
        padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)

    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
    x = layers.Activation('relu', name='conv1_relu')(x)

  x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
  x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)

  x = stack_fn(x)

  if include_top:
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D(name='max_pool')(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = training.Model(inputs, x, name=model_name)

  # Load weights. Use a local path so repeated calls do not keep appending
  # version suffixes to the module-level BASE_WEIGHTS_PATH.
  if 'resnest' in model_name:
    weights_base = BASE_WEIGHTS_PATH + 'v0.2.0/'
  elif 'resnet' in model_name and 'wide' not in model_name:
    weights_base = BASE_WEIGHTS_PATH + 'v0.3.0/'
  else:
    weights_base = BASE_WEIGHTS_PATH + 'v0.1.0/'

  if (weights == 'imagenet') and (model_name in IMAGENET_WEIGHTS_HASHES):
    if include_top:
      file_name = model_name + '_imagenet_top.h5'
      file_hash = IMAGENET_WEIGHTS_HASHES[model_name][0]
    else:
      file_name = model_name + '_imagenet_notop.h5'
      file_hash = IMAGENET_WEIGHTS_HASHES[model_name][1]
    weights_path = data_utils.get_file(
        file_name,
        weights_base + file_name,
        cache_subdir='models',
        file_hash=file_hash)
    model.load_weights(weights_path)

  elif (weights == 'ssl') and (model_name in SSL_WEIGHTS_HASHES):
    if include_top:
      file_name = model_name + '_ssl_top.h5'
      file_hash = SSL_WEIGHTS_HASHES[model_name][0]
    else:
      file_name = model_name + '_ssl_notop.h5'
      file_hash = SSL_WEIGHTS_HASHES[model_name][1]
    weights_path = data_utils.get_file(
        file_name,
        weights_base + file_name,
        cache_subdir='models',
        file_hash=file_hash)
    model.load_weights(weights_path)

  elif (weights == 'swsl') and (model_name in SWSL_WEIGHTS_HASHES):
    if include_top:
      file_name = model_name + '_swsl_top.h5'
      file_hash = SWSL_WEIGHTS_HASHES[model_name][0]
    else:
      file_name = model_name + '_swsl_notop.h5'
      file_hash = SWSL_WEIGHTS_HASHES[model_name][1]
    weights_path = data_utils.get_file(
        file_name,
        weights_base + file_name,
        cache_subdir='models',
        file_hash=file_hash)
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
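
A minimal sketch that isolates the deep stem added by this variant (a demo call, assuming the module-level `layers`/`backend`/`training` names and `BASE_WEIGHTS_PATH` used above are defined; the identity `stack_fn` is purely illustrative): with `deep_stem=True`, the single 7x7/stride-2 convolution is replaced by three 3x3 convolutions.

stem_only = ResNet(stack_fn=lambda x: x,
                   use_bias=False,
                   model_name='deep_stem_demo',
                   include_top=False,
                   weights=None,
                   input_shape=(224, 224, 3),
                   deep_stem=True,
                   stem_width=32)
stem_only.summary()  # shows conv1_0/1/2 blocks instead of a single conv1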
Example #6
    def __init__(self, num_classes=10, dtype="float32", batch_size=None):
        super(CustomModel, self).__init__(name="resnet50")

        if backend.image_data_format() == "channels_first":
            self._lambda = layers.Lambda(
                lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
                name="transpose",
            )
            bn_axis = 1
            data_format = "channels_first"
        else:
            bn_axis = 3
            data_format = "channels_last"

        self._padding = layers.ZeroPadding2D(padding=(3, 3),
                                             data_format=data_format,
                                             name="zero_pad")
        self._conv2d_1 = layers.Conv2D(
            64,
            (7, 7),
            strides=(2, 2),
            padding="valid",
            use_bias=False,
            kernel_initializer="he_normal",
            kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
            name="conv1",
        )
        self._bn_1 = layers.BatchNormalization(
            axis=bn_axis,
            momentum=BATCH_NORM_DECAY,
            epsilon=BATCH_NORM_EPSILON,
            name="bn_conv1",
        )
        self._activation_1 = layers.Activation("relu")
        self._maxpooling2d = layers.MaxPooling2D((3, 3),
                                                 strides=(2, 2),
                                                 padding="same")

        self._conv_block_1 = ConvBlock(3, [64, 64, 256],
                                       stage=2,
                                       block="a",
                                       strides=(1, 1))
        self._identity_block_1 = IdentityBlock(3, [64, 64, 256],
                                               stage=2,
                                               block="b")
        self._identity_block_2 = IdentityBlock(3, [64, 64, 256],
                                               stage=2,
                                               block="c")

        self._conv_block_2 = ConvBlock(3, [128, 128, 512], stage=3, block="a")
        self._identity_block_3 = IdentityBlock(3, [128, 128, 512],
                                               stage=3,
                                               block="b")
        self._identity_block_4 = IdentityBlock(3, [128, 128, 512],
                                               stage=3,
                                               block="c")
        self._identity_block_5 = IdentityBlock(3, [128, 128, 512],
                                               stage=3,
                                               block="d")

        self._conv_block_3 = ConvBlock(3, [256, 256, 1024], stage=4, block="a")
        self._identity_block_6 = IdentityBlock(3, [256, 256, 1024],
                                               stage=4,
                                               block="b")
        self._identity_block_7 = IdentityBlock(3, [256, 256, 1024],
                                               stage=4,
                                               block="c")
        self._identity_block_8 = IdentityBlock(3, [256, 256, 1024],
                                               stage=4,
                                               block="d")
        self._identity_block_9 = IdentityBlock(3, [256, 256, 1024],
                                               stage=4,
                                               block="e")
        self._identity_block_10 = IdentityBlock(3, [256, 256, 1024],
                                                stage=4,
                                                block="f")

        self._conv_block_4 = ConvBlock(3, [512, 512, 2048], stage=5, block="a")
        self._identity_block_11 = IdentityBlock(3, [512, 512, 2048],
                                                stage=5,
                                                block="b")
        self._identity_block_12 = IdentityBlock(3, [512, 512, 2048],
                                                stage=5,
                                                block="c")

        rm_axes = ([1, 2] if backend.image_data_format() == "channels_last"
                   else [2, 3])
        self._lamba_2 = layers.Lambda(lambda x: backend.mean(x, rm_axes),
                                      name="reduce_mean")
        self._dense = layers.Dense(
            num_classes,
            kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
            bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
            name="fc1000",
        )
        self._activation_2 = layers.Activation("softmax")
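
The excerpt shows only the constructor; a hedged `call()` sketch (the original forward pass is not shown here) that chains the attributes in the order they are defined above:

    def call(self, inputs, training=None):
        # Assumed forward pass: stem, four residual stages, then the head.
        x = inputs
        if backend.image_data_format() == "channels_first":
            x = self._lambda(x)  # NHWC -> NCHW, matching the constructor
        x = self._padding(x)
        x = self._conv2d_1(x)
        x = self._bn_1(x, training=training)
        x = self._activation_1(x)
        x = self._maxpooling2d(x)

        x = self._conv_block_1(x)
        x = self._identity_block_1(x)
        x = self._identity_block_2(x)

        x = self._conv_block_2(x)
        x = self._identity_block_3(x)
        x = self._identity_block_4(x)
        x = self._identity_block_5(x)

        x = self._conv_block_3(x)
        x = self._identity_block_6(x)
        x = self._identity_block_7(x)
        x = self._identity_block_8(x)
        x = self._identity_block_9(x)
        x = self._identity_block_10(x)

        x = self._conv_block_4(x)
        x = self._identity_block_11(x)
        x = self._identity_block_12(x)

        x = self._lamba_2(x)
        x = self._dense(x)
        return self._activation_2(x)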
Example #7
def ResNet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             **kwargs):
    """Instantiates the ResNet50 architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    # global backend, layers, models, keras_utils
    # backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    with tf.name_scope("input_layer") as scope:
        if input_tensor is None:
            img_input = layers.Input(shape=input_shape)
        else:
            if not backend.is_keras_tensor(input_tensor):
                img_input = layers.Input(tensor=input_tensor,
                                         shape=input_shape)
            else:
                img_input = input_tensor
        if backend.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1

    with tf.name_scope("resnet") as scope:
        x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
        x = layers.Conv2D(64, (7, 7),
                          strides=(2, 2),
                          padding='valid',
                          kernel_initializer='he_normal',
                          name='conv1')(x)
        x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = layers.Activation('relu')(x)
        x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
        x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

        with tf.name_scope("module_0") as scope:
            x = conv_block(x,
                           3, [64, 64, 256],
                           stage=2,
                           block='a',
                           strides=(1, 1))
            x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
            x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        with tf.name_scope("module_1") as scope:
            x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
            x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
            x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
            x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

        with tf.name_scope("module_2") as scope:
            x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

        with tf.name_scope("module_3") as scope:
            x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
            x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
            x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        with tf.name_scope("top_layer") as scope:
            if include_top:
                x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
                x = layers.Dense(classes, activation='softmax',
                                 name='fc1000')(x)
            else:
                if pooling == 'avg':
                    x = layers.GlobalAveragePooling2D()(x)
                elif pooling == 'max':
                    x = layers.GlobalMaxPooling2D()(x)
                else:
                    warnings.warn(
                        'The output shape of `ResNet50(include_top=False)` '
                        'has been changed since Keras 2.2.0.')

        with tf.name_scope("input_layer") as scope:
            # Ensure that the model takes into account
            # any potential predecessors of `input_tensor`.
            if input_tensor is not None:
                inputs = keras_utils.get_source_inputs(input_tensor)
            else:
                inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name='resnet50')

    # Load weights.
    # if weights == 'imagenet':
    #     if include_top:
    #         weights_path = keras_utils.get_file(
    #             'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
    #             WEIGHTS_PATH,
    #             cache_subdir='models',
    #             md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
    #     else:
    #         weights_path = keras_utils.get_file(
    #             'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
    #             WEIGHTS_PATH_NO_TOP,
    #             cache_subdir='models',
    #             md5_hash='a268eb855778b3df3c7506639542a6af')
    #     model.load_weights(weights_path)
    #     if backend.backend() == 'theano':
    #         keras_utils.convert_all_kernels_in_model(model)
    # elif weights is not None:
    #     model.load_weights(weights)

    return model
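
A minimal usage sketch, assuming `conv_block`, `identity_block`, `_obtain_input_shape`, and the module-level `backend`/`layers`/`models` imports are available as used above (weight loading is commented out, so the parameters are randomly initialized). The `tf.name_scope` blocks presumably exist to group the underlying graph ops by stage for visualization.

model = ResNet50(include_top=True, weights=None, input_shape=(224, 224, 3))
model.summary()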
Example #8
import tensorflow as tf
from tensorflow.keras import layers

model = tf.keras.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Dropout(0.25),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Dropout(0.25),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(10, activation='softmax')
])

num_classes = 10

# tf.train.RMSPropOptimizer is TF1-only; use the Keras RMSprop optimizer instead.
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

(trainX, trainY), (testX, testY) = tf.keras.datasets.cifar10.load_data()

trainY = tf.keras.utils.to_categorical(trainY, num_classes=num_classes)
testY = tf.keras.utils.to_categorical(testY, num_classes=num_classes)

model.fit(trainX, trainY, batch_size=32, epochs=10)
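
A natural follow-up, assuming the variables above are still in scope:

# Evaluate on the held-out CIFAR-10 test split.
test_loss, test_acc = model.evaluate(testX, testY, batch_size=32)
print('Test accuracy: %.3f' % test_acc)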
Example #9
def InceptionV3(input_shape=None):
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=True)

    img_input = layers.Input(shape=input_shape)

    if backend.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed0')

    # mixed 1: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed1')

    # mixed 2: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed2')

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl,
                             96,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed3')

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed7')

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3,
                          320,
                          3,
                          3,
                          strides=(2, 2),
                          padding='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3,
                            192,
                            3,
                            3,
                            strides=(2, 2),
                            padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
                           axis=channel_axis,
                           name='mixed8')

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(i))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))
    # Classification block
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes,
                     activation=classifier_activation,
                     name='predictions')(x)

    # Create model.
    model = training.Model(img_input, x, name='inception_v3')

    return model
Example #10
def InceptionResNetV2(include_top=True,
                      weights='imagenet',
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=1000,
                      classifier_activation='softmax',
                      **kwargs):
    """Instantiates the Inception-ResNet v2 architecture.

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.inception_resnet_v2.preprocess_input` for an example.

  Arguments:
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is `False` (otherwise the input shape
      has to be `(299, 299, 3)` (with `'channels_last'` data format)
      or `(3, 299, 299)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 75.
      E.g. `(150, 150, 3)` would be one valid value.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the last convolutional block.
      - `'avg'` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `'max'` means that global max pooling will be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is `True`, and
      if no `weights` argument is specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
    **kwargs: For backwards compatibility only.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
    if 'layers' in kwargs:
        global layers
        layers = kwargs.pop('layers')
    if kwargs:
        raise ValueError('Unknown argument(s): %s' % (kwargs, ))
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = layers.MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = layers.MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')

    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(classes,
                         activation=classifier_activation,
                         name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = training.Model(inputs, x, name='inception_resnet_v2')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
            weights_path = data_utils.get_file(
                fname,
                BASE_WEIGHT_URL + fname,
                cache_subdir='models',
                file_hash='e693bd0210a403b3192acc6073ad2e96')
        else:
            fname = ('inception_resnet_v2_weights_'
                     'tf_dim_ordering_tf_kernels_notop.h5')
            weights_path = data_utils.get_file(
                fname,
                BASE_WEIGHT_URL + fname,
                cache_subdir='models',
                file_hash='d19885ff4a710c122648d3b5c3b684e4')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
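A minimal transfer-learning sketch (an addition, not part of the original listing): it reuses the function above as a frozen feature extractor. The 5-class head, the input size, and the assumption that `layers` and `training` refer to the same Keras modules used above are illustrative only.

if __name__ == '__main__':
    base = InceptionResNetV2(include_top=False,
                             weights='imagenet',
                             input_shape=(299, 299, 3),
                             pooling='avg')
    base.trainable = False
    inputs = layers.Input(shape=(299, 299, 3))
    features = base(inputs, training=False)  # (batch, 1536) pooled features
    outputs = layers.Dense(5, activation='softmax')(features)
    transfer_model = training.Model(inputs, outputs,
                                    name='inception_resnet_v2_transfer')
    transfer_model.summary()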
Example #11
def deep_cnn(features_shape, num_classes, activation_function='relu'):
    model = models.Sequential()

    model.add(
        layers.InputLayer(input_shape=features_shape,
                          name='Inputs',
                          dtype='float32'))

    # Block 1
    model.add(
        layers.Conv2D(input_shape=features_shape,
                      filters=32,
                      kernel_size=(3, 3),
                      strides=1,
                      padding='same',
                      activation=activation_function,
                      name='Block1_Convolution'))
    model.add(
        layers.MaxPooling2D(pool_size=(3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='Block1_MaxPooling'))
    model.add(layers.BatchNormalization(name='Block1_BatchNormalization'))

    # Block 2
    model.add(
        layers.Conv2D(filters=32,
                      kernel_size=(3, 3),
                      strides=1,
                      padding='same',
                      activation=activation_function,
                      name='Block2_Convolution'))
    model.add(
        layers.MaxPooling2D(pool_size=(3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='Block2_MaxPooling'))
    model.add(layers.BatchNormalization(name='Block2_BatchNormalization'))

    # Block 3
    model.add(
        layers.Conv2D(filters=32,
                      kernel_size=(3, 3),
                      strides=1,
                      padding='same',
                      activation=activation_function,
                      name='Block3_Convolution'))
    model.add(
        layers.MaxPooling2D(pool_size=(3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='Block3_MaxPooling'))
    model.add(layers.BatchNormalization(name='Block3_BatchNormalization'))

    # Flatten
    model.add(layers.Flatten(name='Flatten'))

    # Dense block
    model.add(
        layers.Dense(units=64,
                     activation=activation_function,
                     name='Dense_Dense'))
    model.add(layers.BatchNormalization(name='Dense_BatchNormalization'))
    model.add(layers.Dropout(rate=0.2, name='Dense_Dropout'))

    # Predictions
    model.add(
        layers.Dense(units=num_classes,
                     activation='softmax',
                     name='Predictions_Dense'))

    # Print network summary
    model.summary()

    return model
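A hedged usage sketch (added): `deep_cnn` only needs a `(height, width, channels)` feature shape and a class count. The shape, class count, and compile settings below are placeholders rather than values from the original snippet.

if __name__ == '__main__':
    cnn = deep_cnn(features_shape=(64, 64, 1), num_classes=10)
    cnn.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])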
Example #12
def VGG19(include_top=True,
          weights='imagenet',
          input_tensor=None,
          input_shape=None,
          pooling=None,
          classes=1000):
  """Instantiates the VGG19 architecture.

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Arguments:
    include_top: whether to include the 3 fully-connected
      layers at the top of the network.
    weights: one of `None` (random initialization),
        'imagenet' (pre-training on ImageNet),
        or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor
      (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)`
      (with `channels_last` data format)
      or `(3, 224, 224)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 32.
      E.g. `(200, 200, 3)` would be one valid value.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional block.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will
          be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.

  Returns:
    A Keras model instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
  """
  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                     ' as true, `classes` should be 1000')
  # Determine proper input shape
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=224,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor
  # Block 1
  x = layers.Conv2D(
      64, (3, 3), activation='relu', padding='same', name='block1_conv1')(
          img_input)
  x = layers.Conv2D(
      64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

  # Block 2
  x = layers.Conv2D(
      128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
  x = layers.Conv2D(
      128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

  # Block 3
  x = layers.Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
  x = layers.Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
  x = layers.Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
  x = layers.Conv2D(
      256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

  # Block 4
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

  # Block 5
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
  x = layers.Conv2D(
      512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
  x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

  if include_top:
    # Classification block
    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(4096, activation='relu', name='fc1')(x)
    x = layers.Dense(4096, activation='relu', name='fc2')(x)
    x = layers.Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input
  # Create model.
  model = training.Model(inputs, x, name='vgg19')

  # Load weights.
  if weights == 'imagenet':
    if include_top:
      weights_path = data_utils.get_file(
          'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
          WEIGHTS_PATH,
          cache_subdir='models',
          file_hash='cbe5617147190e668d6c5d5026f83318')
    else:
      weights_path = data_utils.get_file(
          'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
          WEIGHTS_PATH_NO_TOP,
          cache_subdir='models',
          file_hash='253f8cb515780f3b799900260a226db6')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
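A short usage sketch (added): run the pre-trained, headless VGG19 on a dummy batch to get one 512-d descriptor per image. Real inputs would normally be preprocessed with `applications.vgg19.preprocess_input` first; `numpy` is imported here only to fabricate the batch.

import numpy as np

if __name__ == '__main__':
    vgg = VGG19(include_top=False, weights='imagenet', pooling='avg')
    dummy = np.random.rand(2, 224, 224, 3).astype('float32')
    pooled = vgg.predict(dummy)
    print(pooled.shape)  # (2, 512)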
Example #13
x_train /= 255.0
x_test /= 255.0

# Convert class vectors to class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Define model
model = keras.Sequential()
model.add(
    layers.Convolution2D(16, (3, 3),
                         padding='same',
                         input_shape=x_train.shape[1:],
                         activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Convolution2D(32, (3, 3), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(num_classes, activation='softmax'))

# Train model
adam = tf.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['top_k_categorical_accuracy'])
print(model.summary())
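The snippet above compiles the network but stops before the training step that its "# Train model" comment announces; a plausible continuation (added here, with placeholder epoch and batch-size values) would be:

model.fit(x_train, y_train,
          batch_size=128,
          epochs=10,
          validation_data=(x_test, y_test))
model.evaluate(x_test, y_test, verbose=2)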
Example #14
import random

import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras import layers
from tensorflow.python.keras import backend
from tensorflow.python.keras.optimizer_v2.adam import Adam
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
from tensorflow.python.keras.callbacks import TensorBoard

from dataset import IMG_HEIGHT, IMG_WIDTH, letters, BATCH_SIZE

backend.clear_session()

img_input = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH, 1))
x = layers.Conv2D(32, 3, activation="relu")(img_input)
x = layers.Conv2D(64, 3, activation="relu")(x)
x = layers.MaxPooling2D(2, 2)(x)
x = layers.Dropout(0.25)(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)
output = layers.Dense(52, activation="softmax")(x)

model = Model(img_input, output)
model.compile(loss="categorical_cross",
  optimizer=Adam(),
  metrics=['accuracy'])

training_set = list(filter(lambda letter: random.random() < 0.6, letters))
validation_set = list(
    filter(lambda cur: random.random() < 0.05,
           map(lambda letter: (letter.input, letter.output), letters)))

model.fit(
Example #15
def resnet50(num_classes, dtype='float32', batch_size=None):
    # TODO(tfboyd): add training argument, just like resnet56.
    """Instantiates the ResNet50 architecture.

  Args:
    num_classes: `int` number of classes for image classification.
    dtype: dtype to use for the input tensor (e.g. 'float32').
    batch_size: optional static batch size, baked into the input layer when set.

  Returns:
    A Keras model instance.
  """
    input_shape = (224, 224, 3)
    img_input = layers.Input(shape=input_shape,
                             dtype=dtype,
                             batch_size=batch_size)

    if backend.image_data_format() == 'channels_first':
        x = layers.Lambda(
            lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
            name='transpose')(img_input)
        bn_axis = 1
    else:  # channels_last
        x = img_input
        bn_axis = 3

    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  scale=False,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = layers.Dense(num_classes,
                     kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                     bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                     name='fc1000')(x)
    # TODO(reedwm): Remove manual casts once mixed precision can be enabled with a
    # single line of code.
    x = backend.cast(x, 'float32')
    x = layers.Activation('softmax')(x)

    # Create model.
    return models.Model(img_input, x, name='resnet50')
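A minimal call sketch (an addition, not part of the original example); it assumes the module-level helpers used above (`conv_block`, `identity_block`, `regularizers`, and the weight-decay constants) are available, and the optimizer and loss are illustrative choices only.

if __name__ == '__main__':
    net = resnet50(num_classes=1000)
    net.compile(optimizer='sgd',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    net.summary()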
Example #16
def encoder_block(input_tensor, num_filters):
    encoder = conv_block(input_tensor, num_filters)
    encoder_pool = layers.MaxPooling2D((1, 2), strides=(1, 2))(encoder)

    return encoder_pool, encoder
Example #17
def DenseNet(blocks,
             include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):
    """Instantiates the DenseNet architecture.

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Arguments:
    blocks: numbers of building blocks for the four dense layers.
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor
      (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` (with `'channels_last'` data format)
      or `(3, 224, 224)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 32.
      E.g. `(200, 200, 3)` would be one valid value.
    pooling: optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional block.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will
          be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.

  Returns:
    A Keras model instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
  """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)

    x = dense_block(x, blocks[0], name='conv2')
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, blocks[3], name='conv5')

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if blocks == [6, 12, 24, 16]:
        model = training.Model(inputs, x, name='densenet121')
    elif blocks == [6, 12, 32, 32]:
        model = training.Model(inputs, x, name='densenet169')
    elif blocks == [6, 12, 48, 32]:
        model = training.Model(inputs, x, name='densenet201')
    else:
        model = training.Model(inputs, x, name='densenet')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            if blocks == [6, 12, 24, 16]:
                weights_path = data_utils.get_file(
                    'densenet121_weights_tf_dim_ordering_tf_kernels.h5',
                    DENSENET121_WEIGHT_PATH,
                    cache_subdir='models',
                    file_hash='9d60b8095a5708f2dcce2bca79d332c7')
            elif blocks == [6, 12, 32, 32]:
                weights_path = data_utils.get_file(
                    'densenet169_weights_tf_dim_ordering_tf_kernels.h5',
                    DENSENET169_WEIGHT_PATH,
                    cache_subdir='models',
                    file_hash='d699b8f76981ab1b30698df4c175e90b')
            elif blocks == [6, 12, 48, 32]:
                weights_path = data_utils.get_file(
                    'densenet201_weights_tf_dim_ordering_tf_kernels.h5',
                    DENSENET201_WEIGHT_PATH,
                    cache_subdir='models',
                    file_hash='1ceb130c1ea1b78c3bf6114dbdfd8807')
        else:
            if blocks == [6, 12, 24, 16]:
                weights_path = data_utils.get_file(
                    'densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    DENSENET121_WEIGHT_PATH_NO_TOP,
                    cache_subdir='models',
                    file_hash='30ee3e1110167f948a6b9946edeeb738')
            elif blocks == [6, 12, 32, 32]:
                weights_path = data_utils.get_file(
                    'densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    DENSENET169_WEIGHT_PATH_NO_TOP,
                    cache_subdir='models',
                    file_hash='b8c4d4c20dd625c148057b9ff1c1176b')
            elif blocks == [6, 12, 48, 32]:
                weights_path = data_utils.get_file(
                    'densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    DENSENET201_WEIGHT_PATH_NO_TOP,
                    cache_subdir='models',
                    file_hash='c13680b51ded0fb44dff2d8f86ac8bb1')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
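Usage sketch (added): the `blocks` lists below are the standard configurations that the weight-selection logic above recognizes, so the pre-trained weights resolve correctly.

if __name__ == '__main__':
    densenet121 = DenseNet([6, 12, 24, 16], weights='imagenet')
    densenet201_features = DenseNet([6, 12, 48, 32],
                                    include_top=False,
                                    weights='imagenet',
                                    pooling='avg')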
Example #18
#                MODEL WEAK FOREGROUNDS
#################################################################

##CNN model
model = tf.keras.Sequential()
model.add(
    layers.Conv2D(16, (7, 7),
                  input_shape=(size, size, 1),
                  strides=(1, 1),
                  padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(32, (7, 7), padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2)))
model.add(layers.Activation('relu'))
model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(1, (1, 1), activation='sigmoid'))

model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss='binary_crossentropy',
              metrics=[tf.keras.metrics.BinaryAccuracy()])
model.summary()

history = model.fit(data_full[:lenj, :, :, :],
                    data_label[:lenj, :, :, :],
                    epochs=12,
                    batch_size=50,
Example #19
def VGG16(
    include_top=True,
    weights='imagenet',
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation='softmax',
):
    """Instantiates the VGG16 model.

  By default, it loads weights pre-trained on ImageNet. Check 'weights' for
  other options.

  This model can be built both with 'channels_first' data format
  (channels, height, width) or 'channels_last' data format
  (height, width, channels).

  The default input size for this model is 224x224.

  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.vgg16.preprocess_input` for an example.

  Arguments:
      include_top: whether to include the 3 fully-connected
          layers at the top of the network.
      weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
      input_tensor: optional Keras tensor
          (i.e. output of `layers.Input()`)
          to use as image input for the model.
      input_shape: optional shape tuple, only to be specified
          if `include_top` is False (otherwise the input shape
          has to be `(224, 224, 3)`
          (with `channels_last` data format)
          or `(3, 224, 224)` (with `channels_first` data format)).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 32.
          E.g. `(200, 200, 3)` would be one valid value.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model will be
              the 4D tensor output of the
              last convolutional block.
          - `avg` means that global average pooling
              will be applied to the output of the
              last convolutional block, and thus
              the output of the model will be a 2D tensor.
          - `max` means that global max pooling will
              be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is True, and
          if no `weights` argument is specified.
      classifier_activation: A `str` or callable. The activation function to use
          on the "top" layer. Ignored unless `include_top=True`. Set
          `classifier_activation=None` to return the logits of the "top" layer.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Block 1
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(img_input)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv1')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv2')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv3')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if include_top:
        # Classification block
        x = layers.Flatten(name='flatten')(x)
        x = layers.Dense(4096, activation='relu', name='fc1')(x)
        x = layers.Dense(4096, activation='relu', name='fc2')(x)

        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(classes,
                         activation=classifier_activation,
                         name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = training.Model(inputs, x, name='vgg16')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = data_utils.get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='64373286793e3c8b2b4e3219cbf3544b')
        else:
            weights_path = data_utils.get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='6d6bbae143d832006294945121d1f1fc')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
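A closing usage sketch (added): classify a dummy batch with the pre-trained top. Real images should first go through `applications.vgg16.preprocess_input`, as the docstring cautions; `numpy` is imported only to fabricate the batch.

import numpy as np

if __name__ == '__main__':
    vgg16 = VGG16(weights='imagenet')
    batch = np.random.rand(1, 224, 224, 3).astype('float32')
    preds = vgg16.predict(batch)
    print(preds.shape)  # (1, 1000) class probabilities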