Example #1
    def __init__(self,
                 dropout_keep_prob=0.4,
                 bottleneck_layer_size=512,
                 use_center_loss=False,
                 num_classes=8631):
        super(ThawedModel4, self).__init__()

        self.reduction_b = ReductionB()

        self.block8 = [Block8(1792, scale=0.20,
                              activation_fn=tf.nn.relu if i < 5 else None)
                       for i in range(6)]

        self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')

        self.flatten = layers.Flatten()
        self.dropout = layers.Dropout(1 - dropout_keep_prob)

        self.embedding = layers.Dense(bottleneck_layer_size,
                                      name='Bottleneck',
                                      use_bias=False)
        self.last_bn = layers.BatchNormalization()
        # pylint: disable=line-too-long
        self.classifier = layers.Dense(
            num_classes,
            kernel_initializer=initializers.glorot_uniform,
            kernel_regularizer=regularizers.l2(5e-4),
            name='Logits')
        self.activation = layers.Activation('softmax')

        self.use_center_loss = use_center_loss
        if use_center_loss:
            self.center_loss = CenterLoss(num_classes, 512)
Example #2
def create_model():
    conv_base = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=INPUT_SHAPE)
    conv_base.trainable = False
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.GlobalAveragePooling2D())
    model.add(
        layers.Dense(512,
                     activation='relu',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.add(layers.Dropout(0.5))
    model.add(
        layers.Dense(512,
                     activation='relu',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.add(layers.Dropout(0.5))
    model.add(
        layers.Dense(2,
                     activation='softmax',
                     kernel_initializer='uniform',
                     kernel_regularizer=reg))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=LEARN_RATE, momentum=0.9),
                  metrics=['accuracy'])
    model.summary()
    return model
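Note: the GlobalAveragePooling2D layer used throughout these examples collapses each feature map to its spatial mean, turning a (batch, H, W, C) tensor into (batch, C). A minimal standalone check (the shapes here are illustrative, not taken from the example above):

import numpy as np
import tensorflow as tf

x = np.random.rand(2, 7, 7, 32).astype('float32')   # (batch, H, W, C)
gap = tf.keras.layers.GlobalAveragePooling2D()(x)   # -> shape (2, 32)
manual = x.mean(axis=(1, 2))                        # the same reduction by hand
np.testing.assert_allclose(gap.numpy(), manual, rtol=1e-5)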
Example #3
def ResNet50(input_tensor=None,
             pooling=None,
             **kwargs):
    """Instantiates the ResNet50 architecture.
    # Arguments       
    # Returns
        A Keras model instance.
    """
    # Input arguments
    include_top = get_varargin(kwargs, 'include_top', True)
    nb_classes = get_varargin(kwargs, 'nb_classes', 1000)
    default_input_shape = _obtain_input_shape(None,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)
    input_shape = get_varargin(kwargs, 'input_shape', default_input_shape)
    if input_tensor is None:
        img_input = KL.Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = KL.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
            
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1        

    x = KL.ZeroPadding2D((3, 3))(img_input)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = KL.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    x = KL.MaxPooling2D((3, 3), strides=(2, 2))(x)
    
    for stage, nb_block in zip([2, 3, 4, 5], [3, 4, 6, 3]):
        for blk in range(nb_block):
            conv_block = (blk == 0)
            strides = (2, 2) if stage > 2 and blk == 0 else (1, 1)
            x = identity_block(x, stage=stage, block_id=blk + 1,
                               conv_block=conv_block, strides=strides)
            
    x = KL.AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = KL.Flatten()(x)
        x = KL.Dense(nb_classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = KL.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = KL.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
        
    # Create model.
    model = Model(inputs, x, name='resnet50')
    return model
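Example #3 depends on a get_varargin helper that is not shown. Judging from the call sites, it is just a keyword lookup with a default; a minimal sketch consistent with that usage:

def get_varargin(kwargs, key, default):
    # return kwargs[key] if present, otherwise the supplied default
    return kwargs.get(key, default)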
Example #4
def googlenet(input_shape=(img_size, img_size, channel), classes=100):
    main_input = layers.Input(input_shape)

    x = layers.Conv2D(192, (3, 3), padding='same')(main_input)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = inception_model(x, 64, 96, 128, 16, 32, 32)
    x = inception_model(x, 128, 128, 192, 32, 96, 64)
    x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                            padding='same')(x)

    x = inception_model(x, 192, 96, 208, 16, 48, 64)
    x = inception_model(x, 160, 112, 224, 24, 64, 64)
    x = inception_model(x, 128, 128, 256, 24, 64, 64)
    x = inception_model(x, 112, 144, 288, 32, 64, 64)
    x = inception_model(x, 256, 160, 320, 32, 128, 128)
    x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                            padding='same')(x)

    x = inception_model(x, 256, 160, 320, 32, 128, 128)
    x = inception_model(x, 384, 192, 384, 48, 128, 128)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(rate=0.4)(x)
    x = layers.Dense(classes, activation='softmax')(x)

    return Model(main_input, x)
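Example #4 assumes an inception_model helper. The six integer arguments line up with the classic GoogLeNet branch widths (1x1, 3x3-reduce, 3x3, 5x5-reduce, 5x5, pool projection), so a plausible sketch is:

def inception_model(x, n1x1, n3x3_red, n3x3, n5x5_red, n5x5, pool_proj):
    b1 = layers.Conv2D(n1x1, (1, 1), padding='same', activation='relu')(x)
    b2 = layers.Conv2D(n3x3_red, (1, 1), padding='same', activation='relu')(x)
    b2 = layers.Conv2D(n3x3, (3, 3), padding='same', activation='relu')(b2)
    b3 = layers.Conv2D(n5x5_red, (1, 1), padding='same', activation='relu')(x)
    b3 = layers.Conv2D(n5x5, (5, 5), padding='same', activation='relu')(b3)
    b4 = layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
    b4 = layers.Conv2D(pool_proj, (1, 1), padding='same', activation='relu')(b4)
    return layers.Concatenate()([b1, b2, b3, b4])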
Example #5
def SplitAttentionConv2D(x,
                         filters,
                         kernel_size,
                         stride=1,
                         padding=(0, 0),
                         groups=1,
                         use_bias=True,
                         radix=2,
                         name=None):
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    reduction_factor = 4
    inter_filters = max(filters * radix // reduction_factor, 32)
    x = layers.ZeroPadding2D((padding, padding), name=name + '_splat_pad')(x)
    x = layers.Conv2D(filters * radix,
                      kernel_size=kernel_size,
                      strides=stride,
                      groups=groups * radix,
                      use_bias=use_bias,
                      name=name + '_0_splat_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_0_splat_bn')(x)
    x = layers.Activation('relu', name=name + '_0_splat_relu')(x)

    splits = layers.Lambda(lambda x: tf.split(x, radix, bn_axis),
                           name=name + '_0_splat_split')(x)
    x = layers.Add(name=name + '_0_splat_add')(splits)
    x = layers.GlobalAveragePooling2D(name=name + '_0_splat_pool')(x)
    shape = (1, 1, filters) if bn_axis == 3 else (filters, 1, 1)
    x = layers.Reshape(shape, name=name + '_0_splat_reshape')(x)

    x = layers.Conv2D(inter_filters,
                      kernel_size=1,
                      groups=groups,
                      name=name + '_1_splat_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_1_splat_bn')(x)
    x = layers.Activation('relu', name=name + '_1_splat_relu')(x)

    # Attention
    x = layers.Conv2D(filters * radix,
                      kernel_size=1,
                      groups=groups,
                      name=name + '_2_splat_conv')(x)
    x = RSoftmax(x, filters * radix, radix, groups, name=name)
    x = layers.Lambda(lambda x: tf.split(x, radix, bn_axis),
                      name=name + '_1_splat_split')(x)
    x = layers.Lambda(
        lambda x: [tf.stack(x[0], axis=bn_axis),
                   tf.stack(x[1], axis=bn_axis)],
        name=name + '_splat_stack')([splits, x])
    x = layers.Multiply(name=name + '_splat_mult')(x)
    x = layers.Lambda(lambda x: tf.unstack(x, axis=bn_axis),
                      name=name + '_splat_unstack')(x)
    x = layers.Add(name=name + '_splat_add')(x)
    return x
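Example #5 calls an RSoftmax helper that is not shown. In ResNeSt, r-softmax normalizes the attention logits across the radix splits within each cardinal group; one sketch consistent with the shapes above (the exact reshape order varies between ports):

def RSoftmax(x, filters, radix, groups, name=None):
    # x arrives as (batch, 1, 1, filters), where filters already includes radix
    if radix > 1:
        x = layers.Reshape((groups, radix, filters // (groups * radix)),
                           name=name + '_rsoftmax_reshape')(x)
        x = layers.Softmax(axis=2, name=name + '_rsoftmax')(x)  # over radix
        x = layers.Reshape((1, 1, filters), name=name + '_rsoftmax_restore')(x)
    else:
        x = layers.Activation('sigmoid')(x)
    return x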
Example #6
    def small_densenet(self,
                       img_input_shape=(64, 64, 3),
                       blocks=[6, 12, 24, 16],
                       weight_decay=1e-4,
                       kernel_initializer='he_normal',
                       init_filters=None,
                       reduction=None,
                       growth_rate=None,
                       init_stride=None):
        img_input = layers.Input(shape=img_input_shape)

        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
        x = layers.Conv2D(init_filters,
                          3,
                          strides=init_stride,
                          use_bias=False,
                          kernel_initializer=kernel_initializer,
                          kernel_regularizer=l2(weight_decay),
                          name='conv1/conv')(x)
        x = layers.BatchNormalization(axis=3,
                                      epsilon=1.001e-5,
                                      name='conv1/bn')(x)
        x = layers.Activation('relu', name='conv1/relu')(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.AveragePooling2D(3, strides=2, name='pool1')(x)

        for i, block in enumerate(blocks):
            scope_num_str = str(i + 2)
            x = self.dense_block(x,
                                 block,
                                 name='conv' + scope_num_str,
                                 growth_rate=growth_rate,
                                 weight_decay=weight_decay,
                                 kernel_initializer=kernel_initializer)
            if i != len(blocks) - 1:
                x = self.transition_block(
                    x,
                    reduction,
                    name='pool' + scope_num_str,
                    weight_decay=weight_decay,
                    kernel_initializer=kernel_initializer)
        x = layers.BatchNormalization(axis=3, epsilon=1.001e-5, name='bn')(x)
        x = layers.Activation('relu', name='relu')(x)

        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(self.cat_max,
                         activation='softmax',
                         kernel_initializer=kernel_initializer,
                         name='fc')(x)

        model = Model(img_input, x)
        model.compile(optimizer=Adam(lr=self.lr),
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])

        return model
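Example #6 (and Example #30 below) assume dense_block and transition_block methods. In the stock Keras DenseNet these are a bottleneck conv pair plus concatenation; a minimal functional sketch with layer names and regularizers omitted:

def dense_block(x, blocks, name=None, growth_rate=32, **kwargs):
    for _ in range(blocks):
        y = layers.BatchNormalization(epsilon=1.001e-5)(x)
        y = layers.Activation('relu')(y)
        y = layers.Conv2D(4 * growth_rate, 1, use_bias=False)(y)
        y = layers.BatchNormalization(epsilon=1.001e-5)(y)
        y = layers.Activation('relu')(y)
        y = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False)(y)
        x = layers.Concatenate()([x, y])   # dense connectivity
    return x

def transition_block(x, reduction, name=None, **kwargs):
    x = layers.BatchNormalization(epsilon=1.001e-5)(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(int(x.shape[-1] * reduction), 1, use_bias=False)(x)
    return layers.AveragePooling2D(2, strides=2)(x)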
Example #7
def convnet(input_shape,
            output_size,
            conv_filters=(64, 64, 64),
            conv_kernel_sizes=(3, 3, 3),
            conv_strides=(2, 2, 2),
            use_global_average_pool=False,
            normalization_type=None,
            normalization_kwargs={},
            downsampling_type='conv',
            name='convnet',
            *args,
            **kwargs):
    assert downsampling_type in ('pool', 'conv'), downsampling_type

    img_input = layers.Input(shape=input_shape, dtype=tf.float32)
    x = img_input

    for (conv_filter, conv_kernel_size,
         conv_stride) in zip(conv_filters, conv_kernel_sizes, conv_strides):
        x = layers.Conv2D(
            filters=conv_filter,
            kernel_size=conv_kernel_size,
            strides=(conv_stride if downsampling_type == 'conv' else 1),
            padding="SAME",
            activation='linear',
            *args,
            **kwargs)(x)

        if normalization_type == 'batch':
            x = layers.BatchNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'layer':
            x = LayerNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'group':
            x = GroupNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'instance':
            x = InstanceNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'weight':
            raise NotImplementedError(normalization_type)
        else:
            assert normalization_type is None, normalization_type

        x = layers.LeakyReLU()(x)

        if downsampling_type == 'pool' and conv_stride > 1:
            x = layers.AvgPool2D(pool_size=conv_stride,
                                 strides=conv_stride)(x)

    if use_global_average_pool:
        x = layers.GlobalAveragePooling2D(name='average_pool')(x)
    else:
        x = tf.keras.layers.Flatten()(x)

    model = models.Model(img_input, x, name=name)
    model.summary()
    return model
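With the defaults, Example #7 needs nothing beyond tf.keras, so a quick smoke test is easy (note that output_size is accepted but never used in the body; the classification head is left to the caller):

model = convnet(input_shape=(32, 32, 3),
                output_size=None,
                use_global_average_pool=True)
assert model.output_shape == (None, 64)   # one feature per final conv filter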
Example #8
def build(input_shape, num_outputs, block_type, repetitions, filter=64, k=1):
    '''Factory function that builds a ResNet model.

    Arguments:
        input_shape: shape of the input
        num_outputs: number of outputs of the network
        block_type : type of residual block ('basic' or 'bottleneck')
        repetitions: how many copies of the same residual block to stack
    '''
    # Choose the function that builds the residual block according to block_type.
    if block_type == 'basic':
        block_fn = basic_block
    elif block_type == 'bottleneck':
        block_fn = bottleneck_block

    # Build the model.
    ##############################################
    input = layers.Input(shape=input_shape)

    # conv1 (conv -> batch normalization -> ReLU)
    conv1 = utils.compose(
        ResNetConv2D(filters=filter,
                     kernel_size=(7, 7),
                     strides=(2, 2),
                     input_shape=input_shape), layers.BatchNormalization(),
        layers.Activation('relu'))(input)

    # pool
    pool1 = layers.MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='same')(conv1)

    # conv2_x, conv3_x, conv4_x, conv5_x
    block = pool1
    for i, r in enumerate(repetitions):
        block = residual_blocks(block_fn,
                                filters=filter * k,
                                repetitions=r,
                                is_first_layer=(i == 0))(block)
        filter *= 2

    # batch normalization -> ReLU
    block = utils.compose(layers.BatchNormalization(),
                          layers.Activation('relu'))(block)

    # global average pooling
    pool2 = layers.GlobalAveragePooling2D()(block)

    # dense
    fc1 = layers.Dense(units=num_outputs,
                       kernel_initializer='he_normal',
                       activation='softmax')(pool2)

    return models.Model(inputs=input, outputs=fc1)
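Example #8 leans on a utils.compose helper; judging from its use it chains callables left to right. A minimal sketch with that behavior:

from functools import reduce

def compose(*funcs):
    # compose(f, g, h)(x) == h(g(f(x))) -- apply left to right
    return reduce(lambda f, g: lambda *args, **kw: g(f(*args, **kw)), funcs)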
Example #9
def resnet50(num_classes, batch_size=None):
    """Instantiates the ResNet50 architecture.

    Args:
      num_classes: `int` number of classes for image classification.
      batch_size: Size of the batches for each step.

    Returns:
        A Keras model instance.
    """

    input_shape = (224, 224, 3)
    img_input = layers.Input(shape=input_shape, batch_size=batch_size)
    x = img_input

    if backend.image_data_format() == 'channels_first':
        x = layers.Permute((3, 1, 2))(x)
        bn_axis = 1
    else:  # channels_last
        bn_axis = -1

    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    x = resnet_block(x,
                     size=3,
                     kernel_size=3,
                     filters=256,
                     stage=2,
                     conv_strides=(1, 1))
    x = resnet_block(x, size=4, kernel_size=3, filters=512, stage=3)
    x = resnet_block(x, size=6, kernel_size=3, filters=1024, stage=4)
    x = resnet_block(x, size=3, kernel_size=3, filters=2048, stage=5)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes,
                     kernel_initializer='he_normal',
                     kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                     name='fc1000')(x)

    # A softmax that is followed by the model loss cannot be done in float16
    # due to numeric issues, so we pass dtype='float32'.
    x = layers.Activation('softmax', dtype='float32')(x)

    # Create model.
    return models.Model(img_input, x, name='resnet50')
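The float32 softmax in Example #9 only matters under a float16 policy; a hedged usage sketch, assuming the surrounding helpers (resnet_block, L2_WEIGHT_DECAY) are defined:

from tensorflow.keras import mixed_precision

# Under mixed_float16 the conv stack runs in float16, while the final softmax
# stays in float32 to avoid overflow/underflow feeding the loss.
mixed_precision.set_global_policy('mixed_float16')
model = resnet50(num_classes=1000)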
Example #10
    def __init__(self, out_channels, norm_layer, norm_kwargs,
                 conv_trainable=True, **kwargs):
        super(ASPPPooling, self).__init__()
        self.gap = tf.keras.Sequential([
            klayers.GlobalAveragePooling2D(),
            # restore the two spatial dims: (B, C) -> (B, 1, 1, C)
            klayers.Lambda(lambda x: tf.keras.backend.expand_dims(x, 1)),
            klayers.Lambda(lambda x: tf.keras.backend.expand_dims(x, 1)),
            klayers.Conv2D(out_channels, kernel_size=1,
                           kernel_initializer='he_uniform', use_bias=False,
                           trainable=conv_trainable),
            norm_layer(**({} if norm_kwargs is None else norm_kwargs)),
            klayers.ReLU()
        ])
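Example #10 shows only the constructor. ASPP pooling conventionally resizes the pooled context back to the input's spatial size in call(); a sketch consistent with this constructor (not the author's actual method):

    def call(self, x, training=None):
        size = tf.shape(x)[1:3]                  # original spatial dims
        pooled = self.gap(x, training=training)  # (B, 1, 1, out_channels)
        return tf.image.resize(pooled, size)     # broadcast the context map back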
Example #11
    def top(self, x, name="Top"):
        # TF1-style code: variable_scope and tf.layers are compat-v1 APIs.
        with tf.variable_scope(name, use_resource=True):
            if self.top_width > 0:
                x = self.conv(x, 1, 1, self.top_width, bias=False)
                x = self.norm(x)
                x = self.activation(x)
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
            if self.dropout_rate > 0:
                x = tf.layers.Dropout(self.dropout_rate, name='top_dropout')(
                    x, training=self.is_training)
            x = self.fc(x, self.num_classes, 'logits', seed=None)
            return x
Example #12
def nin(input_shape=(img_size, img_size, channel), classes=100, single=False):
    main_input = layers.Input(input_shape)
    x = nin_block(main_input, (3, 3), 64, single=single)
    x = layers.MaxPooling2D()(x)
    x = nin_block(x, (3, 3), 128, single=single)
    x = layers.MaxPooling2D()(x)
    x = nin_block(x, (3, 3), 256, single=single)
    x = layers.MaxPooling2D()(x)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(classes, activation='softmax')(x)

    return Model(main_input, x)
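Example #12 assumes a nin_block helper. Following the Network-in-Network pattern (one spatial conv followed by 1x1 "mlpconv" layers), a plausible sketch in which `single` skips the 1x1 stack:

def nin_block(x, kernel_size, filters, single=False):
    x = layers.Conv2D(filters, kernel_size, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    if not single:
        for _ in range(2):   # the 1x1 "mlpconv" layers
            x = layers.Conv2D(filters, (1, 1))(x)
            x = layers.BatchNormalization()(x)
            x = layers.Activation('relu')(x)
    return x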
Example #13
    def __init__(self,
                 dropout_keep_prob=0.4,
                 bottleneck_layer_size=512,
                 use_center_loss=False,
                 num_classes=8631):
        super(ThawedModel1, self).__init__()

        self.conv4 = BaseConvBlock(80, (1, 1),
                                   padding='valid',
                                   name='Conv2d_3b_1x1')
        self.conv5 = BaseConvBlock(192, (3, 3),
                                   padding='valid',
                                   name='Conv2d_4a_3x3')
        self.conv6 = BaseConvBlock(256, (3, 3),
                                   strides=(2, 2),
                                   padding='valid',
                                   name='Conv2d_4b_3x3')

        self.block35 = [Block35(256, scale=0.17) for _ in range(5)]

        self.reduction_a = ReductionA(192, 192, 256, 384)  # 256 + 256 + 384

        self.block17 = [
            Block17(256 + 256 + 384, scale=0.10) for _ in range(10)
        ]

        self.reduction_b = ReductionB()

        self.block8 = [Block8(1792, scale=0.20,
                              activation_fn=tf.nn.relu if i < 5 else None)
                       for i in range(6)]

        self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')

        self.flatten = layers.Flatten()
        self.dropout = layers.Dropout(1 - dropout_keep_prob)

        self.embedding = layers.Dense(bottleneck_layer_size,
                                      name='Bottleneck',
                                      use_bias=False)
        self.last_bn = layers.BatchNormalization()
        # pylint: disable=line-too-long
        self.classifier = layers.Dense(
            num_classes,
            kernel_initializer=initializers.glorot_uniform,
            kernel_regularizer=regularizers.l2(5e-4),
            name='Logits')
        self.activation = layers.Activation('softmax')

        self.use_center_loss = use_center_loss
        if use_center_loss:
            self.center_loss = CenterLoss(num_classes, 512)
Example #14
def build_model(cnn_net):
    model = Sequential()
    model.add(cnn_net)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(5, activation='sigmoid'))
    
    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(lr=0.00005),
        metrics=['accuracy']
    )
    
    return model
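Example #14 expects a headless backbone as cnn_net; for instance (the backbone choice here is illustrative):

cnn_net = tf.keras.applications.DenseNet121(weights='imagenet',
                                            include_top=False,
                                            input_shape=(224, 224, 3))
model = build_model(cnn_net)   # 5 sigmoid outputs, e.g. multi-label targets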
Example #15
    def call(self, inputs, training=True, mask=None):
        blocks = self.sep(inputs)
        if self.use_tfwise:
            b_attention = self.tfwise_attention(blocks, training)
        avgs = layers.GlobalAveragePooling2D()(blocks)
        attention = self.dense1(avgs)
        attention = self.dense2(attention)
        attention = tf.nn.sigmoid(attention)
        for i in range(2):
            attention = tf.expand_dims(attention, axis=1)
        attention = tf.broadcast_to(attention, blocks.shape)
        if self.use_tfwise:
            attention *= b_attention
        attention = tf.reshape(attention, inputs.shape)
        return attention
Example #16
def create_models():
    input = layers.Input((32, 32, 3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)

    return Model(input, x)
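Example #16 assumes a conv_bn_relu helper; a minimal sketch matching the call sites:

def conv_bn_relu(x, filters):
    x = layers.Conv2D(filters, (3, 3), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)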
Example #17
def build_model(input_shape,
                output_num,
                use_deformable=False,
                num_deform_group=0):
    num_deform_group = None if num_deform_group == 0 else num_deform_group
    input_layer = layers.Input(shape=input_shape, dtype=tf.float32)

    x = layers.Conv2D(filters=32,
                      kernel_size=3,
                      strides=(1, 1),
                      padding="same")(input_layer)
    x = layers.BatchNormalization(axis=-1)(x)
    x = layers.Activation("relu")(x)
    x = layers.MaxPooling2D(pool_size=2)(x)

    if use_deformable:
        x = DeformableConvLayer(filters=64,
                                kernel_size=3,
                                strides=(1, 1),
                                padding="same",
                                num_deformable_group=num_deform_group)(x)
    else:
        x = layers.Conv2D(filters=64,
                          kernel_size=3,
                          strides=(1, 1),
                          padding="same")(x)
    x = layers.BatchNormalization(axis=-1)(x)
    x = layers.Activation("relu")(x)
    x = layers.MaxPooling2D(pool_size=2)(x)

    if use_deformable:
        x = DeformableConvLayer(filters=64,
                                kernel_size=3,
                                strides=(1, 1),
                                padding="same",
                                num_deformable_group=num_deform_group)(x)
    else:
        x = layers.Conv2D(filters=64,
                          kernel_size=3,
                          strides=(1, 1),
                          padding="same")(x)
    x = layers.BatchNormalization(axis=-1)(x)
    x = layers.Activation("relu")(x)
    x = layers.GlobalAveragePooling2D()(x)

    out = layers.Dense(units=output_num, activation="softmax")(x)
    return Model(inputs=[input_layer], outputs=[out])
Example #18
    def __init__(self, layer_dims, num_classes):
        super(ResNet, self).__init__()
        self.stem = Sequential([
            layers.Conv2D(64, (3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])

        self.layer1 = self.build_resblock(filter_num=64, blocks=layer_dims[0])
        self.layer2 = self.build_resblock(filter_num=128, blocks=layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(filter_num=256, blocks=layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(filter_num=512, blocks=layer_dims[3], stride=2)

        self.avg_pool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(units=num_classes)
Example #19
    def __init__(self, layer_dims, num_classes=10):
        super(ResNet, self).__init__()
        # Stem: a 64-filter 3x3 conv, then BN, ReLU activation, and pooling.
        self.stem = Sequential([
            layers.Conv2D(64, (3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])
        # Build the four residual stages (Res Blocks).
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)

        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)
Example #20
def build_model2():
    effnet = efn.EfficientNetB5(weights=None,
                                include_top=False,
                                input_shape=(IMG_SIZE, IMG_SIZE, 3))
    effnet.load_weights('/home/td/桌面/efficientnet-b5_imagenet_1000_notop.h5')
    for i, layer in enumerate(effnet.layers):
        if "batch_normalization" in layer.name:
            # GroupNormalization (e.g. tfa.layers.GroupNormalization) is
            # intended here: BatchNormalization has no `groups` argument.
            # Note that assigning into `effnet.layers` does not rewire an
            # already-built graph.
            effnet.layers[i] = GroupNormalization(groups=32,
                                                  axis=-1,
                                                  epsilon=0.00001)
    model = Sequential()
    model.add(effnet)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(5, activation="elu"))
    model.add(layers.Dense(1, activation="linear"))
    model.compile(loss='mse', optimizer=Adam(lr=0.0005), metrics=['acc'])
    return model
Example #21
def _se_block(inputs, filters, se_ratio, prefix):
    x = layers.GlobalAveragePooling2D(name=prefix +
                                      'squeeze_excite/AvgPool')(inputs)
    if backend.image_data_format() == 'channels_first':
        x = layers.Reshape((filters, 1, 1))(x)
    else:
        x = layers.Reshape((1, 1, filters))(x)
    x = layers.Conv2D(_depth(filters * se_ratio),
                      kernel_size=1,
                      padding='same',
                      name=prefix + 'squeeze_excite/Conv')(x)
    x = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(x)
    x = layers.Conv2D(filters,
                      kernel_size=1,
                      padding='same',
                      name=prefix + 'squeeze_excite/Conv_1')(x)
    x = hard_sigmoid(x)
    x = layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
    return x
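Example #21 mirrors the Keras MobileNetV3 squeeze-excite block; its _depth and hard_sigmoid helpers are not shown. In that source they round channel counts to a divisor and use a ReLU6-based gate, roughly:

def _depth(v, divisor=8, min_value=None):
    # round v to a multiple of divisor, never dropping below 90% of v
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

def hard_sigmoid(x):
    # piecewise-linear sigmoid approximation: relu6(x + 3) / 6
    return layers.ReLU(6.)(x + 3.) * (1. / 6.)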
Example #22
def vgg(layer_cfg,
        input_shape=(img_size, img_size, channel),
        weight_decay=5e-4,
        use_bias=False,
        use_fc=False):

    block_num = 1
    conv_num = 1
    x = layers.Input(input_shape, name='vgg16_bn')
    main_input = x

    for layer in layer_cfg:
        if layer == 'M':
            x = layers.MaxPooling2D((2, 2),
                                    strides=(2, 2),
                                    name='block%d_pool' % block_num)(x)
            block_num += 1
            conv_num = 1
            continue

        x = layers.Conv2D(layer, (3, 3),
                          padding='same',
                          name='block%d_conv%d' % (block_num, conv_num),
                          kernel_regularizer=l2(weight_decay),
                          use_bias=use_bias)(x)
        x = layers.BatchNormalization(name='block%d_bn%d' %
                                      (block_num, conv_num))(x)
        x = layers.Activation('relu',
                              name='block%d_relu%d' % (block_num, conv_num))(x)
        conv_num += 1

    if use_fc:
        x = layers.Flatten()(x)  # Dense expects a flat feature vector here
        x = layers.Dense(4096)(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(rate=0.5)(x)
        x = layers.Dense(4096)(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(rate=0.5)(x)
    else:
        x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes, activation='softmax')(x)
    return Model(main_input, x)
Example #23
    def layer(input_tensor):
        # get number of channels/filters
        channels = backend.int_shape(input_tensor)[channels_axis]

        x = input_tensor

        # squeeze-and-excitation block in PyTorch style, with 1x1 convs
        # standing in for the fully connected layers
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Lambda(expand_dims,
                          arguments={'channels_axis': channels_axis})(x)
        x = layers.Conv2D(channels // reduction, (1, 1),
                          kernel_initializer='he_uniform')(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(channels, (1, 1), kernel_initializer='he_uniform')(x)
        x = layers.Activation('sigmoid')(x)

        # apply attention
        x = layers.Multiply()([input_tensor, x])

        return x
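The expand_dims passed to the Lambda in Example #23 is an external helper; given the channels_axis argument, a plausible definition:

def expand_dims(x, channels_axis):
    # restore the two spatial dims removed by global pooling
    if channels_axis == 3:          # channels_last: (B, C) -> (B, 1, 1, C)
        return x[:, None, None, :]
    return x[:, :, None, None]      # channels_first: (B, C) -> (B, C, 1, 1)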
Example #24
def DenseNet121(input_shape=None):
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=True)

    img_input = layers.Input(shape=input_shape)

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)

    x = dense_block(x, 6, name='conv2')
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, 12, name='conv3')
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, 24, name='conv4')
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, 16, name='conv5')

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)

    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)

    imagenet_utils.validate_activation('softmax', None)
    x = layers.Dense(NUM_CLASSES, activation='softmax', name='predictions')(x)

    # Create model.
    model = training.Model(img_input, x, name='densenet121')

    return model
Example #25
    def create_model(self,
                     model_name,
                     input_size,
                     embedding_size,
                     num_classes,
                     include_top=False):
        base_network = model_sets[model_name](input_shape=(input_size,
                                                           input_size, 3),
                                              include_top=include_top,
                                              weights='imagenet')

        base_network.trainable = True
        inputs = base_network.input
        x = base_network(inputs)
        x = layers.GlobalAveragePooling2D()(x)
        embedding = layers.Dense(embedding_size, name='embedding')(x)
        # logits = layers.Dense(num_classes,name='logits')(embedding)
        # model = keras.Model(inputs=inputs, outputs={'embedding':embedding, 'logits':logits})
        model = keras.Model(inputs=inputs, outputs=embedding)
        model.summary()
        return model
Example #26
def resnet(block,
           num_block,
           input_shape=(img_size, img_size, channel),
           weight_decay=5e-4):
    main_input = layers.Input(input_shape)

    x = layers.Conv2D(64, (3, 3),
                      padding='same',
                      kernel_regularizer=l2(weight_decay),
                      kernel_initializer='he_normal',
                      use_bias=False)(main_input)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = make_layer(x, block, 64, num_block[0], 1)
    x = make_layer(x, block, 128, num_block[1], 2)
    x = make_layer(x, block, 256, num_block[2], 2)
    x = make_layer(x, block, 512, num_block[3], 2)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes, activation='softmax')(x)

    return Model(main_input, x)
Example #27
def basic(num_classes, input_shape, hparams):
    return keras.Sequential([
        layers.Conv2D(16, 3, input_shape=input_shape, activation='relu'),
        layers.BatchNormalization(center=False, scale=False),

        layers.Conv2D(32, 3, 2, activation='relu'),
        layers.BatchNormalization(center=False, scale=False),

        layers.Conv2D(32, 3, 1, activation='relu'),
        layers.BatchNormalization(center=False, scale=False),

        layers.Conv2D(64, 3, 2, activation='relu'),
        layers.BatchNormalization(center=False, scale=False),

        layers.Conv2D(64, 3, 1, activation='relu'),
        layers.BatchNormalization(center=False, scale=False),

        layers.GlobalAveragePooling2D(),

        layers.Dense(num_classes, activation='softmax', name='label'),
    ])
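Example #27 returns an uncompiled Sequential (hparams is accepted but unused in the body); a minimal usage sketch:

model = basic(num_classes=10, input_shape=(32, 32, 3), hparams=None)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()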
Example #28
    def create_model(num_freezedLayers=16,
                     img_width=224,
                     img_height=224,
                     nb_classes=9,
                     name_fclayer="fc1",
                     noveltyDetectionLayerSize=1024,
                     optimizer=tf.keras.optimizers.SGD(lr=0.0001,
                                                       momentum=0.9),
                     loss='binary_crossentropy'):

        # create the base pre-trained model
        base_model = tf.keras.applications.VGG16(weights="imagenet",
                                                 include_top=False,
                                                 input_shape=(img_width,
                                                              img_height, 3))

        # add a global spatial average pooling layer
        x = base_model.output
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Dense(noveltyDetectionLayerSize,
                         activation='relu',
                         name=name_fclayer)(x)
        predictions = layers.Dense(nb_classes, activation='sigmoid')(x)

        # final model
        model = tf.keras.models.Model(inputs=base_model.input,
                                      outputs=predictions)

        # freeze layers for transfer learning
        for layer in model.layers[:num_freezedLayers]:
            layer.trainable = False
        for layer in model.layers[num_freezedLayers:]:
            layer.trainable = True

        # compile model and return it
        model.compile(optimizer=optimizer, loss=loss)

        return model
Example #29
def ResNet50V2(input_shape=None):
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=True)

    img_input = layers.Input(shape=input_shape)

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)),
                             name='conv1_pad')(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv')(x)

    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)

    x = stack2(x, 64, 3, name='conv2')
    x = stack2(x, 128, 4, name='conv3')
    x = stack2(x, 256, 6, name='conv4')
    x = stack2(x, 512, 3, stride1=1, name='conv5')

    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name='post_bn')(x)
    x = layers.Activation('relu', name='post_relu')(x)

    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation('softmax', None)
    x = layers.Dense(NUM_CLASSES, activation='softmax', name='predictions')(x)

    # Create model.
    model = training.Model(img_input, x, name="resnet50v2")

    return model
Example #30
    def small_densenet(self,
                       img_input_shape=(64, 64, 3),
                       blocks=[6, 12, 24, 16]):
        img_input = Input(shape=img_input_shape)
        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
        x = layers.Conv2D(64, 7, strides=2, use_bias=False,
                          name='conv1/conv')(x)
        x = layers.BatchNormalization(axis=3,
                                      epsilon=1.001e-5,
                                      name='conv1/bn')(x)
        x = layers.Activation('relu', name='conv1/relu')(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)

        x = self.dense_block(x, blocks[0], name='conv2')
        x = self.transition_block(x, 0.5, name='pool2')
        x = self.dense_block(x, blocks[1], name='conv3')
        x = self.transition_block(x, 0.5, name='pool3')
        x = self.dense_block(x, blocks[2], name='conv4')
        x = self.transition_block(x, 0.5, name='pool4')
        x = self.dense_block(x, blocks[3], name='conv5')

        x = layers.BatchNormalization(axis=3, epsilon=1.001e-5, name='bn')(x)
        x = layers.Activation('relu', name='relu')(x)

        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        # x = Lambda(lambda x: x, name = 'densenet_features')(x)
        x = layers.Dense(self.cat_max, activation='softmax', name='fc')(x)

        model = Model(img_input, x)
        # print (model.summary())
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])

        return model