Example #1
def DenseBlock(stack,
               n_layers,
               growth_rate,
               dropout_p,
               attention_module=None,
               scope=None):
    """
    DenseBlock for DenseNet and FC-DenseNet.
    Arguments:
      stack: input 4D tensor
      n_layers: number of internal layers
      growth_rate: number of feature maps per internal layer
      dropout_p: dropout rate used inside each preact_conv
      attention_module: optional attention module name (e.g. 'cbam_block')
      scope: name scope for the block
    Returns:
      stack: current stack of feature maps (4D tensor)
      new_features: 4D tensor containing only the new feature maps generated
        in this block
    """
    with tf.name_scope(scope) as sc:
        new_features = []
        for j in range(n_layers):
            # Compute new feature maps
            layer = preact_conv(stack, growth_rate, dropout_p=dropout_p)
            new_features.append(layer)
            # Stack new layer
            stack = tf.concat([stack, layer], axis=-1)
        new_features = tf.concat(new_features, axis=-1)

        # Attach the optional attention module and keep its output on the stack
        stack = attach_attention_module(stack, attention_module, scope)

        return stack, new_features
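A minimal usage sketch (assuming TF1-style graphs and that preact_conv and attach_attention_module come from the surrounding module; the shapes and names here are hypothetical):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 224, 224, 48], name='db_input')
# Grow a 48-channel stack by 4 layers of 12 feature maps each:
# stack ends up with 48 + 4*12 = 96 channels, new_features with 4*12 = 48.
stack, new_features = DenseBlock(inputs,
                                 n_layers=4,
                                 growth_rate=12,
                                 dropout_p=0.2,
                                 attention_module='cbam_block',
                                 scope='denseblock1')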
Example #2
def TransitionDown(inputs,
                   n_filters,
                   dropout_p=0.2,
                   attention_module=None,
                   scope=None):
    """
    Transition Down (TD) for FC-DenseNet.
    Applies BN + ReLU + 1x1 conv, then 2x2 max pooling.
    """
    with tf.name_scope(scope) as sc:
        l = preact_conv(inputs,
                        n_filters,
                        kernel_size=[1, 1],
                        dropout_p=dropout_p)
        l = slim.pool(l, [2, 2], stride=[2, 2], pooling_type='MAX')

        # Attach the optional attention module and keep its output
        l = attach_attention_module(l, attention_module, scope)

        return l
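A follow-on sketch of the FC-DenseNet downsampling step (hypothetical names, same module-level helpers assumed):

# Keep the pre-pooling stack for the skip connection, then halve the spatial size.
skip = stack
stack = TransitionDown(stack,
                       n_filters=96,
                       dropout_p=0.2,
                       attention_module='cbam_block',
                       scope='transitiondown1')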
Example #3
def conv_block2(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    '''conv_block is the block that has a conv layer at the shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the numbers of filters of the 3 conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    Note that from stage 3, the first conv layer at the main path uses strides=(2, 2),
    and the shortcut uses strides=(2, 2) as well.
    '''
    nb_filter1, nb_filter2, nb_filter3 = filters
    # if K.image_dim_ordering() == 'tf':
    #     bn_axis = 3
    # else:
    #     bn_axis = 1
    bn_axis = 3
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    # shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = attach_attention_module(x, 'cbam_block')
    x = add([x, shortcut])
    x = Activation('relu')(x)
    return x
Example #4
def _wasnet_block(x, filter, strides=(2, 2), nl='RE'):

    residual = layers.Conv2D(filter,
                             kernel_size=(1, 1),
                             strides=strides,
                             padding='same')(x)
    residual = layers.BatchNormalization(axis=bn_axis)(residual)

    cbam = attach_attention_module(residual, attention_module='cbam_block')

    x = layers.SeparableConv2D(filter, (3, 3), padding='same')(x)
    x = layers.BatchNormalization(axis=bn_axis)(x)
    x = return_activation(x, nl)
    x = layers.SeparableConv2D(filter, (3, 3), padding='same')(x)
    x = layers.BatchNormalization(axis=bn_axis)(x)

    x = layers.MaxPooling2D((3, 3), strides=strides, padding='same')(x)
    x = layers.add([x, residual, cbam])

    return x
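A hypothetical way this block might be stacked; it assumes bn_axis, return_activation and attach_attention_module are defined at module level (bn_axis = 3 for channels_last):

from tensorflow.keras import layers

inputs = layers.Input(shape=(224, 224, 3))
x = layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same')(inputs)  # stem
x = _wasnet_block(x, 64, strides=(2, 2), nl='RE')
x = _wasnet_block(x, 128, strides=(2, 2), nl='RE')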
Example #5
def identity_block2(input_tensor, kernel_size, filters, stage, block):
    '''The identity_block is the block that has no conv layer at the shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the numbers of filters of the 3 conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 3
    # if K.image_dim_ordering() == 'tf':
    #     bn_axis = 3
    # else:
    #     bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter2, (kernel_size, kernel_size),
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = attach_attention_module(x, 'cbam_block')
    x = add([x, input_tensor])
    x = Activation('relu')(x)
    return x
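A hypothetical ResNet-style stage assembled from the two blocks above (x stands for any 4D Keras tensor; conv_block2 changes the channel count via the strided shortcut, identity_block2 keeps the shape):

x = conv_block2(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block2(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block2(x, 3, [64, 64, 256], stage=2, block='c')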
def CGS(data):
    # The encoder follows VGG16 without the fully connected layers; the input size is fixed
    # at 224*224, and the last encoder layer is block5_pool (MaxPooling2D) with size 7*7*512.
    # base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 13), input_tensor=data)
    # # make the VGG part trainable
    # base.trainable = True

    x0 = layers.Conv2D(64, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block1_conv')(data)
    y = attach_attention_module(x0, 'cbam_block')
    fusion = layers.add([x0, y])
    x0 = layers.Activation('relu')(fusion)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x0)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x1 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x1)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x2 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv1')(x2)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x3 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv1')(x3)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x4 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv1')(x4)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x5 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    # ## decoder part written by myself

    # # 14*14*512
    x = layers.Deconv2D(512, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv1')(x5)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x4])
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv1_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv1_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv1_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx1 = layers.Activation('relu')(fusion)
    # x = layers.Conv2D(512, (1, 1), padding='same', activation='relu', name='deconv1_conv4')(x)
    # y = attach_attention_module(x, 'cbam_block')
    # x = layers.add([x, y])
    # x = layers.Activation('relu')(x)

    # 28*28*256
    x = layers.Deconv2D(256, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv2')(xx1)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x3])
    x = layers.Conv2D(256, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv2_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv2_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv2_conv3')(x)
    # x = layers.Conv2D(256, (1, 1), padding='same', activation='relu', name='deconv2_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx2 = layers.Activation('relu')(fusion)

    # 56*56*128
    x = layers.Deconv2D(128, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv3')(xx2)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x2])
    x = layers.Conv2D(128, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv3_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(128, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv3_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(128, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv3_conv3')(x)
    # x = layers.Conv2D(128, (1, 1), padding='same', activation='relu', name='deconv3_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx3 = layers.Activation('relu')(fusion)

    # 112*112*64
    x = layers.Deconv2D(64, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv4')(xx3)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x1])
    x = layers.Conv2D(64, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv4_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(64, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv4_conv2')(x)
    # x = layers.Conv2D(64, (1, 1), padding='same', activation='relu', name='deconv4_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx4 = layers.Activation('relu')(fusion)

    # 224*224*32
    x = layers.Deconv2D(32, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv5')(xx4)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x0])
    x = layers.Conv2D(32, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv5_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(32, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv5_conv2')(x)
    # x = layers.Conv2D(32, (1, 1), padding='same', activation='relu', name='deconv5_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)

    # 224*224*1  result 1
    x0 = layers.Conv2D(1, (1, 1),
                       padding='same',
                       activation='relu',
                       name='conv6_end')(x)

    # concat
    d2 = layers.Lambda(lambda x: x[:, :, :, 3])(data)
    y7 = layers.Lambda(lambda x: K.expand_dims(x, axis=3))(d2)
    f = layers.Concatenate()([y7, x0])

    # optimization
    fusion1 = layers.Conv2D(64, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion1_conv1')(f)
    y = attach_attention_module(fusion1, 'cbam_block')
    fusion1 = layers.add([fusion1, y])
    fusion1 = layers.Activation('relu')(fusion1)
    fusion1 = layers.Conv2D(64, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion1_conv2')(fusion1)
    y = attach_attention_module(fusion1, 'cbam_block')
    fusion1 = layers.add([fusion1, y])
    fusion1 = layers.Activation('relu')(fusion1)
    fusion1 = layers.MaxPooling2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='fusion1_maxpooling')(fusion1)

    fusion2 = layers.Conv2D(128, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion2_conv1')(fusion1)
    y = attach_attention_module(fusion2, 'cbam_block')
    fusion2 = layers.add([fusion2, y])
    fusion2 = layers.Activation('relu')(fusion2)
    fusion2 = layers.Conv2D(128, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion2_conv2')(fusion2)
    y = attach_attention_module(fusion2, 'cbam_block')
    fusion2 = layers.add([fusion2, y])
    fusion2 = layers.Activation('relu')(fusion2)
    fusion2 = layers.MaxPooling2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='fusion2_maxpooling')(fusion2)

    fusion3 = layers.Conv2D(256, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion3_conv1')(fusion2)
    y = attach_attention_module(fusion3, 'cbam_block')
    fusion3 = layers.add([fusion3, y])
    fusion3 = layers.Activation('relu')(fusion3)
    fusion3 = layers.Conv2D(256, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion3_conv2')(fusion3)
    y = attach_attention_module(fusion3, 'cbam_block')
    fusion3 = layers.add([fusion3, y])
    fusion3 = layers.Activation('relu')(fusion3)
    fusion3 = layers.Conv2D(256, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion3_conv3')(fusion3)
    y = attach_attention_module(fusion3, 'cbam_block')
    fusion3 = layers.add([fusion3, y])
    fusion3 = layers.Activation('relu')(fusion3)
    fusion3 = layers.MaxPooling2D((2, 2),
                                  strides=(2, 2),
                                  padding='same',
                                  name='fusion3_maxpooling')(fusion3)

    fusion4 = layers.Deconv2D(128, (4, 4),
                              strides=(2, 2),
                              padding='same',
                              activation='relu',
                              name='fusion4_deconv')(fusion3)
    y = attach_attention_module(fusion4, 'cbam_block')
    fusion4 = layers.add([fusion4, y])
    fusion4 = layers.Activation('relu')(fusion4)
    fusion4 = layers.Conv2D(128, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion4_conv1')(fusion4)
    y = attach_attention_module(fusion4, 'cbam_block')
    fusion4 = layers.add([fusion4, y])
    fusion4 = layers.Activation('relu')(fusion4)
    fusion4 = layers.Conv2D(128, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion4_conv2')(fusion4)
    y = attach_attention_module(fusion4, 'cbam_block')
    fusion4 = layers.add([fusion4, y])
    fusion4 = layers.Activation('relu')(fusion4)
    fusion4 = layers.Conv2D(128, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion4_conv3')(fusion4)
    y = attach_attention_module(fusion4, 'cbam_block')
    fusion4 = layers.add([fusion4, y])
    fusion4 = layers.Activation('relu')(fusion4)

    fusion5 = layers.Deconv2D(64, (4, 4),
                              strides=(2, 2),
                              padding='same',
                              activation='relu',
                              name='fusion5_deconv')(fusion4)
    y = attach_attention_module(fusion5, 'cbam_block')
    fusion5 = layers.add([fusion5, y])
    fusion5 = layers.Activation('relu')(fusion5)
    fusion5 = layers.Conv2D(64, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion5_conv1')(fusion5)
    y = attach_attention_module(fusion5, 'cbam_block')
    fusion5 = layers.add([fusion5, y])
    fusion5 = layers.Activation('relu')(fusion5)
    fusion5 = layers.Conv2D(64, (3, 3),
                            padding='same',
                            activation='relu',
                            name='fusion5_conv2')(fusion5)
    y = attach_attention_module(fusion5, 'cbam_block')
    fusion5 = layers.add([fusion5, y])
    fusion5 = layers.Activation('relu')(fusion5)

    fusion6 = layers.Deconv2D(32, (4, 4),
                              strides=(2, 2),
                              padding='same',
                              activation='relu',
                              name='fusion6_deconv')(fusion5)
    y = attach_attention_module(fusion6, 'cbam_block')
    fusion6 = layers.add([fusion6, y])
    fusion6 = layers.Activation('relu')(fusion6)
    fusion6 = layers.Conv2D(32, (1, 1),
                            padding='same',
                            activation='relu',
                            name='fusion6_conv1')(fusion6)
    y = attach_attention_module(fusion6, 'cbam_block')
    fusion6 = layers.add([fusion6, y])
    fusion6 = layers.Activation('relu')(fusion6)
    fusion6 = layers.Conv2D(1, (1, 1),
                            padding='same',
                            activation='relu',
                            name='fusion6_conv2')(fusion6)
    # y = attach_attention_module(fusion6, 'cbam_block')
    # fusion6 = layers.add([fusion6, y])
    # fusion6 = layers.Activation('relu')(fusion6)
    fusion = layers.Conv2D(1, (1, 1),
                           padding='same',
                           activation='sigmoid',
                           name='fusion')(fusion6)

    # # train
    # return [fusion, fusion]
    # # return [x, fusion]

    # test
    m = Model(data, fusion, name='CGS')
    return m
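A hypothetical driver for CGS (it assumes layers, Model and K are already imported as used inside the function, and a 13-band 224*224 input as in the commented VGG16 call):

inp = layers.Input(shape=(224, 224, 13), name='cgs_input')
model = CGS(inp)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()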
Example #7
class_weight = class_weight.compute_class_weight(class_weight='balanced',
                                                 classes=np.unique(train_generator.classes),
                                                 y=train_generator.classes)

print(class_weight)

print("Downloading Base Model.....")

base_model = Xception(include_top=False, weights='imagenet')

for layer in base_model.layers:
    layer.trainable = False

# get layers and add average pooling layer
## set model architecture
x = base_model.output
x = attach_attention_module(x, attention_module)
x = GlobalAveragePooling2D()(x)
x = Dropout(dropout)(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(no_of_classes, activation='softmax')(x)

lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=2,
                               min_lr=0.5e-6)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer=Adam(lr_schedule(0)),
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy', 'accuracy'])
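compute_class_weight returns a NumPy array, while Keras' fit/fit_generator expects a dict keyed by class index; a small, hypothetical training step could look like this (generator and callback names follow the snippet above, epoch count is illustrative):

class_weight_dict = dict(enumerate(class_weight))

model.fit_generator(train_generator,
                    epochs=10,
                    class_weight=class_weight_dict,
                    callbacks=[lr_scheduler, lr_reducer])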
Example #8
def res_Net50(input, classes=51, attention_module=None):
    #global backend, layers, models, keras_utils
    #backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
    #x = layers.Lambda(finite_difference)(input)
    #print(x.get_shape())
    #exit()
    if attention_module is not None:
        # keep the attention output instead of discarding it
        x = attach_attention_module(input, 'fcbam_block')
    else:
        x = input
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
    x = layers.Conv2D(128, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1_he_normal')(x)
    x = layers.BatchNormalization(name='bn_conv1_he_normal')(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad_he_normal')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    x = conv_block(x,
                   3, [64, 64, 256],
                   stage=2,
                   block='a_he_normal',
                   strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b_he_normal')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c_he_normal')

    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)

    # linear = layers.Dense(units=512,activation='sigmoid',name='dense_layer_1')(x)
    # linear = layers.Dropout(rate=0.75)(linear)

    linear = layers.Dense(units=classes,
                          activation='softmax',
                          name='dense_layer')(x)

    model = Model(inputs=input, outputs=linear)

    weights_path = utils.get_file(
        'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        WEIGHTS_PATH_NO_TOP,
        cache_subdir='models',
        md5_hash='a268eb855778b3df3c7506639542a6af')
    model.load_weights(weights_path, by_name=True)
    return model
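A hypothetical usage sketch (WEIGHTS_PATH_NO_TOP, utils and the conv_block/identity_block helpers are assumed to exist in the surrounding module):

inputs = layers.Input(shape=(224, 224, 3))
model = res_Net50(inputs, classes=51, attention_module='cbam_block')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])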
Example #9
# for layer in model.layers[:172]:
#    layer.trainable = False
# for layer in model.layers[172:]:
#    layer.trainable = True

# change this code for every attribute - set the layers to true for training
for layer in base_model.layers:
    layer.trainable = False

# for i, layer in enumerate(base_model.layers):
#    print(i, layer.name)

# color attribute layer

color_attribute = base_model.output
color_attribute = attach_attention_module(color_attribute, attention_module)
color_attribute = GlobalAveragePooling2D()(color_attribute)
# let's add a fully-connected layer
color_attribute = Dropout(dropout)(color_attribute)
color_attribute = Dense(1024, activation='relu',
                        name="attribute_color")(color_attribute)
predictions_color = Dense(no_of_classes,
                          activation='softmax',
                          name="predictions_color")(color_attribute)

model = Model(inputs=base_model.input, outputs=predictions_color)

# model.load_weights("../models/label_color/label_color_inceptionv3_41_0.37.h5")
# print ("Checkpoint loaded.")

# this is the model we will train
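A hypothetical compile step for the single-attribute model above:

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])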
Example #10
def _depthwise_conv_block_v2(inputs, pointwise_conv_filters, alpha, expansion_factor,
                             depth_multiplier=1, strides=(1, 1), bn_epsilon=1e-3,
                             bn_momentum=0.99, weight_decay=0.0, block_id=1):
    """Adds a depthwise convolution block V2.
    A depthwise convolution V2 block consists of a depthwise conv,
    batch normalization, relu6, pointwise convolution,
    batch normalization and relu6 activation.
    # Arguments
        inputs: Input tensor of shape `(rows, cols, channels)`
            (with `channels_last` data format) or
            (channels, rows, cols) (with `channels_first` data format).
        pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        expansion_factor: controls the expansion of the internal bottleneck
            blocks. Should be a positive integer >= 1
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel.
            The total number of depthwise convolution output
            channels will be equal to `filters_in * depth_multiplier`.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        bn_epsilon: Epsilon value for BatchNormalization
        bn_momentum: Momentum value for BatchNormalization
        block_id: Integer, a unique identification designating the block number.
    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, rows, cols, channels)` if data_format='channels_last'.
    # Output shape
        4D tensor with shape:
        `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.
    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    input_shape = K.int_shape(inputs)
    depthwise_conv_filters = _make_divisible(
        input_shape[channel_axis] * expansion_factor)
    pointwise_conv_filters = _make_divisible(pointwise_conv_filters * alpha)

    if depthwise_conv_filters > input_shape[channel_axis]:
        x = Conv2D(depthwise_conv_filters, (1, 1),
                   padding='same',
                   use_bias=False,
                   strides=(1, 1),
                   kernel_initializer=initializers.he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay),
                   name='conv_expand_%d' % block_id)(inputs)
        x = BatchNormalization(axis=channel_axis, momentum=bn_momentum, epsilon=bn_epsilon,
                               name='conv_expand_%d_bn' % block_id)(x)
        x = Activation(relu6, name='conv_expand_%d_relu' % block_id)(x)
    else:
        x = inputs

    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        depthwise_initializer=initializers.he_normal(),
                        depthwise_regularizer=regularizers.l2(weight_decay),
                        name='conv_dw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, momentum=bn_momentum, epsilon=bn_epsilon,
                           name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               kernel_initializer=initializers.he_normal(),
               kernel_regularizer=regularizers.l2(weight_decay),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, momentum=bn_momentum, epsilon=bn_epsilon,
                           name='conv_pw_%d_bn' % block_id)(x)

    if strides == (2, 2):
        return x

    if input_shape[channel_axis] == pointwise_conv_filters:
        x = add([inputs, x])
    x = attach_attention_module(x, 'cbam_block')
    return x
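A hypothetical pair of calls showing how the block is usually chained (Input, relu6, _make_divisible and attach_attention_module are assumed to come from the surrounding module; shapes are illustrative):

inputs = Input(shape=(112, 112, 16))
# The strided block halves the resolution and returns early (no residual add, no attention);
# the following stride-1 block with matching channels adds the residual and the CBAM attention.
x = _depthwise_conv_block_v2(inputs, 24, alpha=1.0, expansion_factor=6,
                             strides=(2, 2), block_id=2)
x = _depthwise_conv_block_v2(x, 24, alpha=1.0, expansion_factor=6,
                             strides=(1, 1), block_id=3)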
Example #11
def ResCBAM(in_image=(height, width, 1)):

    img_in = Input(shape=in_image, name='image_in')
    img_in_b = img_in

    x0 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv0_0')(img_in_b)
    y0 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv0_1')(img_in_b)
    y0 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv0_2')(y0)
    y0 = attach_attention_module(y0, attention_module='cbam_block')
    conv0 = add([x0, y0])
    pool1 = MaxPooling2D(pool_size=(2, 2), name='down0')(conv0)

    x1 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv1_0')(pool1)
    #x1 = BatchNormalization(x1)
    y1 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv1_1')(pool1)
    #y1 = BatchNormalization(y1)
    y1 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv1_2')(y1)
    #y1 =  BatchNormalization(y1)
    y1 = attach_attention_module(y1, attention_module='cbam_block')

    conv1 = add([x1, y1])
    pool2 = MaxPooling2D(pool_size=(2, 2), name='down1')(conv1)

    x2 = Conv2D(256,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv2_0')(pool2)
    # x2 = BatchNormalization(x2)
    y2 = Conv2D(256,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv2_1')(pool2)
    #y2 = BatchNormalization(y2)
    y2 = Conv2D(256,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv2_2')(y2)
    #y2 =  BatchNormalization(y2)
    y2 = attach_attention_module(y2, attention_module='cbam_block')
    conv2 = add([x2, y2])
    pool3 = MaxPooling2D(pool_size=(2, 2), name='down2')(conv2)

    x3 = Conv2D(512,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv3_0')(pool3)
    #x3 = BatchNormalization(x3)

    y3 = Conv2D(512,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv3_1')(pool3)
    #y3 = BatchNormalization(y2)
    y3 = Conv2D(512,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv3_2')(y3)
    # y3 =  BatchNormalization(y2)
    y3 = attach_attention_module(y3, attention_module='cbam_block')

    conv3 = add([x3, y3])
    pool3 = AveragePooling2D(pool_size=2, name='down3')(conv3)
    down_4_f = Flatten(name='down_2_flat')(pool3)

    classification = Dense(3, activation='sigmoid',
                           name='classification')(down_4_f)

    conv5 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv5_1')(pool3)
    conv5 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv5_2')(conv5)
    drop5 = Dropout(0.2)(conv5)

    x6 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv6_0')(drop5)

    y6 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv6_1')(drop5)
    y6 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv6_2')(y6)
    y6 = attach_attention_module(y6, attention_module='cbam_block')
    xy6 = add([x6, y6])
    up6 = UpSampling2D(size=(2, 2), name='up6')(xy6)
    merge6 = concatenate([conv3, up6], axis=3)

    x7 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv7_0')(merge6)

    y7 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv7_1')(merge6)
    y7 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv7_2')(y7)
    y7 = attach_attention_module(y7, attention_module='cbam_block')
    up7 = add([x7, y7])
    up7 = UpSampling2D(size=(2, 2), name='up7')(up7)
    merge7 = concatenate([conv2, up7], axis=3)

    x8 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv8_0')(merge7)

    y8 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv8_1')(merge7)
    y8 = Conv2D(128,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv8_2')(y8)
    y8 = attach_attention_module(y8, attention_module='cbam_block')
    up8 = add([x8, y8])
    up8 = UpSampling2D(size=(2, 2), name='up8')(up8)
    merge8 = concatenate([conv1, up8], axis=3)

    x9 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv9_0')(merge8)

    y9 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv9_1')(merge8)
    y9 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv9_2')(y9)
    y9 = attach_attention_module(y9, attention_module='cbam_block')
    XY9 = add([x9, y9])
    up9 = UpSampling2D(size=(2, 2), name='Up9')(XY9)
    merge9 = concatenate([conv0, up9], axis=3)

    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv9_4')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv9_5')(conv9)
    segmentation = Conv2D(1, 1, activation='sigmoid',
                          name='segmentation')(conv9)

    model = Model(inputs=img_in, outputs=[segmentation, classification])
    model.summary()
    plot_model(model, to_file='model.png')
    return model
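A hypothetical build/compile for the two-headed model above (height and width are assumed to be module-level constants; the loss weights are illustrative):

model = ResCBAM(in_image=(height, width, 1))
model.compile(optimizer='adam',
              loss={'segmentation': 'binary_crossentropy',
                    'classification': 'binary_crossentropy'},
              loss_weights={'segmentation': 1.0, 'classification': 0.5})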
Example #12
# for layer in model.layers[:172]:
#    layer.trainable = False
# for layer in model.layers[172:]:
#    layer.trainable = True
# change this code for every attribute - set the layers to true for training
for layer in base_model.layers:
    layer.trainable = False

# for i, layer in enumerate(base_model.layers):
#    print(i, layer.name)

# pattern attribute layer

pattern_attribute = base_model.output
pattern_attribute = attach_attention_module(pattern_attribute,
                                            attention_module)
pattern_attribute = GlobalAveragePooling2D()(pattern_attribute)
# let's add a fully-connected layer
pattern_attribute = Dropout(dropout)(pattern_attribute)
pattern_attribute_layer = Dense(1024,
                                activation='relu',
                                name="attribute_pattern")(pattern_attribute)
pattern_attribute = Dropout(dropout)(pattern_attribute)
predictions_pattern = Dense(
    no_of_classes, activation='softmax',
    name="predictions_pattern")(pattern_attribute_layer)

model = Model(inputs=base_model.input, outputs=predictions_pattern)

# model.load_weights("../models/label_pattern/label_pattern_inceptionv3_41_0.37.h5")
# print ("Checkpoint loaded.")
Example #13
def Vgg16CBAM(in_image=(height, width, 1)):

    img_in = Input(shape=in_image, name='image_in')
    img_in_b = BatchNormalization(name='in_BN')(img_in)

    #c0 = Conv2D(8, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',name='c0')(img_in_b)
    c1 = Erosion2D(8, (3, 3), padding="same", strides=(1, 1))(img_in_b)
    c1 = Dilation2D(8, (3, 3), padding="same", strides=(1, 1))(c1)
    c1 = BatchNormalization()(c1)
    c1 = attach_attention_module(c1, attention_module='cbam_block')
    #c10 = add([c0,c1])

    cc0 = BatchNormalization(name='Morph_BN')(c1)

    cc00 = Conv2D(8,
                  3,
                  activation='relu',
                  padding='same',
                  kernel_initializer='he_normal',
                  name='cc0')(cc0)
    cc1 = Erosion2D(8, (3, 3), padding="same", strides=(1, 1))(cc0)
    cc1 = Dilation2D(8, (3, 3), padding="same", strides=(1, 1))(cc1)
    cc1 = BatchNormalization()(cc1)
    cc1 = attach_attention_module(cc1, attention_module='cbam_block')
    #cc10 = add([cc00,cc1])

    conv0 = Conv2D(32,
                   6,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv0_5')(cc1)
    conv0 = Conv2D(32,
                   6,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv0_6')(conv0)
    conv0 = BatchNormalization()(conv0)
    #conv0 = attach_attention_module(conv0, attention_module='cbam_block')
    pool0 = MaxPooling2D(pool_size=(2, 2), name='down0')(conv0)

    conv1 = Conv2D(64,
                   5,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv1_1')(pool0)
    conv1 = Conv2D(64,
                   5,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv1_2')(conv1)
    conv1 = BatchNormalization()(conv1)
    #conv1 = attach_attention_module(conv1, attention_module='cbam_block')
    pool1 = MaxPooling2D(pool_size=(2, 2), name='down1')(conv1)

    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv2_1')(pool1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv2_2')(conv2)
    conv2 = BatchNormalization()(conv2)
    # conv2 = attach_attention_module(conv2, attention_module='cbam_block')
    pool2 = MaxPooling2D(pool_size=(2, 2), name='down2')(conv2)

    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv3_1')(pool2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv3_2')(conv3)
    conv3 = BatchNormalization()(conv3)
    #conv3 = attach_attention_module(conv3, attention_module='cbam_block')
    pool3 = MaxPooling2D(pool_size=(2, 2), name='down3')(conv3)

    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv4_1')(pool3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv4_2')(conv4)
    conv4 = BatchNormalization()(conv4)
    #conv4 = attach_attention_module(conv4, attention_module='cbam_block')
    pool4 = MaxPooling2D(pool_size=(2, 2), name='down4')(conv4)

    down_4_f = Flatten(name='down_2_flat')(pool4)

    down_classsify = Dense(512, activation='relu', name='classify_1')(down_4_f)
    down_classsify = Dropout(0.6)(down_classsify)
    down_classsify = Dense(128, activation='relu',
                           name='classify_2')(down_classsify)
    down_classsify = Dropout(0.65)(down_classsify)
    classification = Dense(2, activation='sigmoid',
                           name='classification')(down_classsify)

    conv5 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv5_1')(pool4)
    conv5 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv5_2')(conv5)
    conv5 = BatchNormalization()(conv5)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal',
                 name='conv6_1')(conv5)
    up6 = UpSampling2D(size=(2, 2), name='up_1')(up6)
    merge6 = concatenate([conv4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv6_2')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv6_3')(conv6)
    conv6 = BatchNormalization()(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal',
                 name='conv7_1')(conv6)
    up7 = UpSampling2D(size=(2, 2), name='up2')(up7)
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv7_2')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv7_3')(conv7)
    conv7 = BatchNormalization()(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal',
                 name='conv8_1')(conv7)
    up8 = UpSampling2D(size=(2, 2), name='up3')(up8)
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv8_2')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv8_3')(conv8)
    conv8 = BatchNormalization()(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal',
                 name='conv9_1')(conv8)
    up9 = UpSampling2D(size=(2, 2), name='up4')(up9)
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv9_2')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal',
                   name='conv9_3')(conv9)
    conv9 = BatchNormalization()(conv9)

    up10 = Conv2D(32,
                  2,
                  activation='relu',
                  padding='same',
                  kernel_initializer='he_normal',
                  name='conv10_1')(conv9)
    up10 = UpSampling2D(size=(2, 2), name='up5')(up10)
    merge10 = concatenate([conv0, up10], axis=3)
    conv10 = Conv2D(32,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal',
                    name='conv10_2')(merge10)
    conv10 = Conv2D(32,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal',
                    name='conv10_3')(conv10)
    conv10 = BatchNormalization()(conv10)

    conv10 = Conv2D(2,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal',
                    name='conv10_4')(conv10)

    segmentation = Conv2D(1, 1, activation='sigmoid',
                          name='segmentation')(conv10)

    model = Model(inputs=img_in, outputs=[segmentation, classification])
    model.summary()

    plot_model(model, to_file='model.png')
    return model
Example #14
def ResCBAM1(in_image=(height, width, 1)):

    img_in = Input(shape=in_image, name='image_in')
    img_in_b = BatchNormalization(name='in_BN')(img_in)

    x0 = Conv2D(32,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv0_0')(img_in_b)
    y0 = Conv2D(32,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv0_1')(img_in_b)
    y0 = Conv2D(32,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv0_2')(y0)
    y0 = attach_attention_module(y0, attention_module='cbam_block')
    conv0 = add([x0, y0])  #32

    x1 = conv0
    y1 = Conv2D(32,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv1_1')(conv0)
    y1 = Conv2D(32,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv1_2')(y1)
    y1 = attach_attention_module(y1, attention_module='cbam_block')

    conv1 = add([x1, y1])  #32

    x2 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv2_0')(conv1)
    y2 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv2_1')(conv1)
    y2 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv2_2')(y2)
    y2 = attach_attention_module(y2, attention_module='cbam_block')
    conv2 = add([x2, y2])  #64

    x3 = conv2
    y3 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv3_1')(conv2)
    y3 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv3_2')(y3)
    y3 = attach_attention_module(y3, attention_module='cbam_block')
    conv3 = add([x3, y3])  #64

    x4 = conv3
    y4 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv4_1')(conv3)
    y4 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv4_2')(y4)
    y4 = attach_attention_module(y4, attention_module='cbam_block')
    conv4 = add([x4, y4])

    x5 = conv4
    y5 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv5_1')(conv4)
    y5 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv5_2')(y5)
    y5 = attach_attention_module(y5, attention_module='cbam_block')
    conv5 = add([x5, y5])

    x6 = conv5
    y6 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv6_1')(conv5)
    y6 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv6_2')(y6)
    y6 = attach_attention_module(y6, attention_module='cbam_block')
    conv6 = add([x6, y6])

    x7 = conv6
    y7 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv7_1')(conv6)
    y7 = Conv2D(64,
                3,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal',
                name='conv7_2')(y7)
    y7 = attach_attention_module(y7, attention_module='cbam_block')
    conv7 = add([x7, y7])

    segmentation = Conv2D(1, 1, activation='sigmoid',
                          name='segmentation')(conv7)

    pool1 = AveragePooling2D(pool_size=8, name='AvePool1')(conv7)
    down_1 = Flatten(name='down_1')(pool1)
    classification = Dense(2, activation='sigmoid',
                           name='classification')(down_1)

    model = Model(inputs=img_in, outputs=[segmentation, classification])
    model.summary()
    plot_model(model, to_file='model.png')
    return model
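
# --- Illustrative usage sketch (not part of the original snippet) ---
# A hedged example of how a two-output model like the one returned above is
# typically compiled. The helper name, optimizer, losses and loss weights are
# assumptions; only the output names ('segmentation', 'classification') come
# from the layers defined in the function above.
def _compile_multitask_sketch(model):
    model.compile(optimizer='adam',
                  loss={'segmentation': 'binary_crossentropy',
                        'classification': 'binary_crossentropy'},
                  loss_weights={'segmentation': 1.0, 'classification': 0.5},
                  metrics=['accuracy'])
    return model
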
def CGScom(data):
    # The encoder follows VGG16 (without the fully connected layers); the input image size is set to 224*224,
    # and the last layer, block5_pool (MaxPooling2D), has shape 7*7*512.
    # base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 13), input_tensor=data)
    # # Make the VGG part of the model trainable
    # base.trainable = True

    x0 = layers.Conv2D(64, (3, 3),
                       activation='relu',
                       padding='same',
                       name='block1_conv')(data)
    y = attach_attention_module(x0, 'cbam_block')
    fusion = layers.add([x0, y])
    x0 = layers.Activation('relu')(fusion)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x0)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x1 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x1)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x2 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv1')(x2)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x3 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv1')(x3)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x4 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv1')(x4)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x5 = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    # ## Decoder written from scratch below

    # # 14*14*512
    x = layers.Deconv2D(512, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv1')(x5)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x4])
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv1_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv1_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(512, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv1_conv3')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx1 = layers.Activation('relu')(fusion)
    # x = layers.Conv2D(512, (1, 1), padding='same', activation='relu', name='deconv1_conv4')(x)
    # y = attach_attention_module(x, 'cbam_block')
    # x = layers.add([x, y])
    # x = layers.Activation('relu')(x)

    # 28*28*256
    x = layers.Deconv2D(256, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv2')(xx1)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x3])
    x = layers.Conv2D(256, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv2_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv2_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(256, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv2_conv3')(x)
    # x = layers.Conv2D(256, (1, 1), padding='same', activation='relu', name='deconv2_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx2 = layers.Activation('relu')(fusion)

    # 56*56*128
    x = layers.Deconv2D(128, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv3')(xx2)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x2])
    x = layers.Conv2D(128, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv3_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(128, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv3_conv2')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(128, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv3_conv3')(x)
    # x = layers.Conv2D(128, (1, 1), padding='same', activation='relu', name='deconv3_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx3 = layers.Activation('relu')(fusion)

    # 112*112*64
    x = layers.Deconv2D(64, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv4')(xx3)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x1])
    x = layers.Conv2D(64, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv4_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(64, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv4_conv2')(x)
    # x = layers.Conv2D(64, (1, 1), padding='same', activation='relu', name='deconv4_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    xx4 = layers.Activation('relu')(fusion)

    # 224*224*32
    x = layers.Deconv2D(32, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        name='deconv5')(xx4)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Concatenate()([x, x0])
    x = layers.Conv2D(32, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv5_conv1')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)
    x = layers.Conv2D(32, (3, 3),
                      padding='same',
                      activation='relu',
                      name='deconv5_conv2')(x)
    # x = layers.Conv2D(32, (1, 1), padding='same', activation='relu', name='deconv5_conv4')(x)
    y = attach_attention_module(x, 'cbam_block')
    fusion = layers.add([x, y])
    x = layers.Activation('relu')(fusion)

    # 224*224*1 output 1
    x0 = layers.Conv2D(1, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       name='conv6_end')(x)

    # # train
    # return [x0, x0]
    # # return [x, fusion]

    # test
    m = Model(data, x0, name='CGScom')
    return m
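
# --- Illustrative usage sketch (not part of the original snippet) ---
# A hedged example of building CGScom on a Keras Input. The helper name is
# hypothetical; the 13-channel, 224x224 input shape is taken from the
# commented-out VGG16 line at the top of CGScom, and the optimizer and loss
# are assumptions.
def build_cgscom_sketch(input_shape=(224, 224, 13)):
    data = layers.Input(shape=input_shape)
    model = CGScom(data)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
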
Ejemplo n.º 16
0
# for layer in model.layers[:172]:
#    layer.trainable = False
# for layer in model.layers[172:]:
#    layer.trainable = True

# change this code for every attribute - set layer.trainable to True for the layers you want to train
for layer in base_model.layers:
    layer.trainable = False

# for i, layer in enumerate(base_model.layers):
#    print(i, layer.name)

# gender attribute layer

gender_attribute = base_model.output
gender_attribute = attach_attention_module(gender_attribute, attention_module)
gender_attribute = GlobalAveragePooling2D()(gender_attribute)
# let's add a fully-connected layer
gender_attribute = Dropout(dropout)(gender_attribute)
gender_attribute_layer = Dense(1024,
                               activation='relu',
                               name="attribute_gender")(gender_attribute)
predictions_gender = Dense(no_of_classes,
                           activation='softmax',
                           name="predictions_gender")(gender_attribute_layer)

model = Model(inputs=base_model.input, outputs=predictions_gender)

lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
Ejemplo n.º 17
0
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1, attention_module=None):
    """Adds a depthwise convolution block.
    A depthwise convolution block consists of a depthwise conv,
    batch normalization, relu6, pointwise convolution,
    batch normalization and relu6 activation.
    # Arguments
        inputs: Input tensor of shape `(rows, cols, channels)`
            (with `channels_last` data format) or
            `(channels, rows, cols)` (with `channels_first` data format).
        pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel.
            The total number of depthwise convolution output
            channels will be equal to `filters_in * depth_multiplier`.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        block_id: Integer, a unique identification designating the block number.
    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, rows, cols, channels)` if data_format='channels_last'.
    # Output shape
        4D tensor with shape:
        `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.
    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
    x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)

    # attention_module
    if attention_module is not None:
        x = attach_attention_module(x, attention_module)
		
    return x
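
# --- Illustrative usage sketch (not part of the original snippet) ---
# A hedged example that stacks the block above into a tiny MobileNet-style
# stem. The helper name, input shape and filter counts are assumptions; it
# assumes the usual Keras imports (Input, Model) in addition to the layers and
# the relu6 helper already used in this file.
def _tiny_depthwise_stack_sketch(attention_module='cbam_block'):
    inputs = Input(shape=(224, 224, 3))
    # Standard strided stem convolution before the depthwise blocks
    x = Conv2D(32, (3, 3), strides=(2, 2), padding='same',
               use_bias=False, name='conv_stem')(inputs)
    x = BatchNormalization(name='conv_stem_bn')(x)
    x = Activation(relu6, name='conv_stem_relu')(x)
    # Two depthwise-separable blocks, the second one downsampling
    x = _depthwise_conv_block(x, 64, alpha=1.0, block_id=1,
                              attention_module=attention_module)
    x = _depthwise_conv_block(x, 128, alpha=1.0, strides=(2, 2), block_id=2,
                              attention_module=attention_module)
    return Model(inputs, x, name='tiny_depthwise_stack_sketch')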