def spatial_attention(input_feature):
    kernel_size = 7
    if K.image_data_format() == "channels_first":
        channel = K.int_shape(input_feature)[1]
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        channel = K.int_shape(input_feature)[-1]
        cbam_feature = input_feature

    # Pool across the channel axis to get two single-channel descriptor maps
    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert K.int_shape(avg_pool)[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert K.int_shape(max_pool)[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert K.int_shape(concat)[-1] == 2
    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert K.int_shape(cbam_feature)[-1] == 1

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])
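
A minimal usage sketch (shapes are illustrative; Input and Model are assumed to be imported alongside the layers used above):

input_tensor = Input(shape=(32, 32, 64))   # hypothetical feature map
refined = spatial_attention(input_tensor)  # same shape, spatially reweighted
attention_demo = Model(inputs=input_tensor, outputs=refined)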
Example #2
def squeeze_excite_block(input_tensor, ratio=16):
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
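
A quick usage sketch for the SE block (hypothetical feature-map size; the same imports are assumed):

feature_map = Input(shape=(56, 56, 128))
recalibrated = squeeze_excite_block(feature_map, ratio=16)  # channel-wise reweighting
se_demo = Model(inputs=feature_map, outputs=recalibrated)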
Example #3
def dense_block(x,
                nb_layers,
                nb_filter,
                growth_rate,
                bottleneck=False,
                dropout_rate=None,
                weight_decay=1e-4,
                grow_nb_filters=True,
                return_concat_list=False):
    '''Build a dense_block where the output of each conv_block is fed to subsequent ones
        Args:
            x: keras tensor
            nb_layers: the number of layers of conv_block to append to the model
            nb_filter: number of filters
            growth_rate: growth rate
            bottleneck: bottleneck block
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
            return_concat_list: return the list of feature maps along with the actual output
        Returns:
            keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)
        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter
Example #4
def conv_block(ip,
               nb_filter,
               bottleneck=False,
               dropout_rate=None,
               weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 3x3 Conv2D, optional bottleneck block and dropout
        Args:
            ip: Input keras tensor
            nb_filter: number of filters
            bottleneck: add bottleneck block
            dropout_rate: dropout rate
            weight_decay: weight decay factor
        Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4
        x = Conv2D(inter_channel, (1, 1),
                   kernel_initializer='he_normal',
                   padding='same',
                   use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
Example #5
def transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, and AveragePooling2D
        Args:
            ip: keras tensor
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block
            weight_decay: weight decay factor
        Returns:
            keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
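
A minimal sketch chaining the helpers above (filter counts are illustrative; conv_block from Example #4 must be in scope, and Example #8 below shows the full DenseNet assembly):

inputs = Input(shape=(32, 32, 3))
x = Conv2D(24, (3, 3), padding='same', use_bias=False)(inputs)       # stem
x, nb_filter = dense_block(x, nb_layers=4, nb_filter=24, growth_rate=12)
x = transition_block(x, nb_filter, compression=0.5)                  # halve the channels
dense_demo = Model(inputs=inputs, outputs=x)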
Example #6
validation_data_dir = r'C:\images\Validation'  # raw string so the backslashes are not treated as escapes

nb_train_samples = 2500
nb_validation_samples = 400
epochs = 2
batch_size = 24

# classifier = Sequential()
# classifier.add(Convolution2D(32,3,3, input_shape = (img_width, img_height, 3), activation='relu'))
# classifier.add(MaxPooling2D(pool_size=(2,2)))
# classifier.add(Flatten())
#
# classifier.add(Dense(output_dim = 128, activation='relu'))
# classifier.add(Dense(output_dim = 1, activation='sigmoid'))

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=input_shape))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

classifier.add(Conv2D(32, (3, 3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

classifier.add(Conv2D(64, (3, 3)))
classifier.add(Activation('relu'))
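
The snippet breaks off after the third convolutional layer. A plausible completion (purely illustrative, not the original author's code; ImageDataGenerator is assumed to be imported) would add a dense head, compile, and feed validation data from validation_data_dir:

classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(64, activation='relu'))
classifier.add(Dense(1, activation='sigmoid'))  # binary output
classifier.compile(loss='binary_crossentropy',
                   optimizer='rmsprop',
                   metrics=['accuracy'])

validation_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')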
Example #7
        print(logs)

    def on_epoch_end(self, epoch, logs=None):
        print('on_epoch_end')
        print(logs)

    def on_batch_end(self, batch, logs=None):
        print('on_batch_end')
        print(logs)

    def on_train_end(self, logs=None):
        print('on_train_end')
        print(logs)


print(K.image_data_format())

x = Input(shape=(1, ))
y = tf.constant(6.0, tf.float32, name='accuracy')  # defined here but never used by the model below
model = Model(inputs=x, outputs=x)
model.summary()

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

data = np.random.random((3, 1))
labels = np.random.randint(2, size=(3, 1))
labels = labels.reshape([3, -1])

dataset = tf.data.Dataset.from_tensor_slices((data, labels))
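
The dataset still needs batching before it can be fed to fit; a minimal follow-up (batch size and step count are arbitrary, and tf.keras behaviour is assumed) would be:

dataset = dataset.batch(3).repeat()
model.fit(dataset, epochs=2, steps_per_epoch=1)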
Example #8
def create_dense_net(nb_classes,
                     img_input,
                     include_top,
                     depth=40,
                     nb_dense_block=3,
                     growth_rate=12,
                     nb_filter=-1,
                     nb_layers_per_block=[1],
                     bottleneck=False,
                     reduction=0.0,
                     dropout_rate=None,
                     weight_decay=1e-4,
                     subsample_initial_block=False,
                     activation='softmax'):
    ''' Build the DenseNet model
        Args:
            nb_classes: number of classes
            img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
            include_top: flag to include the final Dense layer
            depth: number of layers
            nb_dense_block: number of dense blocks to add to end (generally = 3)
            growth_rate: number of filters to add per dense block
            nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
            nb_layers_per_block: list, number of layers in each dense block
            bottleneck: add bottleneck blocks
            reduction: reduction factor of transition blocks. Note: reduction value is inverted to compute compression
            dropout_rate: dropout rate
            weight_decay: weight decay rate
            subsample_initial_block: Set to True to subsample the initial convolution and
                    add a MaxPool2D before the dense blocks are added.
            activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                    Note that if sigmoid is used, classes must be 1.
        Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if not isinstance(nb_layers_per_block, list):
        raise ValueError('nb_layers_per_block should be a list')

    final_nb_layer = nb_layers_per_block[-1]
    nb_layers = nb_layers_per_block[:-1]

    if nb_filter <= 0:
        nb_filter = 2 * growth_rate
    compression = 1.0 - reduction
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)
    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    for block_index in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x,
                                   nb_layers[block_index],
                                   nb_filter,
                                   growth_rate,
                                   bottleneck=bottleneck,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
        x = transition_block(x,
                             nb_filter,
                             compression=compression,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense block is not followed by a transition_block
    x, nb_filter = dense_block(x,
                               final_nb_layer,
                               nb_filter,
                               growth_rate,
                               bottleneck=bottleneck,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
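
A usage sketch for create_dense_net (input size and class count are hypothetical; the block helpers from the earlier examples must be in scope):

img_input = Input(shape=(32, 32, 3))
out = create_dense_net(nb_classes=10,
                       img_input=img_input,
                       include_top=True,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_layers_per_block=[6, 6, 6],
                       bottleneck=True,
                       reduction=0.5)
densenet = Model(inputs=img_input, outputs=out)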
Example #9
def learn_cnn(x_train, y_train, x_test, y_test):
    num_classes = 10

    # input image dimensions
    img_rows, img_cols = 28, 28

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.SGD(),
                  metrics=['acc'])
    plot_model(model, to_file='cnn_model.png')
    m = model.fit(x_train,
                  y_train,
                  validation_data=(x_test, y_test),
                  epochs=10,
                  batch_size=64,
                  verbose=2)
    plt.plot(m.history['acc'])
    plt.plot(m.history['val_acc'])
    plt.title('learn cnn MNIST accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig("learn_cnn_MNIST_accuracy.png")
    plt.show()

    plt.plot(m.history['loss'])
    plt.plot(m.history['val_loss'])
    plt.title('learn cnn MNIST loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig("learn_cnn_MNIST_loss.png")
    plt.show()
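
A minimal driver for learn_cnn, assuming the MNIST arrays come from keras.datasets (plot_model additionally requires pydot and graphviz to be installed):

from keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()  # 28x28 grayscale digits
learn_cnn(x_train, y_train, x_test, y_test)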