# Example no. 1
def transition_SE_layer(input_tensor,
                        numFilters,
                        compressionFactor=1.0,
                        se_ratio=16):
    """DenseNet-style transition layer with a squeeze-and-excitation block.

    Pipeline: BN -> ReLU -> 1x1 Conv (compresses channel count by
    ``compressionFactor``) -> SE block -> 2x2 average pooling (halves
    the spatial resolution).

    Parameters
    ----------
    input_tensor : 4-D Keras tensor
        Incoming feature map.
    numFilters : int
        Number of channels in ``input_tensor``.
    compressionFactor : float
        Fraction of filters kept by the 1x1 convolution (1.0 = no compression).
    se_ratio : int
        Reduction ratio of the squeeze-and-excitation block.

    Returns
    -------
    (tensor, int)
        The downsampled feature map and its channel count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    # Pick the channel axis according to the globally configured data format.
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)

    # 1x1 convolution compressing the channel dimension.
    x = Conv2D(numOutPutFilters, (1, 1),
               strides=(1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)

    # SE Block
    x = squeeze_excitation_block(x, ratio=se_ratio)

    # Downsampling by 2x2 average pooling.
    # Fixes: data_format was hard-coded to 'channels_last' although bn_axis
    # above honors K.image_data_format() — omit it so Keras uses the global
    # setting; name='' collided on the second call of this function — omit it
    # so Keras auto-generates a unique layer name.
    x = AveragePooling2D((2, 2),
                         strides=(2, 2),
                         padding='valid')(x)

    return x, numOutPutFilters
# Example no. 2
def projection_block(input_tensor, filters, stage, block, se_enabled=False, se_ratio=16):
    """ResNet projection block: main path downsamples with a stride-2 3x3
    convolution, and the shortcut is projected with a stride-2 1x1
    convolution so shapes match for the residual addition.

    Parameters
    ----------
    input_tensor : 4-D Keras tensor
        Incoming feature map.
    filters : (int, int)
        Filter counts for the first and second convolution.
    stage, block
        Identifiers used to build unique layer names.
    se_enabled : bool
        Insert a squeeze-and-excitation block after the second convolution.
    se_ratio : int
        Reduction ratio of the SE block.

    Returns
    -------
    tensor
        The activated residual output.
    """
    numFilters1, numFilters2 = filters

    # Channel axis follows the configured image data format.
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    conv_name_base = f'res{stage}_{block}_branch'
    bn_name_base = f'bn{stage}_{block}_branch'

    # Main path — downsampling happens directly via the stride-2 convolution.
    main = Conv2D(numFilters1, (3, 3),
                  padding='same',
                  strides=(2, 2),
                  kernel_initializer='he_normal',
                  name=conv_name_base + '2a')(input_tensor)
    main = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(main)
    main = Activation('relu')(main)

    main = Conv2D(numFilters2, (3, 3),
                  padding='same',
                  kernel_initializer='he_normal',
                  name=conv_name_base + '2b')(main)
    # Optional squeeze-and-excitation recalibration.
    if se_enabled:
        main = squeeze_excitation_block(main, ratio=se_ratio)
    main = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(main)

    # Shortcut path — 1x1 projection with matching stride so shapes agree.
    shortcut = Conv2D(numFilters2, (1, 1),
                      strides=(2, 2),
                      kernel_initializer='he_normal',
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    # Residual addition followed by the final activation.
    out = Add()([main, shortcut])
    out = Activation('relu')(out)

    return out
# Example no. 3
def identity_block(input_tensor, filters, stage, block, se_enabled=False, se_ratio=16):
    """ResNet identity block: two 3x3 convolutions on the main path and an
    unmodified shortcut, joined by addition. Spatial size and (for the
    residual add to work) channel count are preserved.

    Parameters
    ----------
    input_tensor : 4-D Keras tensor
        Incoming feature map.
    filters : (int, int)
        Filter counts for the first and second convolution.
    stage, block
        Identifiers used to build unique layer names.
    se_enabled : bool
        Insert a squeeze-and-excitation block after the second convolution.
    se_ratio : int
        Reduction ratio of the SE block.

    Returns
    -------
    tensor
        The activated residual output.
    """
    numFilters1, numFilters2 = filters

    # Channel axis follows the configured image data format.
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    conv_name_base = f'res{stage}_{block}_branch'
    bn_name_base = f'bn{stage}_{block}_branch'

    # Main path: conv -> BN -> ReLU -> conv -> (SE) -> BN.
    main = Conv2D(numFilters1, (3, 3),
                  padding='same',
                  kernel_initializer='he_normal',
                  name=conv_name_base + '2a')(input_tensor)
    main = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(main)
    main = Activation('relu')(main)

    main = Conv2D(numFilters2, (3, 3),
                  padding='same',
                  kernel_initializer='he_normal',
                  name=conv_name_base + '2b')(main)
    # Optional squeeze-and-excitation recalibration.
    if se_enabled:
        main = squeeze_excitation_block(main, ratio=se_ratio)
    main = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(main)

    # Identity shortcut: add the unchanged input, then activate.
    out = Add()([main, input_tensor])
    out = Activation('relu')(out)
    return out
# Example no. 4
def dense_SE_block(input_tensor,
                   numInputFilters,
                   numLayers=1,
                   growthRate_k=12,
                   bottleneck_enabled=False,
                   se_ratio=16):
    """DenseNet block followed by a squeeze-and-excitation block.

    Each of the ``numLayers`` layers applies BN -> ReLU -> (optional 1x1
    bottleneck) -> 3x3 Conv producing ``growthRate_k`` feature maps, and
    concatenates its output with all previous features (dense connectivity).

    Parameters
    ----------
    input_tensor : 4-D Keras tensor
        Incoming feature map.
    numInputFilters : int
        Number of channels in ``input_tensor``.
    numLayers : int
        Number of densely connected layers.
    growthRate_k : int
        Growth rate k — feature maps added per layer.
    bottleneck_enabled : bool
        Insert a 1x1 bottleneck convolution producing ``4 * growthRate_k``
        maps before each 3x3 convolution (DenseNet-B).
    se_ratio : int
        Reduction ratio of the final squeeze-and-excitation block.

    Returns
    -------
    (tensor, int)
        The concatenated feature map and its channel count.
    """
    # Channel axis follows the configured image data format.
    if K.image_data_format() == 'channels_last':
        concat_axis = -1
        bn_axis = -1
    else:
        concat_axis = 1
        bn_axis = 1

    concat_features = input_tensor

    for i in range(numLayers):
        # Fix: BatchNormalization(name='') produced duplicate layer names on
        # the second iteration / second call, which Keras rejects — omit the
        # name so Keras auto-generates a unique one.
        x = BatchNormalization(axis=bn_axis)(concat_features)
        x = Activation('relu')(x)

        if bottleneck_enabled:
            # "in our experiments, we let each 1x1 conv produce 4k feature maps"
            x = Conv2D(4 * growthRate_k, (1, 1),
                       strides=(1, 1),
                       kernel_initializer='he_normal',
                       padding='same')(x)
            x = BatchNormalization(axis=bn_axis)(x)
            x = Activation('relu')(x)

        x = Conv2D(growthRate_k, (3, 3),
                   strides=(1, 1),
                   kernel_initializer='he_normal',
                   padding='same')(x)
        # Dense connectivity: concatenate the new maps with everything so far.
        concat_features = concatenate([x, concat_features], axis=concat_axis)

        numInputFilters += growthRate_k

    # SE-Block recalibrates the concatenated features channel-wise.
    concat_features = squeeze_excitation_block(concat_features, ratio=se_ratio)

    return concat_features, numInputFilters