Example #1
from keras.layers import Conv3D, BatchNormalization, Activation, add

def se_identity_block(x, nb_filters, name, kernel_size=3):
    # Bottleneck residual block with squeeze-and-excitation.
    # Note: x must already have k3 channels, or the residual add() below will fail.
    k1, k2, k3 = nb_filters

    # 1x1x1 conv: reduce channels to k1
    out = Conv3D(k1, 1, strides=1, kernel_initializer='he_normal', name=name + 'conv1')(x)
    out = BatchNormalization(axis=-1, epsilon=1e-6, name=name + 'bn1')(out)
    out = Activation('relu')(out)

    # kernel_size^3 conv: spatial filtering at k2 channels
    out = Conv3D(k2, kernel_size, strides=1, padding='same', kernel_initializer='he_normal',
                 name=name + 'conv2')(out)
    out = BatchNormalization(axis=-1, epsilon=1e-6, name=name + 'bn2')(out)
    out = Activation('relu')(out)

    # 1x1x1 conv: restore channels to k3
    out = Conv3D(k3, 1, strides=1, kernel_initializer='he_normal', name=name + 'conv3')(out)
    out = BatchNormalization(axis=-1, epsilon=1e-6, name=name + 'bn3')(out)

    # channel-wise recalibration before the residual sum
    out = squeeze_excite_block3d(out)

    out = add([out, x])
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    return out
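
Both examples call squeeze_excite_block3d, which is not defined in the snippets. A minimal sketch, assuming the standard squeeze-and-excitation design (global average pool, two-layer bottleneck MLP, channel-wise rescaling); the ratio=16 default and the internals are assumptions, not taken from the original code:

from keras.layers import GlobalAveragePooling3D, Dense, Reshape, multiply

def squeeze_excite_block3d(x, ratio=16):  # hypothetical body and ratio
    filters = int(x.shape[-1])  # channel count (channels_last)
    # squeeze: one summary statistic per channel
    se = GlobalAveragePooling3D()(x)
    se = Reshape((1, 1, 1, filters))(se)
    # excite: bottleneck MLP producing per-channel gates in (0, 1)
    se = Dense(filters // ratio, activation='relu', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', use_bias=False)(se)
    # rescale the input feature map channel by channel
    return multiply([x, se])

With that in place, the identity block applies to any feature map whose channel count equals k3, e.g. (shapes are illustrative):

from keras.layers import Input
inp = Input(shape=(32, 32, 8, 64))
out = se_identity_block(inp, [16, 16, 64], name='res1_')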
Example #2
from keras.layers import (Input, Conv3D, BatchNormalization, Activation,
                          MaxPooling3D, GlobalAveragePooling3D, Dropout, Dense)
from keras.models import Model

def se_dense_net(nb_layers, growth_rate=12, nb_filter=64, bottleneck=True, reduction=0.1,
                 dropout_rate=None, subsample_initial_block=True, classes=2):

    inputs = Input(shape=(280, 280, 16, 1))
    print("0 :inputs shape:", inputs.shape)

    # nb_layers gives the number of conv blocks per dense block, e.g. nb_layers = [3, 3, 3]

    concat_axis = -1  # axis along which dense-block features are concatenated (channels_last)
    bn_axis = -1  # axis normalized by BatchNormalization
    nb_dense_block = len(nb_layers)  # number of dense blocks, one per entry in nb_layers
    final_nb_layer = nb_layers[-1]
    compression = 1.0 - reduction  # transition compression: output channels = int(input channels * compression)

    # Initial convolution =======================================================================================
    if subsample_initial_block:
        initial_kernel = (7, 7, 7)
        initial_strides = (2, 2, 1)
    else:
        initial_kernel = (3, 3, 3)
        initial_strides = (1, 1, 1)

    x = Conv3D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
               strides=initial_strides, use_bias=False)(inputs)

    if subsample_initial_block:
        x = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(x)

    print("0 :Initial conv shape:", x.shape)
    # Initial convolution finished ================================================================================
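    # Shape check with the defaults above: a 280x280x16 volume passes through the
    # stride-(2, 2, 1) conv to 140x140x16, then the stride-(2, 2, 1) max pool to
    # 70x70x16, with nb_filter=64 channels.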

    # Add dense blocks start  ==================================================================================
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, concat_axis=concat_axis,
                                     bn_axis=bn_axis, bottleneck=bottleneck,
                                     dropout_rate=dropout_rate, grow_nb_filters=True)
        print(block_idx+1, ":dense_block shape:", x.shape)

        x = __transition_block(x, nb_filter, compression=compression, concat_axis=concat_axis, bias_allow=False)
        print(block_idx+1, ":transition_block shape:", x.shape)

        x = squeeze_excite_block3d(x)
        print(block_idx + 1, ":se_block_out shape:", x.shape)

        nb_filter = int(nb_filter * compression)
    # Add dense blocks finish ==================================================================================

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, concat_axis=concat_axis, bn_axis=bn_axis,
                                 bottleneck=bottleneck, dropout_rate=dropout_rate, grow_nb_filters=True)
    print(nb_dense_block, ":dense_block shape:", x.shape)

    x = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    out = GlobalAveragePooling3D(data_format='channels_last')(x)
    print("GApooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)
    out = Dense(classes, name='fc1')(out_drop)
    print("out shape:", out.shape)
    output = Activation(activation='sigmoid')(out)

    model = Model(inputs=inputs, outputs=output)
    # possible losses: mean_squared_logarithmic_error or binary_crossentropy
    # model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss=EuiLoss, metrics=[y_t, y_pre, Acc])

    return model
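
__dense_block and __transition_block are likewise not shown. A sketch of what they commonly look like in Keras DenseNet implementations, kept consistent with the conventions above (BN-ReLU-Conv ordering, epsilon=1.1e-5, pooling that preserves the slice axis); the 4x bottleneck width and the (2, 2, 1) average pooling are assumptions:

from keras.layers import (Conv3D, BatchNormalization, Activation, Dropout,
                          AveragePooling3D, concatenate)

def __conv_block(x, growth_rate, bn_axis, bottleneck, dropout_rate):
    # BN-ReLU-Conv, optionally preceded by a 1x1x1 bottleneck (4x growth_rate wide)
    out = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(x)
    out = Activation('relu')(out)
    if bottleneck:
        out = Conv3D(4 * growth_rate, 1, kernel_initializer='he_normal',
                     padding='same', use_bias=False)(out)
        out = BatchNormalization(axis=bn_axis, epsilon=1.1e-5)(out)
        out = Activation('relu')(out)
    out = Conv3D(growth_rate, 3, kernel_initializer='he_normal', padding='same', use_bias=False)(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    return out

def __dense_block(x, nb_layers, nb_filter, growth_rate, concat_axis=-1, bn_axis=-1,
                  bottleneck=True, dropout_rate=None, grow_nb_filters=True):
    # each conv block sees the concatenation of every earlier feature map
    for _ in range(nb_layers):
        cb = __conv_block(x, growth_rate, bn_axis, bottleneck, dropout_rate)
        x = concatenate([x, cb], axis=concat_axis)
        if grow_nb_filters:
            nb_filter += growth_rate
    return x, nb_filter

def __transition_block(x, nb_filter, compression=1.0, concat_axis=-1, bias_allow=False):
    # compress channels with a 1x1x1 conv, then halve H and W (slice axis kept)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = Conv3D(int(nb_filter * compression), 1, kernel_initializer='he_normal',
               padding='same', use_bias=bias_allow)(x)
    x = AveragePooling3D((2, 2, 1), strides=(2, 2, 1))(x)
    return x

Under those assumptions the network builds end to end, and the commented-out compile line can be swapped for one of the losses suggested in the comment:

from keras.optimizers import SGD
model = se_dense_net([3, 3, 3])
model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss='binary_crossentropy', metrics=['accuracy'])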