Example #1
def createDenseNet(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=16, dropout_rate=None,
                     weight_decay=1E-4, verbose=True):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay
    Returns: a Keras Model instance
    '''

    model_input = Input(shape=img_dim)

    concat_axis = 1 if K.image_data_format() == "channels_first" else -1

    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"

    # layers in each dense block
    nb_layers = int((depth - 4) / 3)
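    # e.g. with the default depth=40: (40 - 4) / 3 = 12 conv layers in each of the 3 dense blocks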

    # Initial convolution
    x = Convolution3D(nb_filter, (3, 3, 3), kernel_initializer="he_uniform", padding="same", name="initial_conv3D", use_bias=False,
                      kernel_regularizer=l2(weight_decay))(model_input)

    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                            beta_regularizer=l2(weight_decay))(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
        # add transition_block
        x = transition_block(x, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)

    # The last dense_block does not have a transition_block
    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = Activation('relu')(x)
    x = GlobalAveragePooling3D()(x)
    x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)

    densenet = Model(inputs=model_input, outputs=x)

    if verbose: 
        print("DenseNet-%d-%d created." % (depth, growth_rate))

    return densenet
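
A minimal usage sketch for this example (assumptions: Keras 2 with a TensorFlow backend, the helper functions dense_block and transition_block defined in the same module since they are not shown above, and an illustrative input shape):

from keras.layers import (Input, Dense, Activation, BatchNormalization,
                          Convolution3D, GlobalAveragePooling3D)
from keras.models import Model
from keras.regularizers import l2
from keras import backend as K

# 32x32x32 single-channel volumes, channels-last; depth=40 satisfies "3N + 4"
model = createDenseNet(nb_classes=10, img_dim=(32, 32, 32, 1), depth=40)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])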
Example #2
def globalpool_matlab_mri_vgg_3d(input_shape, num_classes):
    # Build a VGG-style network with dropout and weight decay as described in the paper (originally for 10 classes).
    # 18.9.8: changed padding from 'same' to 'valid'
    # 9.20: divided the kernel counts by 8
    model = Sequential()
    weight_decay = 0.0001  # previously 0.0005; currently unused (all regularizers below are commented out)

    model.add(Conv3D(8, (3, 3, 3), padding='same',
                     input_shape=input_shape))  # 9.13: 64 to 32 (divided by 2)
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.3))

    model.add(Conv3D(8, (3, 3, 3), padding='same'))  # 9.13 64 to 32
    model.add(Activation('relu'))
    model.add(BatchNormalization())

    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))

    model.add(Conv3D(32, (3, 3, 3), padding='same'))  # 9.13 128 to 64
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.4))

    model.add(Conv3D(32, (3, 3, 3), padding='same'))  # 9.13 128 to 64
    model.add(Activation('relu'))
    model.add(BatchNormalization())

    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))

    model.add(Conv3D(64, (3, 3, 3), padding='same'))  # 9.13 256 to 128
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.4))

    model.add(Conv3D(64, (3, 3, 3), padding='same'))  # 9.13 256 to 128
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.4))

    model.add(Conv3D(128, (3, 3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization())

    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))

    model.add(Conv3D(128, (3, 3, 3),
                     padding='same'))  # 9.13: 512 to 256, padding 'same' to 'valid'
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.4))

    #model.add(Conv3D(512, (3, 3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Conv3D(256, (3, 3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    #model.add(Dropout(0.4))

    model.add(Conv3D(256, (3, 3, 3), padding='same'))  # 512 to 256
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    # model.add(Dropout(0.2))
    model.add(GlobalAveragePooling3D())

    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    return model
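
A minimal usage sketch (the input shape is an assumption; any channels-last volume large enough to survive the three 2x2x2 max-pooling stages will do):

model = globalpool_matlab_mri_vgg_3d(input_shape=(32, 32, 32, 1), num_classes=10)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()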
Example #3
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       activation='softmax',
                       attention=True,
                       spatial_attention=True,
                       temporal_attention=True):

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            assert (
                depth - 4
            ) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)

            if bottleneck:
                count = count // 2

            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # compute initial nb_filter if -1, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # compute compression factor
    compression = 1.0 - reduction
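    # e.g. reduction=0.5 gives compression=0.5, so each transition block halves the channel count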

    # Initial convolution
    if subsample_initial_block:
        initial_kernel = (5, 5, 3)
        initial_strides = (2, 2, 1)
    else:
        initial_kernel = (3, 3, 1)
        initial_strides = (1, 1, 1)

    x = Conv3D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)
        # add attention_block
        if attention:
            x = Attention_block(x,
                                spatial_attention=spatial_attention,
                                temporal_attention=temporal_attention)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling3D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
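
A minimal usage sketch, wrapping the returned tensor in a Model (assumptions: the helpers __dense_block, __transition_block, and Attention_block are defined in the same module, and the illustrative input is a channels-last video-like volume):

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(64, 64, 16, 1))  # (rows, columns, frames, channels)
out = __create_dense_net(nb_classes=2, img_input=img_input, include_top=True)
model = Model(inputs=img_input, outputs=out)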
Example #4
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       activation='softmax',
                       attention=True,
                       spatial_attention=True,
                       temporal_attention=True):
    """

    @param nb_classes: 分类数量
    @param img_input:  tuple of shape (channels, rows, columns) or (rows, columns, channels)
    @param include_top: flag to include the final Dense layer
    @param depth:
        @todo number or layers . why?
    @param nb_dense_block: number of dense blocks to add to end (generally = 3)
    @param growth_rate: number of filters to add per dense block
    @param nb_filter: initial number of filters
    @param nb_layers_per_block: number of layers in each dense block
    @param bottleneck: add bottleneck blocks
    @param reduction: eduction factor of transition blocks. Note : reduction value is inverted to compute compression
    @param dropout_rate: dropout rate
    @param weight_decay: weight decay rate
    @param subsample_initial_block:
        @todo Set to True to subsample the initial convolution and
                    add a MaxPool3D before the dense blocks are added.
    @param activation:
        Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                    Note that if sigmoid is used, classes must be 1.
    @param attention:...
    @param spatial_attention:...
    @param temporal_attention:...
    @return:
    """
    # 1 if the channel axis comes first; otherwise -1 (the TensorFlow default)
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # layers in each dense block
    # two ways to determine the layer count: 1. derived from depth, 2. specified directly via nb_layers_per_block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            assert (
                depth - 4
            ) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)

            if bottleneck:
                count = count // 2

            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # compression factor in transition layer
    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'
    compression = 1.0 - reduction

    # compute initial nb_filter if -1, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate
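        # e.g. growth_rate=12 gives an initial nb_filter of 24 (the DenseNet-BC convention)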

    # Initial convolution
    # todo: 2. cubic convolution interpolation
    if subsample_initial_block:
        initial_kernel = (5, 5, 3)
        initial_strides = (2, 2, 1)
    else:
        initial_kernel = (3, 3, 1)
        initial_strides = (1, 1, 1)

    x = Conv3D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Add dense blocks; the attention block's position is swapped here (applied before each dense block)
    for block_idx in range(nb_dense_block - 1):
        # add attention_block
        if attention:
            x = Attention_block(x,
                                spatial_attention=spatial_attention,
                                temporal_attention=temporal_attention)

        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    if attention:
        x = Attention_block(x,
                            spatial_attention=spatial_attention,
                            temporal_attention=temporal_attention)
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling3D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
Example #5
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1E-4,
                       activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: input tensor (a Keras tensor, e.g. from Input), shaped (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be a -1, positive integer or a list.
                If -1, calculates nb_layer_per_block from the depth of the network.
                If positive integer, a set number of layers per dense block.
                If list, nb_layer is used as provided. Note that list size must
                be (nb_dense_block + 1)
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == "channels_first" else -1

    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"
    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, "reduction value must lie between 0.0 and 1.0"

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block + 1), "If list, nb_layer is used as provided. " \
                                                       "Note that list size must be (nb_dense_block + 1)"
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            count = int((depth - 4) / 3)
            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    if bottleneck:
        nb_layers = [int(layer // 2) for layer in nb_layers]
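        # assumption: each bottleneck layer is a 1x1x1 + 3x3x3 conv pair (standard DenseNet-B), hence the halving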

    # compute initial nb_filter if -1, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    x = Convolution3D(nb_filter, (3, 3, 3),
                      kernel_initializer="he_uniform",
                      padding="same",
                      name="initial_conv3D",
                      use_bias=False,
                      kernel_regularizer=l2(weight_decay))(img_input)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling3D()(x)

    if include_top:
        x = Dense(nb_classes,
                  activation=activation,
                  kernel_regularizer=l2(weight_decay),
                  bias_regularizer=l2(weight_decay))(x)

    return x
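
A minimal usage sketch, analogous to the previous examples (assumptions: __dense_block and __transition_block defined in the same module; illustrative input shape):

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(32, 32, 32, 1))
out = __create_dense_net(nb_classes=10, img_input=img_input, include_top=True,
                         depth=40, bottleneck=True, reduction=0.5)
model = Model(inputs=img_input, outputs=out)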