Example #1
from keras import backend as K
from keras.layers import AveragePooling2D, BatchNormalization, Conv2D, Dropout
from keras.regularizers import l2

# dl_layers is a project-local module providing the custom_swish activation layer.

def __transition_block(ip,
                       nb_filter,
                       compression=1.0,
                       dropout_rate=None,
                       weight_decay=1E-4):
    ''' Apply a 1x1 Conv2D (with optional compression), BatchNorm, swish
    activation, optional dropout and 2x2 AveragePooling2D.

    Args:
        ip: input keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor, after applying the Conv-BN-swish, dropout and
             average pooling ops
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # 1x1 convolution; compression < 1.0 shrinks the number of feature maps
    x = Conv2D(int(nb_filter * compression), (1, 1),
               kernel_initializer='he_uniform',
               padding='same',
               use_bias=True,
               kernel_regularizer=l2(weight_decay))(ip)
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = dl_layers.custom_swish()(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # Halve the spatial dimensions between dense blocks
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
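
A minimal usage sketch (an illustration, not part of the original example): calling the block on a standalone input within the same module. The shapes are hypothetical; with compression=0.5 the 1x1 conv halves the feature-map count and the average pooling halves the spatial dimensions.

from keras.layers import Input

inputs = Input(shape=(32, 32, 64))                    # 32x32 input with 64 feature maps
out = __transition_block(inputs, nb_filter=64, compression=0.5)
print(K.int_shape(out))                               # expected: (None, 16, 16, 32)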
Example #2
# Same Keras imports as Example #1 (K, BatchNormalization, Conv2D, Dropout, l2,
# and the project-local dl_layers module).

def __conv_block(ip,
                 nb_filter,
                 bottleneck=False,
                 dropout_rate=None,
                 weight_decay=1E-4):
    ''' Apply an optional bottleneck (1x1 Conv2D with 4x filters, BatchNorm,
    swish), then a 3x3 Conv2D, BatchNorm, swish activation and optional dropout.

    Args:
        ip: input keras tensor
        nb_filter: number of filters
        bottleneck: add a bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with convolution, batch norm and swish applied
             (optionally preceded by a bottleneck)
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if bottleneck:
        # Bottleneck: 1x1 conv expanding to 4 * nb_filter channels before the 3x3 conv
        x = Conv2D(nb_filter * 4, (1, 1),
                   kernel_initializer='he_uniform',
                   padding='same',
                   use_bias=True,
                   kernel_regularizer=l2(weight_decay))(ip)

        x = BatchNormalization(axis=concat_axis,
                               gamma_regularizer=l2(weight_decay),
                               beta_regularizer=l2(weight_decay))(x)
        x = dl_layers.custom_swish()(x)
    else:
        x = ip

    # Main 3x3 convolution producing nb_filter feature maps
    x = Conv2D(nb_filter, (3, 3),
               kernel_initializer='he_uniform',
               padding='same',
               use_bias=True,
               kernel_regularizer=l2(weight_decay))(x)

    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = dl_layers.custom_swish()(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
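
The __dense_block helper used in Example #3 below is not among these examples. Here is a minimal sketch of how such a block is conventionally assembled from __conv_block in DenseNet-style code: each conv block's output is concatenated onto the running tensor, so the feature-map count grows by growth_rate per block. This is an assumed implementation matching the call signature in Example #3, not the project's actual code:

from keras.layers import concatenate

def __dense_block(x, nb_layers, nb_filter, growth_rate,
                  bottleneck=False, dropout_rate=None, weight_decay=1E-4):
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    for _ in range(nb_layers):
        # Each conv block emits growth_rate feature maps ...
        cb = __conv_block(x, growth_rate, bottleneck=bottleneck,
                          dropout_rate=dropout_rate, weight_decay=weight_decay)
        # ... which are concatenated onto the running tensor (the "dense" connectivity)
        x = concatenate([x, cb], axis=concat_axis)
        nb_filter += growth_rate

    return x, nb_filter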
Example #3
import numpy as np
from keras import backend as K
from keras.layers import (BatchNormalization, Conv2D, Dense, Dropout,
                          GlobalAveragePooling2D)
from keras.regularizers import l2

# __transition_block is Example #1; __dense_block is a project helper not shown
# here (a sketch appears after Example #2). dl_layers and
# DEFAULT_FINAL_DROPOUT_RATE are defined elsewhere in the project.

def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       final_dropout=DEFAULT_FINAL_DROPOUT_RATE,
                       weight_decay=1E-4,
                       activation='softmax'):
    ''' Build the DenseNet model

    Args:
        nb_classes: number of classes
        img_input: input keras tensor, of shape (channels, rows, columns)
                or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks (generally 3)
        growth_rate: number of filters each conv block adds within a dense block
        nb_filter: initial number of filters. Default -1 indicates initial
                number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be -1, a positive integer or a list.
                If -1, calculates nb_layers_per_block from the depth of the network.
                If a positive integer, a set number of layers per dense block.
                If a list, it is used as provided. Note that the list size must
                be nb_dense_block.
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note: reduction is
                inverted to compute compression (compression = 1 - reduction)
        dropout_rate: general dropout rate
        final_dropout: dropout rate of final FC layer
        weight_decay: weight decay
        activation: type of activation at the top layer. Can be one of 'softmax'
                or 'sigmoid'. Note that if sigmoid is used, nb_classes must be 1.

    Returns: keras tensor with the DenseNet graph appended (class predictions
             if include_top, else the final embeddings)
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert 0.0 < reduction <= 1.0, 'Reduction value must lie between 0.0 and 1.0'

    # Get layers in each dense block
    if isinstance(nb_layers_per_block, (list, tuple)):
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == nb_dense_block, \
            'If a list, nb_layers_per_block must have exactly nb_dense_block entries'

        assert (np.sum(np.array(nb_layers)) + (nb_dense_block + 1) == depth), \
            ('Total number of layers must add up to %d.' % depth)
    else:
        if nb_layers_per_block == -1:
            assert ((depth - (nb_dense_block + 1)) % nb_dense_block == 0), \
                'Depth (minus nb_dense_block + 1) must be divisible by nb_dense_block'

            # e.g. depth=40, nb_dense_block=3 -> (40 - 4) / 3 = 12 layers per block
            nb_layers_per_block = int(
                (depth - (nb_dense_block + 1)) / nb_dense_block)

        nb_layers = [nb_layers_per_block] * nb_dense_block

    if bottleneck:
        # Each bottleneck conv block contributes two layers (1x1 + 3x3)
        for layer in nb_layers:
            assert layer % 2 == 0, 'Blocks with bottleneck must have an even number of layers'

        nb_layers = [int(layer / 2) for layer in nb_layers]

    # Compute initial nb_filter if <= 0, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = int(2 * growth_rate)

    # Compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    x = Conv2D(nb_filter, (3, 3),
               kernel_initializer='he_uniform',
               padding='same',
               name='initial_conv2D',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = dl_layers.custom_swish()(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)

        # Add transition_block (for every block except the last one)
        if block_idx != (nb_dense_block - 1):
            x = __transition_block(x,
                                   nb_filter,
                                   compression=compression,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
            nb_filter = int(nb_filter * compression)

    # Add final FC layer if requested
    if include_top:
        # Once done, take an average pool of the spatial dimensions
        x = GlobalAveragePooling2D(name="final_embeddings")(x)

        if final_dropout:
            x = Dropout(final_dropout, name="final_dropout")(x)

        x = Dense(nb_classes,
                  activation=activation,
                  kernel_regularizer=l2(weight_decay),
                  bias_regularizer=l2(weight_decay))(x)

    # Return class predictions (or the final embeddings when include_top is False)
    return x
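
A minimal sketch of wiring __create_dense_net into a trainable model (the input shape, class count and compile settings are illustrative assumptions):

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(32, 32, 3))          # e.g. CIFAR-sized RGB images
preds = __create_dense_net(nb_classes=10,
                           img_input=img_input,
                           include_top=True,
                           depth=40,           # (40 - 4) / 3 = 12 layers per dense block
                           nb_dense_block=3,
                           growth_rate=12)
model = Model(inputs=img_input, outputs=preds)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])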