import numpy as np

from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, merge
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K


def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout

    # Arguments
        x: input tensor
        stage: index for dense block
        branch: layer index within each dense block
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 convolution (bottleneck layer); the paper's DenseNet-BC uses
    # 4 * growth_rate bottleneck filters, and nb_filter here is the growth rate
    inter_channel = nb_filter * 4

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
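
# conv_block (and the functions below) apply a custom `Scale` layer that is not
# defined in this excerpt. A minimal sketch, assuming the usual Caffe-style split
# of BatchNorm into a normalization step plus a separate learnable per-channel
# affine (scale/shift) step; the exact layer used with the pretrained weights
# may differ:
from keras.engine import Layer, InputSpec

class Scale(Layer):
    '''Learnable per-channel scale (gamma) and shift (beta), applied right
    after BatchNormalization.'''
    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(Scale, self).__init__(**kwargs)

    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)
        # Identity transform before training: gamma = 1, beta = 0
        self.gamma = K.variable(np.ones(shape), name='%s_gamma' % self.name)
        self.beta = K.variable(np.zeros(shape), name='%s_beta' % self.name)
        self.trainable_weights = [self.gamma, self.beta]

    def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        # Reshape gamma/beta so they broadcast along the channel axis
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]
        return K.reshape(self.gamma, broadcast_shape) * x + K.reshape(self.beta, broadcast_shape)
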
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 Convolution (with optional compression),
    dropout, and AveragePooling

    # Arguments
        x: input tensor
        stage: index for dense block
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x
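
# DenseNet121 below calls dense_block, which is not included in this excerpt.
# A minimal sketch consistent with conv_block above and the DenseNet paper:
# each of nb_layers conv_blocks produces growth_rate feature maps, which are
# concatenated onto the running feature tensor along concat_axis (Keras 1's
# functional `merge` is assumed here):
def dense_block(x, stage, nb_layers, nb_filter, growth_rate,
                dropout_rate=None, weight_decay=1e-4):
    '''Build a dense block: nb_layers conv_blocks, each fed the concatenation
    of all previous feature maps.

    # Returns
        The concatenated feature tensor and the updated filter count.
    '''
    concat_feat = x
    for i in range(nb_layers):
        branch = i + 1
        x = conv_block(concat_feat, stage, branch, growth_rate,
                       dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat',
                            concat_axis=concat_axis,
                            name='concat_' + str(stage) + '_' + str(branch))
        nb_filter += growth_rate
    return concat_feat, nb_filter
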
def DenseNet121(nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.0,
                dropout_rate=0.0, weight_decay=1e-4, classes=1000, weights_path=None):
    '''Instantiate the DenseNet-121 architecture.

    # Arguments
        nb_dense_block: number of dense blocks
        growth_rate: number of filters added by each conv_block
        nb_filter: initial number of filters
        reduction: reduction factor of transition blocks
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        classes: number of classes to classify images into
        weights_path: path to pre-trained weights

    # Returns
        A Keras model instance.
    '''
    eps = 1.1e-5

    # Compute compression factor
    compression = 1.0 - reduction

    # Handle dimension ordering for different backends
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # Architecture for ImageNet (Table 1 in the paper)
    nb_filter = 64
    nb_layers = [6, 12, 24, 16]  # For DenseNet-121

    # Initial convolution
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks, each followed by a transition block
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter,
                                   growth_rate, dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        x = transition_block(x, stage, nb_filter, compression=compression,
                             dropout_rate=dropout_rate, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The final dense block has no transition block after it
    final_stage = stage + 1
    x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter,
                               growth_rate, dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
    x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
    x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)
    x = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)

    out = Dense(classes, name='fc6')(x)
    out = Activation('softmax', name='prob')(out)

    model = Model(img_input, out, name='densenet')

    if weights_path is not None:
        model.load_weights(weights_path)

    return model
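
# Example usage: build the model and run a forward pass on random input.
# Assumes a TensorFlow backend ('tf' dimension ordering; with Theano the input
# would be shaped (1, 3, 224, 224)). Pass weights_path pointing at a pretrained
# weights file to load weights instead of using random initialization.
if __name__ == '__main__':
    model = DenseNet121(reduction=0.5)  # compression 0.5, as in the paper's DenseNet-BC
    dummy = np.random.rand(1, 224, 224, 3).astype('float32')
    preds = model.predict(dummy)
    print(preds.shape)  # (1, 1000) class probabilities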