def build_segnet_basic(inputs, n_classes, depths=[64, 64, 64, 64],
                       filter_size=7, l2_reg=0.):
    # Encoding layers
    enc1 = downsampling_block_basic(inputs, depths[0], filter_size, l2(l2_reg))
    enc2 = downsampling_block_basic(enc1, depths[1], filter_size, l2(l2_reg))
    enc3 = downsampling_block_basic(enc2, depths[2], filter_size, l2(l2_reg))
    enc4 = downsampling_block_basic(enc3, depths[3], filter_size, l2(l2_reg))

    # Decoding layers
    dec1 = upsampling_block_basic(enc4, depths[3], filter_size, enc4, l2(l2_reg))
    dec2 = upsampling_block_basic(dec1, depths[2], filter_size, enc3, l2(l2_reg))
    dec3 = upsampling_block_basic(dec2, depths[1], filter_size, enc2, l2(l2_reg))
    dec4 = upsampling_block_basic(dec3, depths[0], filter_size, enc1, l2(l2_reg))

    # Logits
    l1 = Convolution2D(n_classes, 1, 1, border_mode='valid')(dec4)
    score = CropLayer2D(inputs, name='score')(l1)
    softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    return model
def build_segnet_vgg(inputs, n_classes, l2_reg=0.):
    # Encoding layers
    enc1 = downsampling_block_vgg(inputs, 2, 64, 3, 1, l2_reg)
    enc2 = downsampling_block_vgg(enc1, 2, 128, 3, 2, l2_reg)
    enc3 = downsampling_block_vgg(enc2, 3, 256, 3, 3, l2_reg)
    enc4 = downsampling_block_vgg(enc3, 3, 512, 3, 4, l2_reg)
    enc5 = downsampling_block_vgg(enc4, 3, 512, 3, 5, l2_reg)

    # Decoding layers
    dec5 = upsampling_block_vgg(enc5, 3, 512, 3, 5, l2_reg, enc5)
    dec4 = upsampling_block_vgg(dec5, 3, 512, 3, 4, l2_reg, enc4)
    dec3 = upsampling_block_vgg(dec4, 3, 256, 3, 3, l2_reg, enc3)
    dec2 = upsampling_block_vgg(dec3, 2, 128, 3, 2, l2_reg, enc2)
    dec1 = upsampling_block_vgg(dec2, 2, 64, 3, 1, l2_reg, enc1)

    # Logits
    l1 = Convolution2D(n_classes, 1, 1, border_mode='valid')(dec1)
    score = CropLayer2D(inputs, name='score')(l1)
    softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    return model
def build_segnet_basic(inputs, n_classes, depths=[64, 64, 64, 64],
                       filter_size=7, l2_reg=0.):
    # Encoding layers
    enc1 = downsampling_block_basic(inputs, depths[0], filter_size, l2(l2_reg))
    enc2 = downsampling_block_basic(enc1, depths[1], filter_size, l2(l2_reg))
    enc3 = downsampling_block_basic(enc2, depths[2], filter_size, l2(l2_reg))
    enc4 = downsampling_block_basic(enc3, depths[3], filter_size, l2(l2_reg))

    # Decoding layers
    dec1 = upsampling_block_basic(enc4, depths[3], filter_size, enc4, l2(l2_reg))
    dec2 = upsampling_block_basic(dec1, depths[2], filter_size, enc3, l2(l2_reg))
    dec3 = upsampling_block_basic(dec2, depths[1], filter_size, enc2, l2(l2_reg))
    dec4 = upsampling_block_basic(dec3, depths[0], filter_size, enc1, l2(l2_reg))

    # Logits
    l1 = Conv2D(n_classes, (1, 1), padding='valid')(dec4)
    score = CropLayer2D(inputs, name='score')(l1)
    softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    return model
def build_segnet_basic(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
                       freeze_layers_from=None, path_weights=None,
                       depths=[64, 64, 64, 64], filter_size=7):
    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Build network

    # CONTRACTING PATH
    # Input layer
    inputs = Input(img_shape)
    padded1 = ZeroPadding2D(padding=(1, 1), name='pad1')(inputs)

    enc1 = downsampling_block_basic(padded1, depths[0], filter_size, l2(l2_reg))
    enc2 = downsampling_block_basic(enc1, depths[1], filter_size, l2(l2_reg))
    enc3 = downsampling_block_basic(enc2, depths[2], filter_size, l2(l2_reg))
    enc4 = downsampling_block_basic(enc3, depths[3], filter_size, l2(l2_reg))

    # Decoding layers
    dec1 = upsampling_block_basic(enc4, depths[3], filter_size, enc4, l2(l2_reg))
    dec2 = upsampling_block_basic(dec1, depths[2], filter_size, enc3, l2(l2_reg))
    dec3 = upsampling_block_basic(dec2, depths[1], filter_size, enc2, l2(l2_reg))
    dec4 = upsampling_block_basic(dec3, depths[0], filter_size, enc1, l2(l2_reg))

    l1 = Convolution2D(nclasses, 1, 1, border_mode='valid')(dec4)
    score = CropLayer2D(inputs, name='score')(l1)

    # Softmax
    softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    # Load pretrained Model
    if path_weights:
        load_matcovnet(model, n_classes=nclasses)

    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
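# The downsampling_block_basic / upsampling_block_basic helpers used by the
# builders above are not included in this section. The sketch below is only an
# illustration of the usual SegNet-basic block (conv + batch norm + ReLU +
# pooling on the way down, upsampling + conv + batch norm cropped to the skip
# tensor on the way up); the real helpers in the repository may differ.
def downsampling_block_basic_sketch(inputs, n_filters, filter_size,
                                    W_regularizer=None):
    # Hypothetical block: conv -> BN -> ReLU -> 2x2 max pooling
    # (axis=1 assumes channel-first ordering, as in the default img_shape above)
    x = Convolution2D(n_filters, filter_size, filter_size, border_mode='same',
                      W_regularizer=W_regularizer)(inputs)
    x = BatchNormalization(mode=0, axis=1)(x)
    x = Activation('relu')(x)
    return MaxPooling2D(pool_size=(2, 2))(x)


def upsampling_block_basic_sketch(inputs, n_filters, filter_size, skip,
                                  W_regularizer=None):
    # Hypothetical block: 2x2 upsampling -> conv -> BN, cropped to the skip shape
    x = UpSampling2D(size=(2, 2))(inputs)
    x = Convolution2D(n_filters, filter_size, filter_size, border_mode='same',
                      W_regularizer=W_regularizer)(x)
    x = BatchNormalization(mode=0, axis=1)(x)
    return CropLayer2D(skip)(x)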
def segnet_basic(inp, kernel, concat_axis, n_classes, l2r):
    # Encoding layers
    enc1 = downsampling_block_basic(inp, 64, kernel, concat_axis, l2(l2r))
    enc2 = downsampling_block_basic(enc1, 64, kernel, concat_axis, l2(l2r))
    enc3 = downsampling_block_basic(enc2, 64, kernel, concat_axis, l2(l2r))
    enc4 = downsampling_block_basic(enc3, 64, kernel, concat_axis, l2(l2r))

    # Decoding layers
    dec1 = upsampling_block_basic(enc4, 64, kernel, enc4, concat_axis, l2(l2r))
    dec2 = upsampling_block_basic(dec1, 64, kernel, enc3, concat_axis, l2(l2r))
    dec3 = upsampling_block_basic(dec2, 64, kernel, enc2, concat_axis, l2(l2r))
    dec4 = upsampling_block_basic(dec3, 64, kernel, enc1, concat_axis, l2(l2r))

    l1 = Convolution2D(n_classes, 1, 1, border_mode='valid')(dec4)
    score = CropLayer2D(inp, name='score')(l1)
    predictions = NdSoftmax()(score)

    return predictions
def create_classifier(body, data, n_classes, l2_reg=0.):
    # Include last layers
    top = BatchNormalization(mode=0, axis=channel_idx, name="bn7")(body)
    top = Activation('relu', name="relu7")(top)
    top = AtrousConvolution2D(512, 3, 3, 'he_normal', atrous_rate=(12, 12),
                              border_mode='same', name="conv6a",
                              W_regularizer=l2(l2_reg))(top)
    top = Activation('relu', name="conv6a_relu")(top)

    name = "hyperplane_num_cls_%d_branch_%d" % (n_classes, 12)

    def my_init(shape, name=None, dim_ordering='th'):
        return initializations.normal(shape, scale=0.01, name=name)

    top = AtrousConvolution2D(n_classes, 3, 3, my_init, atrous_rate=(12, 12),
                              border_mode='same', name=name,
                              W_regularizer=l2(l2_reg))(top)
    top = Deconvolution2D(n_classes, 16, 16, top._keras_shape, bilinear_init,
                          'linear', border_mode='valid', subsample=(8, 8),
                          bias=False, name="upscaling_" + str(n_classes),
                          W_regularizer=l2(l2_reg))(top)
    top = CropLayer2D(data, name='score')(top)
    top = NdSoftmax()(top)

    return top
def build_segnet_basic(inputs, n_classes, depths=[64, 128, 256, 512, 512],
                       filter_size=7, l2_reg=0.):
    # Encoding layers
    enc1 = downsampling_block_basic(inputs, depths[0], filter_size, l2(l2_reg))
    enc2 = downsampling_block_basic(enc1, depths[1], filter_size, l2(l2_reg))
    enc3 = downsampling_block_basic(enc2, depths[2], filter_size, l2(l2_reg))
    enc4 = downsampling_block_basic(enc3, depths[3], filter_size, l2(l2_reg))
    enc5 = downsampling_block_basic(enc4, depths[4], filter_size, l2(l2_reg))

    # Decoding layers
    dec1 = upsampling_block_basic(enc5, depths[4], filter_size, enc4, l2(l2_reg))
    dec2 = upsampling_block_basic(dec1, depths[3], filter_size, enc3, l2(l2_reg))
    dec3 = upsampling_block_basic(dec2, depths[2], filter_size, enc2, l2(l2_reg))
    dec4 = upsampling_block_basic(dec3, depths[1], filter_size, enc1, l2(l2_reg))
    dec5 = upsampling_block_basic(dec4, depths[0], filter_size, enc1, l2(l2_reg))

    # Logits
    conv10 = Convolution2D(n_classes, 1, 1, border_mode='valid',
                           init='he_normal', activation='sigmoid')(dec5)
    output = CropLayer2D(inputs, name='output')(conv10)
    # softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=output)

    return model
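# Hypothetical usage of the functional-API builders above, with example values
# only (channel-first input shape, 11 classes, a small L2 factor); it assumes
# the custom CropLayer2D / NdSoftmax layers and the *_block_basic helpers are
# importable in the same module.
def _demo_build_segnet_basic_sketch():
    inputs = Input((3, 360, 480))
    model = build_segnet_basic(inputs, n_classes=11, l2_reg=0.0001)
    model.summary()
    return model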
def transition_up_Layer(skip_connection, block_to_upsample, n_filters_keep):
    if K.image_dim_ordering() == 'th':
        concat_axis = 1
        # sizeSC = [skip_connection._keras_shape[2], skip_connection._keras_shape[3]]
        # sizeX = [x._keras_shape[2], x._keras_shape[3]]
    elif K.image_dim_ordering() == 'tf':
        concat_axis = -1
        # sizeSC = [skip_connection._keras_shape[1], skip_connection._keras_shape[2]]
        # sizeX = [x._keras_shape[1], x._keras_shape[2]]

    x = merge(block_to_upsample, mode='concat', concat_axis=concat_axis)
    print('shape_x:' + str(x._keras_shape))
    x = Deconvolution2D(n_filters_keep, 3, 3, input_shape=x._keras_shape,
                        activation='linear', border_mode='valid',
                        subsample=(2, 2))(x)
    print('shape_x_deconv:' + str(x._keras_shape))
    x_crop = CropLayer2D(skip_connection)(x)
    print('shape:' + str(x_crop._keras_shape))
    x = merge([x_crop, skip_connection], mode='concat', concat_axis=concat_axis)
    # print('shape_skip_connection:' + str(skip_connection._keras_shape))
    # newSkip = CropLayer2D(x)(skip_connection)
    # x = merge([x, newSkip], mode='concat', concat_axis=concat_axis)
    print('shape_merge:' + str(x._keras_shape))

    return x
def build_unet(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
               init='glorot_uniform', path_weights=None,
               freeze_layers_from=None, padding=100, dropout=True):
    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Input
    inputs = Input(img_shape, name='input')
    padded = ZeroPadding2D(padding=(padding, padding), name='padded')(inputs)

    # Block 1
    conv1_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_1', W_regularizer=l2(l2_reg))(padded)
    conv1_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_2', W_regularizer=l2(l2_reg))(conv1_1)
    pool1 = MaxPooling2D((2, 2), (2, 2), name='pool1')(conv1_2)

    # Block 2
    conv2_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv2_1', W_regularizer=l2(l2_reg))(pool1)
    conv2_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv2_2', W_regularizer=l2(l2_reg))(conv2_1)
    pool2 = MaxPooling2D((2, 2), (2, 2), name='pool2')(conv2_2)

    # Block 3
    conv3_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv3_1', W_regularizer=l2(l2_reg))(pool2)
    conv3_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv3_2', W_regularizer=l2(l2_reg))(conv3_1)
    pool3 = MaxPooling2D((2, 2), (2, 2), name='pool3')(conv3_2)

    # Block 4
    conv4_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv4_1', W_regularizer=l2(l2_reg))(pool3)
    conv4_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv4_2', W_regularizer=l2(l2_reg))(conv4_1)
    if dropout:
        conv4_2 = Dropout(0.5, name='drop1')(conv4_2)
    pool4 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv4_2)

    # Block 5
    conv5_1 = Convolution2D(1024, 3, 3, init, 'relu', border_mode='valid',
                            name='conv5_1', W_regularizer=l2(l2_reg))(pool4)
    conv5_2 = Convolution2D(1024, 3, 3, init, 'relu', border_mode='valid',
                            name='conv5_2', W_regularizer=l2(l2_reg))(conv5_1)
    if dropout:
        conv5_2 = Dropout(0.5, name='drop2')(conv5_2)
    # pool5 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv5_2)

    # Upsampling 1
    upconv4 = Deconvolution2D(512, 2, 2, conv5_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='upconv4', W_regularizer=l2(l2_reg))(conv5_2)
    conv4_2_crop = CropLayer2D(upconv4, name='conv4_2_crop')(conv4_2)
    upconv4_crop = CropLayer2D(upconv4, name='upconv4_crop')(upconv4)
    Concat_4 = merge([conv4_2_crop, upconv4_crop], mode='concat',
                     concat_axis=3, name='Concat_4')
    conv6_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv6_1', W_regularizer=l2(l2_reg))(Concat_4)
    conv6_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv6_2', W_regularizer=l2(l2_reg))(conv6_1)

    # Upsampling 2
    upconv3 = Deconvolution2D(256, 2, 2, conv6_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='upconv3', W_regularizer=l2(l2_reg))(conv6_2)
    conv3_2_crop = CropLayer2D(upconv3, name='conv3_2_crop')(conv3_2)
    Concat_3 = merge([conv3_2_crop, upconv3], mode='concat', name='Concat_3')
    conv7_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv7_1', W_regularizer=l2(l2_reg))(Concat_3)
    conv7_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv7_2', W_regularizer=l2(l2_reg))(conv7_1)

    # Upsampling 3
    upconv2 = Deconvolution2D(128, 2, 2, conv7_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='upconv2', W_regularizer=l2(l2_reg))(conv7_2)
    conv2_2_crop = CropLayer2D(upconv2, name='conv2_2_crop')(conv2_2)
    Concat_2 = merge([conv2_2_crop, upconv2], mode='concat', name='Concat_2')
    conv8_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv8_1', W_regularizer=l2(l2_reg))(Concat_2)
    conv8_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv8_2', W_regularizer=l2(l2_reg))(conv8_1)

    # Upsampling 4
    upconv1 = Deconvolution2D(64, 2, 2, conv8_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='upconv1', W_regularizer=l2(l2_reg))(conv8_2)
    conv1_2_crop = CropLayer2D(upconv1, name='conv1_2_crop')(conv1_2)
    Concat_1 = merge([conv1_2_crop, upconv1], mode='concat', name='Concat_1')
    conv9_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv9_1', W_regularizer=l2(l2_reg))(Concat_1)
    conv9_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv9_2', W_regularizer=l2(l2_reg))(conv9_1)

    conv10 = Convolution2D(nclasses, 1, 1, init, 'linear', border_mode='valid',
                           name='conv10', W_regularizer=l2(l2_reg))(conv9_2)

    # Crop
    final_crop = CropLayer2D(inputs, name='final_crop')(conv10)

    # Softmax
    softmax_unet = NdSoftmax()(final_crop)

    # Complete model
    model = Model(input=inputs, output=softmax_unet)

    # Load pretrained Model
    if path_weights:
        pass

    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
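# Why the large ZeroPadding2D at the input and the final CropLayer2D(inputs)
# are needed: every 'valid' 3x3 convolution removes 2 pixels and every 2x2
# pooling halves the size, so the decoder output is smaller than (and offset
# from) the padded input. A rough sanity check of that spatial bookkeeping for
# the build_unet above (illustration only; the helper name is hypothetical and
# it does not touch the model code):
def valid_unet_output_size(in_size, padding=100):
    s = in_size + 2 * padding
    for _ in range(4):       # contracting blocks: two 3x3 valid convs + 2x2 pool
        s = (s - 4) // 2
    s = s - 4                # bottleneck: two 3x3 valid convs
    for _ in range(4):       # expanding blocks: 2x2 stride-2 deconv + two 3x3 valid convs
        s = s * 2 - 4
    return s

# e.g. valid_unet_output_size(360) == 372, and CropLayer2D(inputs) then trims
# that 372-pixel decoder map back to the original 360 pixels.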
def build_segnet(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
                 init='glorot_uniform', path_weights=None,
                 freeze_layers_from=None):
    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Build network
    # Encoding blocks
    # Input layer
    inputs = Input(shape=img_shape)
    padded = ZeroPadding2D(padding=(10, 10), name='pad100')(inputs)

    # Enc Block 1
    X = batched_conv(padded, 64, 3, 3, l2_reg, 1, 'Enc')
    X = batched_conv(X, 64, 3, 3, l2_reg, 2, 'Enc')
    X = MaxPooling2D(name='Enc_MaxP_1')(X)

    # Enc Block 2
    X = batched_conv(X, 128, 3, 3, l2_reg, 3, 'Enc')
    X = batched_conv(X, 128, 3, 3, l2_reg, 4, 'Enc')
    X = MaxPooling2D(name='Enc_MaxP_2')(X)

    # Enc Block 3
    X = batched_conv(X, 256, 3, 3, l2_reg, 5, 'Enc')
    X = batched_conv(X, 256, 3, 3, l2_reg, 6, 'Enc')
    X = batched_conv(X, 256, 3, 3, l2_reg, 7, 'Enc')
    X = MaxPooling2D(name='Enc_MaxP_3')(X)

    # Enc Block 4
    X = batched_conv(X, 512, 3, 3, l2_reg, 8, 'Enc')
    X = batched_conv(X, 512, 3, 3, l2_reg, 9, 'Enc')
    X = batched_conv(X, 512, 3, 3, l2_reg, 10, 'Enc')
    X = MaxPooling2D(name='Enc_MaxP_4')(X)

    # Enc Block 5
    X = batched_conv(X, 512, 3, 3, l2_reg, 11, 'Enc')
    X = batched_conv(X, 512, 3, 3, l2_reg, 12, 'Enc')
    X = batched_conv(X, 512, 3, 3, l2_reg, 13, 'Enc')
    X = MaxPooling2D(name='Enc_MaxP_5')(X)

    # Dec Block 1
    X = UpSampling2D(name='Dec_Ups_1')(X)
    X = batched_conv(X, 512, 3, 3, l2_reg, 1, 'Dec')
    X = batched_conv(X, 512, 3, 3, l2_reg, 2, 'Dec')
    X = batched_conv(X, 512, 3, 3, l2_reg, 3, 'Dec')

    # Dec Block 2
    X = UpSampling2D(name='Dec_Ups_2')(X)
    X = batched_conv(X, 512, 3, 3, l2_reg, 4, 'Dec')
    X = batched_conv(X, 512, 3, 3, l2_reg, 5, 'Dec')
    X = batched_conv(X, 512, 3, 3, l2_reg, 6, 'Dec')

    # Dec Block 3
    X = UpSampling2D(name='Dec_Ups_3')(X)
    X = batched_conv(X, 256, 3, 3, l2_reg, 7, 'Dec')
    X = batched_conv(X, 256, 3, 3, l2_reg, 8, 'Dec')
    X = batched_conv(X, 256, 3, 3, l2_reg, 9, 'Dec')

    # Dec Block 4
    X = UpSampling2D(name='Dec_Ups_4')(X)
    X = batched_conv(X, 128, 3, 3, l2_reg, 10, 'Dec')
    X = batched_conv(X, 128, 3, 3, l2_reg, 11, 'Dec')

    # Dec Block 5
    X = UpSampling2D(name='Dec_Ups_5')(X)
    X = batched_conv(X, 128, 3, 3, l2_reg, 12, 'Dec')
    X = batched_conv(X, 128, 3, 3, l2_reg, 13, 'Dec')

    # Softmax
    X = CropLayer2D(inputs, name='Crop')(X)
    # X = Reshape(img_shape)(X)
    softmax_segnet = NdSoftmax()(X)
    # print img_shape
    # X = Reshape((5, 288 * 384))(X)
    # X = Permute((2, 1))(X)
    # softmax_segnet = Activation('softmax')(X)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    # Load pretrained Model
    if path_weights:
        load_matcovnet(model, path_weights, n_classes=nclasses)

    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
def build_fcn8(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
               init='glorot_uniform', path_weights=None,
               freeze_layers_from=None):
    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Build network
    # CONTRACTING PATH
    # Input layer
    inputs = Input(img_shape)
    padded = ZeroPadding2D(padding=(100, 100), name='pad100')(inputs)

    # Block 1
    conv1_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_1', W_regularizer=l2(l2_reg))(padded)
    conv1_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='same',
                            name='conv1_2', W_regularizer=l2(l2_reg))(conv1_1)
    pool1 = MaxPooling2D((2, 2), (2, 2), name='pool1')(conv1_2)

    # Block 2
    conv2_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='same',
                            name='conv2_1', W_regularizer=l2(l2_reg))(pool1)
    conv2_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='same',
                            name='conv2_2', W_regularizer=l2(l2_reg))(conv2_1)
    pool2 = MaxPooling2D((2, 2), (2, 2), name='pool2')(conv2_2)

    # Block 3
    conv3_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='same',
                            name='conv3_1', W_regularizer=l2(l2_reg))(pool2)
    conv3_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='same',
                            name='conv3_2', W_regularizer=l2(l2_reg))(conv3_1)
    conv3_3 = Convolution2D(256, 3, 3, init, 'relu', border_mode='same',
                            name='conv3_3', W_regularizer=l2(l2_reg))(conv3_2)
    pool3 = MaxPooling2D((2, 2), (2, 2), name='pool3')(conv3_3)

    # Block 4
    conv4_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='same',
                            name='conv4_1', W_regularizer=l2(l2_reg))(pool3)
    conv4_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='same',
                            name='conv4_2', W_regularizer=l2(l2_reg))(conv4_1)
    conv4_3 = Convolution2D(512, 3, 3, init, 'relu', border_mode='same',
                            name='conv4_3', W_regularizer=l2(l2_reg))(conv4_2)
    pool4 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv4_3)

    # Block 5
    conv5_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='same',
                            name='conv5_1', W_regularizer=l2(l2_reg))(pool4)
    conv5_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='same',
                            name='conv5_2', W_regularizer=l2(l2_reg))(conv5_1)
    conv5_3 = Convolution2D(512, 3, 3, init, 'relu', border_mode='same',
                            name='conv5_3', W_regularizer=l2(l2_reg))(conv5_2)
    pool5 = MaxPooling2D((2, 2), (2, 2), name='pool5')(conv5_3)

    # Block 6 (fully conv)
    fc6 = Convolution2D(4096, 7, 7, init, 'relu', border_mode='valid',
                        name='fc6', W_regularizer=l2(l2_reg))(pool5)
    fc6 = Dropout(0.5)(fc6)

    # Block 7 (fully conv)
    fc7 = Convolution2D(4096, 1, 1, init, 'relu', border_mode='valid',
                        name='fc7', W_regularizer=l2(l2_reg))(fc6)
    fc7 = Dropout(0.5)(fc7)

    score_fr = Convolution2D(nclasses, 1, 1, init, 'relu',
                             border_mode='valid', name='score_fr')(fc7)

    # DECONTRACTING PATH
    # Unpool 1
    score_pool4 = Convolution2D(nclasses, 1, 1, init, 'relu',
                                border_mode='same', name='score_pool4',
                                W_regularizer=l2(l2_reg))(pool4)
    score2 = Deconvolution2D(nclasses, 4, 4, score_fr._keras_shape, init,
                             'linear', border_mode='valid', subsample=(2, 2),
                             name='score2', W_regularizer=l2(l2_reg))(score_fr)
    score_pool4_crop = CropLayer2D(score2, name='score_pool4_crop')(score_pool4)
    score_fused = merge([score_pool4_crop, score2], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_fused')

    # Unpool 2
    score_pool3 = Convolution2D(nclasses, 1, 1, init, 'relu',
                                border_mode='valid', name='score_pool3',
                                W_regularizer=l2(l2_reg))(pool3)
    score4 = Deconvolution2D(nclasses, 4, 4, score_fused._keras_shape, init,
                             'linear', border_mode='valid', subsample=(2, 2),
                             bias=True,  # TODO: No bias??
                             name='score4', W_regularizer=l2(l2_reg))(score_fused)
    score_pool3_crop = CropLayer2D(score4, name='score_pool3_crop')(score_pool3)
    score_final = merge([score_pool3_crop, score4], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_final')

    upsample = Deconvolution2D(nclasses, 16, 16, score_final._keras_shape, init,
                               'linear', border_mode='valid', subsample=(8, 8),
                               bias=False,  # TODO: No bias??
                               name='upsample', W_regularizer=l2(l2_reg))(score_final)
    score = CropLayer2D(inputs, name='score')(upsample)

    # Softmax
    softmax_fcn8 = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_fcn8)

    # Load pretrained Model
    if path_weights:
        load_matcovnet(model, n_classes=nclasses)

    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
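# The custom_sum / custom_sum_shape callables used by the skip-connection
# merges in build_fcn8 are not shown in this section. A minimal sketch of what
# such helpers typically look like for an FCN-8 fuse step (an element-wise sum
# merge that keeps the shape of its first input); the actual implementation in
# the repository may differ:
def custom_sum_sketch(tensors):
    # Element-wise addition of the cropped pool score and the upsampled score
    t1, t2 = tensors
    return t1 + t2


def custom_sum_shape_sketch(shapes):
    # Both inputs have been cropped to the same size, so keep the first shape
    return shapes[0]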
def __create_fcn_dense_net(nb_classes, img_input, include_top, nb_dense_block=5,
                           growth_rate=12, reduction=0.0, dropout_rate=None,
                           weight_decay=1E-4, nb_layers_per_block=4,
                           nb_upsampling_conv=128, upsampling_type='upsampling',
                           batchsize=None, init_conv_filters=48,
                           input_shape=None, activation='softmax',
                           is_training=True):
    concat_axis = -1
    rows, cols, _ = input_shape

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, \
            "reduction value must lie between 0.0 and 1.0"

    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, \
        "Parameter `upsampling_conv` number of channels must be a positive " \
        "number divisible by 4 and greater than 12"

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list
        assert len(nb_layers) == (nb_dense_block + 1), \
            "If list, nb_layer is used as provided. " \
            "Note that list size must be (nb_dense_block + 1)"
        bottleneck_nb_layers = nb_layers[-1]
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    # x = Convolution2D(init_conv_filters, (3, 3), kernel_initializer="he_uniform",
    #                   padding="same", name="initial_conv2D", use_bias=False,
    #                   kernel_regularizer=l2(weight_decay))(img_input)
    x = tf.contrib.slim.conv2d(
        img_input, init_conv_filters, [3, 3], padding='SAME',
        weights_regularizer=tf.contrib.slim.l2_regularizer(weight_decay),
        weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
        scope='initial_conv2D')

    nb_filter = init_conv_filters
    skip_list = []

    # Add dense blocks and transition down blocks
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
                                     growth_rate, dropout_rate=dropout_rate,
                                     weight_decay=weight_decay,
                                     block='block_down' + str(block_idx + 1),
                                     is_training=is_training)
        # Skip connection
        skip_list.append(x)

        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay,
                               block='block_down' + str(block_idx + 1),
                               is_training=is_training)
        # this is calculated inside transition_down_block
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_down_block;
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(
        x, bottleneck_nb_layers, nb_filter, growth_rate,
        dropout_rate=dropout_rate, weight_decay=weight_decay,
        return_concat_list=True,
        block='block_down' + str(nb_dense_block + 1), is_training=is_training)

    skip_list = skip_list[::-1]  # reverse the skip list
    # out_shape = [batchsize, rows // 16, cols // 16, nb_filter]

    # Add dense blocks and transition up blocks
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]
        # out_shape[3] = n_filters_keep

        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0]).
        # l = concatenate(concat_list[1:], axis=concat_axis,
        #                 name='concat1_block_up' + str(block_idx + 1))
        l = tf.concat(concat_list[1:], axis=concat_axis,
                      name='concat1_block_up' + str(block_idx + 1))

        t = __transition_up_block(l, nb_filters=n_filters_keep,
                                  type=upsampling_type,
                                  block='block_up' + str(block_idx + 1))
        t = CropLayer2D(skip_list[block_idx],
                        name='crop_block_up' + str(block_idx + 1))(t)

        # concatenate the skip connection with the transition block
        # x = concatenate([t, skip_list[block_idx]], axis=concat_axis,
        #                 name='concat2_block_up' + str(block_idx + 1))
        x = tf.concat([t, skip_list[block_idx]], axis=concat_axis,
                      name='concat2_block_up' + str(block_idx + 1))

        # out_shape[1] *= 2
        # out_shape[2] *= 2

        # Dont allow the feature map size to grow in upsampling dense blocks
        _, nb_filter, concat_list = __dense_block(
            x, nb_layers[nb_dense_block + block_idx + 1], nb_filter=growth_rate,
            growth_rate=growth_rate, dropout_rate=dropout_rate,
            weight_decay=weight_decay, return_concat_list=True,
            grow_nb_filters=False, block='block_up' + str(block_idx + 1))

    if include_top:
        # x = Convolution2D(nb_classes, (1, 1), activation='linear', padding='same',
        #                   kernel_regularizer=l2(weight_decay), use_bias=False,
        #                   name='convTop')(x)
        x = tf.contrib.slim.conv2d(
            x, nb_classes, [1, 1], padding='SAME',
            weights_regularizer=tf.contrib.slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu, scope='convTop')
        x = CropLayer2D(img_input, name='score')(x)
        # x = NdSoftmax(x)
        x = tf.nn.softmax(x)

    return x
def build_densenet_segmentation(in_shape=(3, 224, 224), n_classes=1000,
                                weight_decay=0., freeze_layers_from='base_model',
                                path_weights=None):
    #####################
    # First Convolution #
    #####################
    print('Input shape:' + str(in_shape))
    inp = Input(shape=in_shape)
    n_filter = 48
    x = Convolution2D(n_filter, 3, 3, subsample=(1, 1), border_mode='same')(inp)

    #####################
    # Downsampling path #
    #####################
    growth_rate = 16
    dropout_fraction = 0.2
    n_layers_down = [4, 5, 7, 10, 12]
    skip_connection_list = []

    for i in range(len(n_layers_down)):
        # Dense block
        x, n_filter = denseBlock_down(x, n_layers_down[i], growth_rate,
                                      n_filter, dropout_fraction)
        print('number of filters = ' + str(x._keras_shape[-1]))
        x = transition_down_Layer(x, n_filter, dropout_fraction)
        # At the end of the dense block, the current output is stored in the
        # skip_connections list
        skip_connection_list.append(x)
        # print('Shape: ' + str(x._keras_shape))

    skip_connection_list = skip_connection_list[::-1]

    #####################
    #     Bottleneck    #
    #####################
    n_layers = 15
    # We store now the output of the next dense block in a list (block_to_upsample).
    # We will only upsample these new feature maps
    x, n_filter, block_to_upsample = denseBlock_up(x, n_layers, growth_rate,
                                                   n_filter, dropout_fraction)
    print('number of filters = ' + str(x._keras_shape[-1]))

    # Add dense blocks of upsampling path
    n_layers_up = [15, 12, 10, 7, 5, 4]
    for j in range(1, len(n_layers_up)):
        # Transition up (upsampling + concatenation with the skip connection)
        n_filters_keep = growth_rate * n_layers_up[j - 1]
        x = transition_up_Layer(skip_connection_list[j - 1], block_to_upsample,
                                n_filters_keep)
        x, n_filter, block_to_upsample = denseBlock_up(x, n_layers_up[j],
                                                       growth_rate, n_filter,
                                                       dropout_fraction)
        print('number of filters = ' + str(x._keras_shape[-1]))

    x = Deconvolution2D(n_filters_keep, 3, 3, input_shape=x._keras_shape,
                        activation='linear', border_mode='valid',
                        subsample=(2, 2))(x)
    x = CropLayer2D(inp)(x)

    # Last convolution
    x = Convolution2D(n_classes, 1, 1, subsample=(1, 1), border_mode='same')(x)

    #####################
    #      Softmax      #
    #####################
    predictions = NdSoftmax()(x)
    print('Predictions_shape: ' + str(predictions._keras_shape))

    # This is the model we will train
    model = Model(input=inp, output=predictions)
    model.summary()

    # Freeze some layers
    if freeze_layers_from is not None:
        if freeze_layers_from == 'base_model':
            print('   Freezing base model layers')
            for layer in model.layers:
                layer.trainable = False
        else:
            for i, layer in enumerate(model.layers):
                print(i, layer.name)
            print('   Freezing from layer 0 to ' + str(freeze_layers_from))
            for layer in model.layers[:freeze_layers_from]:
                layer.trainable = False
            for layer in model.layers[freeze_layers_from:]:
                layer.trainable = True

    return model
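# The denseBlock_down / denseBlock_up / transition_down_Layer helpers used by
# build_densenet_segmentation are not part of this section. As a point of
# reference, the transition-down block in the Tiramisu paper is
# BN -> ReLU -> 1x1 conv -> dropout -> 2x2 max pooling; below is a hypothetical
# sketch with that structure (the repository version may differ):
def transition_down_Layer_sketch(x, n_filter, dropout_fraction):
    # axis=-1 assumes channel-last tensors; use axis=1 for channel-first
    x = BatchNormalization(mode=0, axis=-1)(x)
    x = Activation('relu')(x)
    x = Convolution2D(n_filter, 1, 1, border_mode='same')(x)
    x = Dropout(dropout_fraction)(x)
    return MaxPooling2D(pool_size=(2, 2))(x)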
def build_fcn8(img_shape, x_shape=None, dim_ordering='th', l2_reg=0.,
               nclasses=8, x_test_val=None, weights_file=False, **kwargs):
    do = dim_ordering

    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Build network
    # CONTRACTING PATH
    # Input layer
    inputs = Input(img_shape)
    sh = inputs._keras_shape

    # For Theano debug purposes
    if x_test_val is not None:
        inputs.tag.test_value = x_test_val
        theano.config.compute_test_value = "warn"

    padded = ZeroPadding2D(padding=(100, 100), dim_ordering=do,
                           name='pad100')(inputs)

    # Block 1
    conv1_1 = Convolution2D(64, 3, 3, activation='relu', border_mode='valid',
                            dim_ordering=do, name='conv1_1',
                            W_regularizer=l2(l2_reg), trainable=True)(padded)
    conv1_2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv1_2',
                            W_regularizer=l2(l2_reg), trainable=True)(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool1')(conv1_2)

    # Block 2
    conv2_1 = Convolution2D(128, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv2_1',
                            W_regularizer=l2(l2_reg), trainable=True)(pool1)
    conv2_2 = Convolution2D(128, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv2_2',
                            W_regularizer=l2(l2_reg), trainable=True)(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool2')(conv2_2)

    # Block 3
    conv3_1 = Convolution2D(256, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv3_1',
                            W_regularizer=l2(l2_reg), trainable=True)(pool2)
    conv3_2 = Convolution2D(256, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv3_2',
                            W_regularizer=l2(l2_reg), trainable=True)(conv3_1)
    conv3_3 = Convolution2D(256, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv3_3',
                            W_regularizer=l2(l2_reg), trainable=True)(conv3_2)
    pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool3')(conv3_3)

    # Block 4
    conv4_1 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv4_1',
                            W_regularizer=l2(l2_reg), trainable=True)(pool3)
    conv4_2 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv4_2',
                            W_regularizer=l2(l2_reg), trainable=True)(conv4_1)
    conv4_3 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv4_3',
                            W_regularizer=l2(l2_reg), trainable=True)(conv4_2)
    pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool4')(conv4_3)

    # Block 5
    conv5_1 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv5_1',
                            W_regularizer=l2(l2_reg), trainable=True)(pool4)
    conv5_2 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv5_2',
                            W_regularizer=l2(l2_reg), trainable=True)(conv5_1)
    conv5_3 = Convolution2D(512, 3, 3, activation='relu', border_mode='same',
                            dim_ordering=do, name='conv5_3',
                            W_regularizer=l2(l2_reg), trainable=True)(conv5_2)
    pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool5')(conv5_3)

    # Block 6 (fully conv)
    fc6 = Convolution2D(4096, 7, 7, activation='relu', border_mode='valid',
                        dim_ordering=do, name='fc6',
                        W_regularizer=l2(l2_reg), trainable=True)(pool5)
    fc6 = Dropout(0.5)(fc6)

    # Block 7 (fully conv)
    fc7 = Convolution2D(4096, 1, 1, activation='relu', border_mode='valid',
                        dim_ordering=do, name='fc7',
                        W_regularizer=l2(l2_reg), trainable=True)(fc6)
    fc7 = Dropout(0.5)(fc7)

    score_fr = Convolution2D(nclasses, 1, 1, activation='relu',
                             border_mode='valid', dim_ordering=do,
                             name='score_fr')(fc7)

    # DECONTRACTING PATH
    # Unpool 1
    score_pool4 = Convolution2D(nclasses, 1, 1, activation='relu',
                                border_mode='same', dim_ordering=do,
                                name='score_pool4', W_regularizer=l2(l2_reg),
                                trainable=True)(pool4)
    score2 = Deconvolution2D(nb_filter=nclasses, nb_row=4, nb_col=4,
                             input_shape=score_fr._keras_shape,
                             subsample=(2, 2), border_mode='valid',
                             activation='linear', W_regularizer=l2(l2_reg),
                             dim_ordering=do, trainable=True,
                             name='score2')(score_fr)
    score_pool4_crop = CropLayer2D(score2, dim_ordering=do,
                                   name='score_pool4_crop')(score_pool4)
    score_fused = merge([score_pool4_crop, score2], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_fused')

    # Unpool 2
    score_pool3 = Convolution2D(nclasses, 1, 1, activation='relu',
                                border_mode='valid', dim_ordering=do,
                                W_regularizer=l2(l2_reg), trainable=True,
                                name='score_pool3')(pool3)
    score4 = Deconvolution2D(nb_filter=nclasses, nb_row=4, nb_col=4,
                             input_shape=score_fused._keras_shape,
                             subsample=(2, 2), border_mode='valid',
                             activation='linear', W_regularizer=l2(l2_reg),
                             dim_ordering=do, trainable=True, name='score4',
                             bias=False)(score_fused)  # TODO: No bias??
    score_pool3_crop = CropLayer2D(score4, dim_ordering=do,
                                   name='score_pool3_crop')(score_pool3)
    score_final = merge([score_pool3_crop, score4], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_final')

    # Unpool 3
    upsample = Deconvolution2D(nb_filter=nclasses, nb_row=16, nb_col=16,
                               input_shape=score_final._keras_shape,
                               subsample=(8, 8), border_mode='valid',
                               activation='linear', W_regularizer=l2(l2_reg),
                               dim_ordering=do, trainable=True, name='upsample',
                               bias=False)(score_final)  # TODO: No bias??
    score = CropLayer2D(inputs, dim_ordering=do, name='score')(upsample)

    # Softmax
    if do == 'th':
        softmax_fcn8 = NdSoftmax(1)(score)
    else:
        softmax_fcn8 = NdSoftmax(3)(score)

    # Complete model
    net = Model(input=inputs, output=softmax_fcn8)

    # Load weights
    if weights_file:
        print('   > Loading weights from pretrained model: ' + weights_file)
        net.load_weights(weights_file)

    return net
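# Hypothetical usage of the dim_ordering-aware FCN-8 builder above, with
# example values only (channel-last input shape, TensorFlow ordering, 11
# classes); no pretrained weights file is assumed.
def _demo_build_fcn8_tf_sketch():
    net = build_fcn8(img_shape=(224, 224, 3), dim_ordering='tf', nclasses=11)
    net.summary()
    return net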
def build_unet(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
               init='glorot_uniform', freeze_layers_from=None,
               path_weights=None):
    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Build network
    # CONTRACTING PATH
    # Input layer
    inputs = Input(img_shape)
    padded1 = ZeroPadding2D(padding=(100, 100), name='padded1')(inputs)

    # Block 1
    conv1_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_1', W_regularizer=l2(l2_reg))(padded1)
    conv1_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_2', W_regularizer=l2(l2_reg))(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='pool1')(conv1_2)

    # Block 2
    conv2_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv2_1', W_regularizer=l2(l2_reg))(pool1)
    conv2_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv2_2', W_regularizer=l2(l2_reg))(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='pool2')(conv2_2)

    # Block 3
    conv3_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv3_1', W_regularizer=l2(l2_reg))(pool2)
    conv3_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv3_2', W_regularizer=l2(l2_reg))(conv3_1)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='pool3')(conv3_2)

    # Block 4
    conv4_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv4_1', W_regularizer=l2(l2_reg))(pool3)
    conv4_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv4_2', W_regularizer=l2(l2_reg))(conv4_1)
    conv4_drop = Dropout(0.5, name='conv4_drop')(conv4_2)
    pool4 = MaxPooling2D(pool_size=(2, 2), name='pool4')(conv4_drop)

    # Block 5
    bottom_conv1 = Convolution2D(1024, 3, 3, init, 'relu', border_mode='valid',
                                 name='bottom_conv1',
                                 W_regularizer=l2(l2_reg))(pool4)
    bottom_conv2 = Convolution2D(1024, 3, 3, init, 'relu', border_mode='valid',
                                 name='bottom_conv2',
                                 W_regularizer=l2(l2_reg))(bottom_conv1)
    bottom_drop = Dropout(0.5, name='bottom_drop')(bottom_conv2)

    # EXPANDING PATH (decoder blocks)
    # Block 6
    deconv4 = Deconvolution2D(512, 2, 2, bottom_drop._keras_shape, init,
                              'linear', border_mode='valid', subsample=(2, 2),
                              name='deconv4', W_regularizer=l2(l2_reg))(bottom_drop)
    conv4_crop = CropLayer2D(deconv4, name='conv4_crop')(conv4_drop)
    deconv4_crop = CropLayer2D(deconv4, name='deconv4_crop')(deconv4)
    deconv4_concat = merge([deconv4_crop, conv4_crop], mode='concat',
                           concat_axis=3, name='deconv4_concat')
    deconv4_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv4_1', W_regularizer=l2(l2_reg))(deconv4_concat)
    deconv4_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv4_2', W_regularizer=l2(l2_reg))(deconv4_1)

    # Block 7
    deconv3 = Deconvolution2D(256, 2, 2, deconv4_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='deconv3', W_regularizer=l2(l2_reg))(deconv4_2)
    deconv3_crop = CropLayer2D(deconv3, name='deconv3_crop')(conv3_2)
    deconv3_concat = merge([deconv3_crop, deconv3], mode='concat',
                           concat_axis=3, name='deconv3_concat')
    deconv3_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv3_1', W_regularizer=l2(l2_reg))(deconv3_concat)
    deconv3_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv3_2', W_regularizer=l2(l2_reg))(deconv3_1)

    # Block 8
    deconv2 = Deconvolution2D(128, 2, 2, deconv3_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='deconv2', W_regularizer=l2(l2_reg))(deconv3_2)
    deconv2_crop = CropLayer2D(deconv2, name='deconv2_crop')(conv2_2)
    deconv2_concat = merge([deconv2_crop, deconv2], mode='concat',
                           concat_axis=3, name='deconv2_concat')
    deconv2_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv2_1', W_regularizer=l2(l2_reg))(deconv2_concat)
    deconv2_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv2_2', W_regularizer=l2(l2_reg))(deconv2_1)

    # Block 9
    deconv1 = Deconvolution2D(64, 2, 2, deconv2_2._keras_shape, init, 'linear',
                              border_mode='valid', subsample=(2, 2),
                              name='deconv1', W_regularizer=l2(l2_reg))(deconv2_2)
    deconv1_crop = CropLayer2D(deconv1, name='deconv1_crop')(conv1_2)
    deconv1_concat = merge([deconv1_crop, deconv1], mode='concat',
                           concat_axis=3, name='deconv1_concat')
    deconv1_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv1_1', W_regularizer=l2(l2_reg))(deconv1_concat)
    deconv1_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                              name='deconv1_2', W_regularizer=l2(l2_reg))(deconv1_1)

    l1 = Convolution2D(nclasses, 1, 1, border_mode='valid',
                       name='logits')(deconv1_2)
    score = CropLayer2D(inputs, name='score')(l1)

    # Softmax
    softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    # Load pretrained Model
    if path_weights:
        load_matcovnet(model, n_classes=nclasses)

    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
def build_segnet_vgg(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
                     init='glorot_uniform', freeze_layers_from=None,
                     path_weights=None):
    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Build network
    # CONTRACTING PATH
    # Input layer
    inputs = Input(img_shape)
    padded1 = ZeroPadding2D(padding=(1, 1), name='pad1')(inputs)

    # Block 1
    conv1_1 = segnet_conv2D(padded1, 64, 3, 3, init, block='1', layer='1', l2_reg=l2_reg)
    conv1_2 = segnet_conv2D(conv1_1, 64, 3, 3, init, block='1', layer='2', l2_reg=l2_reg)
    pool1 = MaxPooling2D((2, 2), (2, 2), name='pool1')(conv1_2)

    # Block 2
    padded2 = ZeroPadding2D(padding=(1, 1), name='pad2')(pool1)
    conv2_1 = segnet_conv2D(padded2, 128, 3, 3, init, block='2', layer='1', l2_reg=l2_reg)
    conv2_2 = segnet_conv2D(conv2_1, 128, 3, 3, init, block='2', layer='2', l2_reg=l2_reg)
    pool2 = MaxPooling2D((2, 2), (2, 2), name='pool2')(conv2_2)

    # Block 3
    padded3 = ZeroPadding2D(padding=(1, 1), name='pad3')(pool2)
    conv3_1 = segnet_conv2D(padded3, 256, 3, 3, init, block='3', layer='1', l2_reg=l2_reg)
    conv3_2 = segnet_conv2D(conv3_1, 256, 3, 3, init, block='3', layer='2', l2_reg=l2_reg)
    conv3_3 = segnet_conv2D(conv3_2, 256, 3, 3, init, block='3', layer='3', l2_reg=l2_reg)
    pool3 = MaxPooling2D((2, 2), (2, 2), name='pool3')(conv3_3)

    # Block 4
    padded4 = ZeroPadding2D(padding=(1, 1), name='pad4')(pool3)
    conv4_1 = segnet_conv2D(padded4, 512, 3, 3, init, block='4', layer='1', l2_reg=l2_reg)
    conv4_2 = segnet_conv2D(conv4_1, 512, 3, 3, init, block='4', layer='2', l2_reg=l2_reg)
    conv4_3 = segnet_conv2D(conv4_2, 512, 3, 3, init, block='4', layer='3', l2_reg=l2_reg)
    pool4 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv4_3)

    # Block 5
    padded5 = ZeroPadding2D(padding=(1, 1), name='pad5')(pool4)
    conv5_1 = segnet_conv2D(padded5, 512, 3, 3, init, block='5', layer='1', l2_reg=l2_reg)
    conv5_2 = segnet_conv2D(conv5_1, 512, 3, 3, init, block='5', layer='2', l2_reg=l2_reg)
    conv5_3 = segnet_conv2D(conv5_2, 512, 3, 3, init, block='5', layer='3', l2_reg=l2_reg)
    pool5 = MaxPooling2D((2, 2), (2, 2), name='pool5')(conv5_3)

    # Decoding layers
    # Block 6: Unpooling block 5
    unpool5 = DePool2D(pool2d_layer=pool5, size=(2, 2), name='unpool_block5')(pool5)
    conv6_1 = segnet_conv2D(unpool5, 512, 3, 3, init, block='6', layer='1', l2_reg=l2_reg)
    conv6_2 = segnet_conv2D(conv6_1, 512, 3, 3, init, block='6', layer='2', l2_reg=l2_reg)
    conv6_3 = segnet_conv2D(conv6_2, 512, 3, 3, init, block='6', layer='3', l2_reg=l2_reg)

    # Block 7: Unpooling block 4
    unpool4 = DePool2D(pool2d_layer=pool4, size=(2, 2), name='unpool_block4')(conv6_3)
    conv7_1 = segnet_conv2D(unpool4, 512, 3, 3, init, block='7', layer='1', l2_reg=l2_reg)
    conv7_2 = segnet_conv2D(conv7_1, 512, 3, 3, init, block='7', layer='2', l2_reg=l2_reg)
    conv7_3 = segnet_conv2D(conv7_2, 512, 3, 3, init, block='7', layer='3', l2_reg=l2_reg)

    # Block 8: Unpooling block 3
    unpool3 = DePool2D(pool2d_layer=pool3, size=(2, 2), name='unpool_block3')(conv7_3)
    conv8_1 = segnet_conv2D(unpool3, 256, 3, 3, init, block='8', layer='1', l2_reg=l2_reg)
    conv8_2 = segnet_conv2D(conv8_1, 256, 3, 3, init, block='8', layer='2', l2_reg=l2_reg)
    conv8_3 = segnet_conv2D(conv8_2, 256, 3, 3, init, block='8', layer='3', l2_reg=l2_reg)

    # Block 9: Unpooling block 2
    unpool2 = DePool2D(pool2d_layer=pool2, size=(2, 2), name='unpool_block2')(conv8_3)
    conv9_1 = segnet_conv2D(unpool2, 128, 3, 3, init, block='9', layer='1', l2_reg=l2_reg)
    conv9_2 = segnet_conv2D(conv9_1, 128, 3, 3, init, block='9', layer='2', l2_reg=l2_reg)

    # Block 10: Unpooling block 1
    unpool1 = DePool2D(pool2d_layer=pool1, size=(2, 2), name='unpool_block1')(conv9_2)
    conv10_1 = segnet_conv2D(unpool1, 64, 3, 3, init, block='10', layer='1', l2_reg=l2_reg)
    conv10_2 = segnet_conv2D(conv10_1, 64, 3, 3, init, block='10', layer='2', l2_reg=l2_reg)

    l1 = Convolution2D(nclasses, 1, 1, border_mode='valid')(conv10_2)
    score = CropLayer2D(inputs, name='score')(l1)

    # Softmax
    softmax_segnet = NdSoftmax()(score)

    # Complete model
    model = Model(input=inputs, output=softmax_segnet)

    # Load pretrained Model
    if path_weights:
        load_matcovnet(model, n_classes=nclasses)

    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
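# segnet_conv2D above is a repository helper that is not shown here. The usual
# SegNet encoder/decoder unit is conv -> batch norm -> ReLU, so a hypothetical
# sketch could look like the following (the name suffix, argument order and
# layer naming are assumptions, not the repository's actual implementation):
def segnet_conv2D_sketch(x, nb_filter, nb_row, nb_col, init, block, layer, l2_reg):
    name = 'conv{}_{}'.format(block, layer)
    # axis=1 assumes channel-first tensors, as in the default img_shape above
    x = Convolution2D(nb_filter, nb_row, nb_col, init, border_mode='same',
                      name=name, W_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(mode=0, axis=1, name=name + '_bn')(x)
    return Activation('relu', name=name + '_relu')(x)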
def segnet_VGG(inp, kernel, concat_axis, n_classes):
    # Encoding layers: VGG 13 convolutional layers
    x = Convolution2D(64, kernel, kernel, border_mode='same')(inp)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(64, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Convolution2D(128, kernel, kernel, border_mode='same')(pool1)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(128, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Convolution2D(256, kernel, kernel, border_mode='same')(pool2)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(256, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(256, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Convolution2D(512, kernel, kernel, border_mode='same')(pool3)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Convolution2D(512, kernel, kernel, border_mode='same')(pool4)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Activation('relu')(x)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Decoding layers
    x = DePool2D(pool2d_layer=pool5, size=(2, 2))(pool5)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)

    x = DePool2D(pool2d_layer=pool4, size=(2, 2))(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(512, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(256, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)

    x = DePool2D(pool2d_layer=pool3, size=(2, 2))(x)
    x = Convolution2D(256, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(256, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(128, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)

    x = DePool2D(pool2d_layer=pool2, size=(2, 2))(x)
    x = Convolution2D(128, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(64, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)

    x = DePool2D(pool2d_layer=pool1, size=(2, 2))(x)
    x = Convolution2D(64, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)
    x = Convolution2D(64, kernel, kernel, border_mode='same')(x)
    x = BatchNormalization(mode=0, axis=concat_axis)(x)

    x = Convolution2D(n_classes, 1, 1, border_mode='valid')(x)
    score = CropLayer2D(inp, name='score')(x)
    predictions = NdSoftmax()(score)

    return predictions
def build_segnet(img_shape=(None, None, 3), nclasses=8, weight_decay=0.,
                 freeze_layers_from=None, path_weights=None, basic=False):
    # Regularization warning
    if weight_decay > 0.:
        print("Regularizing the weights: " + str(weight_decay))

    # Set axis in which to do batch normalization
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    # Build network
    # Input layer
    input_tensor = Input(img_shape)

    # Pad image to avoid size problems with pooling-unpooling
    padded = ZeroPadding2D(padding=(100, 100), name='pad100')(input_tensor)

    if not basic:
        # CONTRACTING PATH
        x = conv_block(padded, 64, 3, weight_decay, bn_axis, block='1', num='1')
        x = conv_block(x, 64, 3, weight_decay, bn_axis, block='1', num='2')
        pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool1')(x)

        x = conv_block(pool1, 128, 3, weight_decay, bn_axis, block='2', num='1')
        x = conv_block(x, 128, 3, weight_decay, bn_axis, block='2', num='2')
        pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool1')(x)

        x = conv_block(pool2, 256, 3, weight_decay, bn_axis, block='3', num='1')
        x = conv_block(x, 256, 3, weight_decay, bn_axis, block='3', num='2')
        x = conv_block(x, 256, 3, weight_decay, bn_axis, block='3', num='3')
        pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block3_pool1')(x)

        x = conv_block(pool3, 512, 3, weight_decay, bn_axis, block='4', num='1')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='4', num='2')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='4', num='3')
        pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block4_pool1')(x)

        x = conv_block(pool4, 512, 3, weight_decay, bn_axis, block='5', num='1')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='5', num='2')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='5', num='3')
        pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block5_pool1')(x)

        # DECONTRACTING PATH
        x = DePool2D(pool2d_layer=pool5, size=(2, 2), name='block6_unpool1')(pool5)
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='6', num='1')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='6', num='2')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='6', num='3')

        x = DePool2D(pool2d_layer=pool4, size=(2, 2), name='block7_unpool1')(x)
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='7', num='1')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='7', num='2')
        x = conv_block(x, 512, 3, weight_decay, bn_axis, block='7', num='3')

        x = DePool2D(pool2d_layer=pool3, size=(2, 2), name='block8_unpool1')(x)
        x = conv_block(x, 256, 3, weight_decay, bn_axis, block='8', num='1')
        x = conv_block(x, 256, 3, weight_decay, bn_axis, block='8', num='2')
        x = conv_block(x, 256, 3, weight_decay, bn_axis, block='8', num='3')

        x = DePool2D(pool2d_layer=pool2, size=(2, 2), name='block9_unpool1')(x)
        x = conv_block(x, 128, 3, weight_decay, bn_axis, block='9', num='1')
        x = conv_block(x, 128, 3, weight_decay, bn_axis, block='9', num='2')

        x = DePool2D(pool2d_layer=pool1, size=(2, 2), name='block10_unpool1')(x)
        x = conv_block(x, 64, 3, weight_decay, bn_axis, block='10', num='1')
        x = conv_block(x, nclasses, 3, weight_decay, bn_axis, block='10', num='2')

    elif basic:
        # CONTRACTING PATH
        x = conv_block(padded, 64, 7, weight_decay, bn_axis, block='1', num='1')
        x = conv_block(x, 64, 7, weight_decay, bn_axis, block='1', num='2')
        pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool1')(x)

        x = conv_block(pool1, 128, 7, weight_decay, bn_axis, block='2', num='1')
        x = conv_block(x, 128, 7, weight_decay, bn_axis, block='2', num='2')
        pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool1')(x)

        x = conv_block(pool2, 256, 7, weight_decay, bn_axis, block='3', num='1')
        x = conv_block(x, 256, 7, weight_decay, bn_axis, block='3', num='2')
        x = conv_block(x, 256, 7, weight_decay, bn_axis, block='3', num='3')
        pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block3_pool1')(x)

        x = conv_block(pool3, 512, 7, weight_decay, bn_axis, block='4', num='1')
        x = conv_block(x, 512, 7, weight_decay, bn_axis, block='4', num='2')
        x = conv_block(x, 512, 7, weight_decay, bn_axis, block='4', num='3')
        pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block4_pool1')(x)

        # DECONTRACTING PATH
        x = DePool2D(pool2d_layer=pool4, size=(2, 2), name='block7_unpool1')(pool4)
        x = conv_block(x, 512, 7, weight_decay, bn_axis, block='7', num='1', deconv_basic=True)
        x = conv_block(x, 512, 7, weight_decay, bn_axis, block='7', num='2', deconv_basic=True)
        x = conv_block(x, 512, 7, weight_decay, bn_axis, block='7', num='3', deconv_basic=True)

        x = DePool2D(pool2d_layer=pool3, size=(2, 2), name='block8_unpool1')(x)
        x = conv_block(x, 256, 7, weight_decay, bn_axis, block='8', num='1', deconv_basic=True)
        x = conv_block(x, 256, 7, weight_decay, bn_axis, block='8', num='2', deconv_basic=True)
        x = conv_block(x, 256, 7, weight_decay, bn_axis, block='8', num='3', deconv_basic=True)

        x = DePool2D(pool2d_layer=pool2, size=(2, 2), name='block9_unpool1')(x)
        x = conv_block(x, 128, 7, weight_decay, bn_axis, block='9', num='1', deconv_basic=True)
        x = conv_block(x, 128, 7, weight_decay, bn_axis, block='9', num='2', deconv_basic=True)

        x = DePool2D(pool2d_layer=pool1, size=(2, 2), name='block10_unpool1')(x)
        x = conv_block(x, 64, 7, weight_decay, bn_axis, block='10', num='1', deconv_basic=True)
        x = conv_block(x, nclasses, 7, weight_decay, bn_axis, block='10', num='2', deconv_basic=True)

    # Recover the image's original size
    x = CropLayer2D(input_tensor, name='score')(x)

    # Softmax
    softmax_segnet = NdSoftmax()(x)

    # Complete model
    model = Model(input=input_tensor, output=softmax_segnet)

    # TODO: load weights from caffe
    # Load pretrained Model
    # if path_weights:
    #     load_matcovnet(model, path_weights, n_classes=nclasses)

    # TODO: review freeze layers
    # Freeze some layers
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
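# Hypothetical usage of build_segnet above with example values only; basic=True
# selects the 7x7 SegNet-basic style encoder/decoder defined in the elif branch,
# while the default builds the VGG-based variant.
def _demo_build_segnet_sketch():
    model = build_segnet(img_shape=(360, 480, 3), nclasses=11,
                         weight_decay=1e-4, basic=True)
    model.summary()
    return model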
def inceptionSeg(inp, kernel, concat_axis, n_classes):
    # GoogLeNet (Inception v1) encoder followed by an FCN-style decoder.
    # NOTE: the 'kernel' and 'concat_axis' arguments are currently unused.

    # Encoding layers: stem convolutions
    conv1_7x7_s2 = Convolution2D(64,7,7,subsample=(2,2),border_mode='same',activation='relu',name='conv1/7x7_s2')(inp)
    pool1_3x3_s2 = MaxPooling2D(pool_size=(3,3),strides=(2,2),border_mode='valid',name='pool1/3x3_s2')(conv1_7x7_s2)
    # pool1_norm1 = LRN(name='pool1/norm1')(pool1_3x3_s2)  # LRN from the original GoogLeNet omitted
    conv2_3x3_reduce = Convolution2D(64,1,1,border_mode='same',activation='relu',name='conv2/3x3_reduce')(pool1_3x3_s2)
    conv2_3x3 = Convolution2D(192,3,3,border_mode='same',activation='relu',name='conv2/3x3')(conv2_3x3_reduce)
    # conv2_norm2 = LRN(name='conv2/norm2')(conv2_3x3)  # LRN from the original GoogLeNet omitted
    conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_3x3)
    pool2_helper = PoolHelper()(conv2_zero_pad)
    pool2_3x3_s2 = MaxPooling2D(pool_size=(3,3),strides=(2,2),border_mode='valid',name='pool2/3x3_s2')(pool2_helper)

    # First Inception module (3a)
    inception_3a_1x1 = Convolution2D(64,1,1,border_mode='same',activation='relu',name='inception_3a/1x1')(pool2_3x3_s2)
    inception_3a_3x3_reduce = Convolution2D(96,1,1,border_mode='same',activation='relu',name='inception_3a/3x3_reduce')(pool2_3x3_s2)
    inception_3a_3x3 = Convolution2D(128,3,3,border_mode='same',activation='relu',name='inception_3a/3x3')(inception_3a_3x3_reduce)
    inception_3a_5x5_reduce = Convolution2D(16,1,1,border_mode='same',activation='relu',name='inception_3a/5x5_reduce')(pool2_3x3_s2)
    inception_3a_5x5 = Convolution2D(32,5,5,border_mode='same',activation='relu',name='inception_3a/5x5')(inception_3a_5x5_reduce)
    inception_3a_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_3a/pool')(pool2_3x3_s2)
    inception_3a_pool_proj = Convolution2D(32,1,1,border_mode='same',activation='relu',name='inception_3a/pool_proj')(inception_3a_pool)
    inception_3a_output = merge([inception_3a_1x1,inception_3a_3x3,inception_3a_5x5,inception_3a_pool_proj],mode='concat',concat_axis=1,name='inception_3a/output')

    # Second Inception module (3b)
    inception_3b_1x1 = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_3b/1x1')(inception_3a_output)
    inception_3b_3x3_reduce = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_3b/3x3_reduce')(inception_3a_output)
    inception_3b_3x3 = Convolution2D(192,3,3,border_mode='same',activation='relu',name='inception_3b/3x3')(inception_3b_3x3_reduce)
    inception_3b_5x5_reduce = Convolution2D(32,1,1,border_mode='same',activation='relu',name='inception_3b/5x5_reduce')(inception_3a_output)
    inception_3b_5x5 = Convolution2D(96,5,5,border_mode='same',activation='relu',name='inception_3b/5x5')(inception_3b_5x5_reduce)
    inception_3b_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_3b/pool')(inception_3a_output)
    inception_3b_pool_proj = Convolution2D(64,1,1,border_mode='same',activation='relu',name='inception_3b/pool_proj')(inception_3b_pool)
    inception_3b_output = merge([inception_3b_1x1,inception_3b_3x3,inception_3b_5x5,inception_3b_pool_proj],mode='concat',concat_axis=1,name='inception_3b/output')

    inception_3b_output_zero_pad = ZeroPadding2D(padding=(1, 1))(inception_3b_output)
    pool3_helper = PoolHelper()(inception_3b_output_zero_pad)
    pool3_3x3_s2 = MaxPooling2D(pool_size=(3,3),strides=(2,2),border_mode='valid',name='pool3/3x3_s2')(pool3_helper)

    # Third Inception module (4a)
    inception_4a_1x1 = Convolution2D(192,1,1,border_mode='same',activation='relu',name='inception_4a/1x1')(pool3_3x3_s2)
    inception_4a_3x3_reduce = Convolution2D(96,1,1,border_mode='same',activation='relu',name='inception_4a/3x3_reduce')(pool3_3x3_s2)
    inception_4a_3x3 = Convolution2D(208,3,3,border_mode='same',activation='relu',name='inception_4a/3x3')(inception_4a_3x3_reduce)
    inception_4a_5x5_reduce = Convolution2D(16,1,1,border_mode='same',activation='relu',name='inception_4a/5x5_reduce')(pool3_3x3_s2)
    inception_4a_5x5 = Convolution2D(48,5,5,border_mode='same',activation='relu',name='inception_4a/5x5')(inception_4a_5x5_reduce)
    inception_4a_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_4a/pool')(pool3_3x3_s2)
    inception_4a_pool_proj = Convolution2D(64,1,1,border_mode='same',activation='relu',name='inception_4a/pool_proj')(inception_4a_pool)
    inception_4a_output = merge([inception_4a_1x1,inception_4a_3x3,inception_4a_5x5,inception_4a_pool_proj],mode='concat',concat_axis=1,name='inception_4a/output')

    # First auxiliary classifier head (computed but not used in the segmentation output)
    loss1_ave_pool = AveragePooling2D(pool_size=(5,5),strides=(3,3),name='loss1/ave_pool')(inception_4a_output)
    loss1_conv = Convolution2D(128,1,1,border_mode='same',activation='relu',name='loss1/conv')(loss1_ave_pool)
    loss1_flat = Flatten()(loss1_conv)
    loss1_fc = Dense(1024,activation='relu',name='loss1/fc')(loss1_flat)
    loss1_drop_fc = Dropout(0.7)(loss1_fc)
    loss1_classifier = Dense(1000,name='loss1/classifier')(loss1_drop_fc)
    loss1_classifier_act = Activation('softmax')(loss1_classifier)

    # Fourth Inception module (4b)
    inception_4b_1x1 = Convolution2D(160,1,1,border_mode='same',activation='relu',name='inception_4b/1x1')(inception_4a_output)
    inception_4b_3x3_reduce = Convolution2D(112,1,1,border_mode='same',activation='relu',name='inception_4b/3x3_reduce')(inception_4a_output)
    inception_4b_3x3 = Convolution2D(224,3,3,border_mode='same',activation='relu',name='inception_4b/3x3')(inception_4b_3x3_reduce)
    inception_4b_5x5_reduce = Convolution2D(24,1,1,border_mode='same',activation='relu',name='inception_4b/5x5_reduce')(inception_4a_output)
    inception_4b_5x5 = Convolution2D(64,5,5,border_mode='same',activation='relu',name='inception_4b/5x5')(inception_4b_5x5_reduce)
    inception_4b_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_4b/pool')(inception_4a_output)
    inception_4b_pool_proj = Convolution2D(64,1,1,border_mode='same',activation='relu',name='inception_4b/pool_proj')(inception_4b_pool)
    inception_4b_output = merge([inception_4b_1x1,inception_4b_3x3,inception_4b_5x5,inception_4b_pool_proj],mode='concat',concat_axis=1,name='inception_4b/output')

    # Fifth Inception module (4c)
    inception_4c_1x1 = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_4c/1x1')(inception_4b_output)
    inception_4c_3x3_reduce = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_4c/3x3_reduce')(inception_4b_output)
    inception_4c_3x3 = Convolution2D(256,3,3,border_mode='same',activation='relu',name='inception_4c/3x3')(inception_4c_3x3_reduce)
    inception_4c_5x5_reduce = Convolution2D(24,1,1,border_mode='same',activation='relu',name='inception_4c/5x5_reduce')(inception_4b_output)
    inception_4c_5x5 = Convolution2D(64,5,5,border_mode='same',activation='relu',name='inception_4c/5x5')(inception_4c_5x5_reduce)
    inception_4c_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_4c/pool')(inception_4b_output)
    inception_4c_pool_proj = Convolution2D(64,1,1,border_mode='same',activation='relu',name='inception_4c/pool_proj')(inception_4c_pool)
    inception_4c_output = merge([inception_4c_1x1,inception_4c_3x3,inception_4c_5x5,inception_4c_pool_proj],mode='concat',concat_axis=1,name='inception_4c/output')

    # Sixth Inception module (4d)
    inception_4d_1x1 = Convolution2D(112,1,1,border_mode='same',activation='relu',name='inception_4d/1x1')(inception_4c_output)
    inception_4d_3x3_reduce = Convolution2D(144,1,1,border_mode='same',activation='relu',name='inception_4d/3x3_reduce')(inception_4c_output)
    inception_4d_3x3 = Convolution2D(288,3,3,border_mode='same',activation='relu',name='inception_4d/3x3')(inception_4d_3x3_reduce)
    inception_4d_5x5_reduce = Convolution2D(32,1,1,border_mode='same',activation='relu',name='inception_4d/5x5_reduce')(inception_4c_output)
    inception_4d_5x5 = Convolution2D(64,5,5,border_mode='same',activation='relu',name='inception_4d/5x5')(inception_4d_5x5_reduce)
    inception_4d_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_4d/pool')(inception_4c_output)
    inception_4d_pool_proj = Convolution2D(64,1,1,border_mode='same',activation='relu',name='inception_4d/pool_proj')(inception_4d_pool)
    inception_4d_output = merge([inception_4d_1x1,inception_4d_3x3,inception_4d_5x5,inception_4d_pool_proj],mode='concat',concat_axis=1,name='inception_4d/output')

    # Second auxiliary classifier head (computed but not used in the segmentation output)
    loss2_ave_pool = AveragePooling2D(pool_size=(5,5),strides=(3,3),name='loss2/ave_pool')(inception_4d_output)
    loss2_conv = Convolution2D(128,1,1,border_mode='same',activation='relu',name='loss2/conv')(loss2_ave_pool)
    loss2_flat = Flatten()(loss2_conv)
    loss2_fc = Dense(1024,activation='relu',name='loss2/fc')(loss2_flat)
    loss2_drop_fc = Dropout(0.7)(loss2_fc)
    loss2_classifier = Dense(1000,name='loss2/classifier')(loss2_drop_fc)
    loss2_classifier_act = Activation('softmax')(loss2_classifier)

    # Seventh Inception module (4e)
    inception_4e_1x1 = Convolution2D(256,1,1,border_mode='same',activation='relu',name='inception_4e/1x1')(inception_4d_output)
    inception_4e_3x3_reduce = Convolution2D(160,1,1,border_mode='same',activation='relu',name='inception_4e/3x3_reduce')(inception_4d_output)
    inception_4e_3x3 = Convolution2D(320,3,3,border_mode='same',activation='relu',name='inception_4e/3x3')(inception_4e_3x3_reduce)
    inception_4e_5x5_reduce = Convolution2D(32,1,1,border_mode='same',activation='relu',name='inception_4e/5x5_reduce')(inception_4d_output)
    inception_4e_5x5 = Convolution2D(128,5,5,border_mode='same',activation='relu',name='inception_4e/5x5')(inception_4e_5x5_reduce)
    inception_4e_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_4e/pool')(inception_4d_output)
    inception_4e_pool_proj = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_4e/pool_proj')(inception_4e_pool)
    inception_4e_output = merge([inception_4e_1x1,inception_4e_3x3,inception_4e_5x5,inception_4e_pool_proj],mode='concat',concat_axis=1,name='inception_4e/output')

    inception_4e_output_zero_pad = ZeroPadding2D(padding=(1, 1))(inception_4e_output)
    pool4_helper = PoolHelper()(inception_4e_output_zero_pad)
    pool4_3x3_s2 = MaxPooling2D(pool_size=(3,3),strides=(2,2),border_mode='valid',name='pool4/3x3_s2')(pool4_helper)

    # Eighth Inception module (5a)
    inception_5a_1x1 = Convolution2D(256,1,1,border_mode='same',activation='relu',name='inception_5a/1x1')(pool4_3x3_s2)
    inception_5a_3x3_reduce = Convolution2D(160,1,1,border_mode='same',activation='relu',name='inception_5a/3x3_reduce')(pool4_3x3_s2)
    inception_5a_3x3 = Convolution2D(320,3,3,border_mode='same',activation='relu',name='inception_5a/3x3')(inception_5a_3x3_reduce)
    inception_5a_5x5_reduce = Convolution2D(32,1,1,border_mode='same',activation='relu',name='inception_5a/5x5_reduce')(pool4_3x3_s2)
    inception_5a_5x5 = Convolution2D(128,5,5,border_mode='same',activation='relu',name='inception_5a/5x5')(inception_5a_5x5_reduce)
    inception_5a_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_5a/pool')(pool4_3x3_s2)
    inception_5a_pool_proj = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_5a/pool_proj')(inception_5a_pool)
    inception_5a_output = merge([inception_5a_1x1,inception_5a_3x3,inception_5a_5x5,inception_5a_pool_proj],mode='concat',concat_axis=1,name='inception_5a/output')

    # Ninth Inception module (5b)
    inception_5b_1x1 = Convolution2D(384,1,1,border_mode='same',activation='relu',name='inception_5b/1x1')(inception_5a_output)
    inception_5b_3x3_reduce = Convolution2D(192,1,1,border_mode='same',activation='relu',name='inception_5b/3x3_reduce')(inception_5a_output)
    inception_5b_3x3 = Convolution2D(384,3,3,border_mode='same',activation='relu',name='inception_5b/3x3')(inception_5b_3x3_reduce)
    inception_5b_5x5_reduce = Convolution2D(48,1,1,border_mode='same',activation='relu',name='inception_5b/5x5_reduce')(inception_5a_output)
    inception_5b_5x5 = Convolution2D(128,5,5,border_mode='same',activation='relu',name='inception_5b/5x5')(inception_5b_5x5_reduce)
    inception_5b_pool = MaxPooling2D(pool_size=(3,3),strides=(1,1),border_mode='same',name='inception_5b/pool')(inception_5a_output)
    inception_5b_pool_proj = Convolution2D(128,1,1,border_mode='same',activation='relu',name='inception_5b/pool_proj')(inception_5b_pool)
    inception_5b_output = merge([inception_5b_1x1,inception_5b_3x3,inception_5b_5x5,inception_5b_pool_proj],mode='concat',concat_axis=1,name='inception_5b/output')

    # DECONTRACTING PATH: FCN8-style skip decoder.
    # The original snippet referenced names that are undefined in this function
    # (x, score_fr, pool3, inputs, nclasses, init, l2_reg). The stand-ins below
    # are assumptions chosen to make the graph well-formed: the deepest encoder
    # features feed score_fr, and inception_4e_output / inception_3b_output
    # (at 1/16 and 1/8 of the input resolution) provide the skip connections.
    init = 'glorot_uniform'  # assumed weight initializer
    l2_reg = 0.              # assumed L2 regularization factor
    score_fr = Convolution2D(n_classes, 1, 1, init, 'relu', border_mode='valid',
                             name='score_fr', W_regularizer=l2(l2_reg))(inception_5b_output)

    # Unpool 1
    score_pool4 = Convolution2D(n_classes, 1, 1, init, 'relu', border_mode='same',
                                name='score_pool4', W_regularizer=l2(l2_reg))(inception_4e_output)
    score2 = Deconvolution2D(n_classes, 4, 4, score_fr._keras_shape, init, 'linear',
                             border_mode='valid', subsample=(2, 2),
                             name='score2', W_regularizer=l2(l2_reg))(score_fr)
    score_pool4_crop = CropLayer2D(score2, name='score_pool4_crop')(score_pool4)
    # custom_sum and custom_sum_shape are assumed to be module-level helpers
    # that add two equally-shaped tensors (as in the FCN8 decoder).
    score_fused = merge([score_pool4_crop, score2], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_fused')

    # Unpool 2
    score_pool3 = Convolution2D(n_classes, 1, 1, init, 'relu', border_mode='valid',
                                name='score_pool3', W_regularizer=l2(l2_reg))(inception_3b_output)
    score4 = Deconvolution2D(n_classes, 4, 4, score_fused._keras_shape, init, 'linear',
                             border_mode='valid', subsample=(2, 2), bias=True,  # TODO: No bias??
                             name='score4', W_regularizer=l2(l2_reg))(score_fused)
    score_pool3_crop = CropLayer2D(score4, name='score_pool3_crop')(score_pool3)
    score_final = merge([score_pool3_crop, score4], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_final')

    upsample = Deconvolution2D(n_classes, 16, 16, score_final._keras_shape, init, 'linear',
                               border_mode='valid', subsample=(8, 8), bias=False,  # TODO: No bias??
                               name='upsample', W_regularizer=l2(l2_reg))(score_final)
    score = CropLayer2D(inp, name='score')(upsample)

    # Softmax
    predictions = NdSoftmax()(score)

    # Original GoogLeNet classification head, kept for reference but not used
    # for segmentation:
    # pool5_7x7_s1 = AveragePooling2D(pool_size=(7,7),strides=(1,1),name='pool5/7x7_s2')(inception_5b_output)
    # loss3_flat = Flatten()(pool5_7x7_s1)
    # pool5_drop_7x7_s1 = Dropout(0.4)(loss3_flat)
    # loss3_classifier = Dense(1000,name='loss3/classifier')(pool5_drop_7x7_s1)
    # predictions = Activation('softmax',name='prob')(loss3_classifier)
    # googlenet = Model(input=input, output=[loss1_classifier_act,loss2_classifier_act,loss3_classifier_act])

    return predictions
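# The nine Inception blocks above all follow the same four-branch pattern.
# The helper below is an illustrative sketch only (it is not part of the
# original code and nothing above calls it); it assumes the same Keras 1.x API
# (Convolution2D, MaxPooling2D, merge) and channels-first concatenation used
# throughout this file, and shows how the repeated pattern could be factored out.
def _inception_module(x, name, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, n_pool_proj):
    # 1x1 branch
    branch1 = Convolution2D(n1x1, 1, 1, border_mode='same', activation='relu', name=name + '/1x1')(x)
    # 1x1 reduction followed by a 3x3 convolution
    branch3 = Convolution2D(n3x3_reduce, 1, 1, border_mode='same', activation='relu', name=name + '/3x3_reduce')(x)
    branch3 = Convolution2D(n3x3, 3, 3, border_mode='same', activation='relu', name=name + '/3x3')(branch3)
    # 1x1 reduction followed by a 5x5 convolution
    branch5 = Convolution2D(n5x5_reduce, 1, 1, border_mode='same', activation='relu', name=name + '/5x5_reduce')(x)
    branch5 = Convolution2D(n5x5, 5, 5, border_mode='same', activation='relu', name=name + '/5x5')(branch5)
    # 3x3 max pooling followed by a 1x1 projection
    branch_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same', name=name + '/pool')(x)
    branch_pool = Convolution2D(n_pool_proj, 1, 1, border_mode='same', activation='relu', name=name + '/pool_proj')(branch_pool)
    # Concatenate the four branches along the channel axis (channels-first)
    return merge([branch1, branch3, branch5, branch_pool], mode='concat', concat_axis=1, name=name + '/output')

# For example, inception_3a_output above is equivalent to:
# _inception_module(pool2_3x3_s2, 'inception_3a', 64, 96, 128, 16, 32, 32)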
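# Minimal usage sketch (an assumption, not part of the original code): wrap the
# prediction tensor returned by inceptionSeg into a Keras 1.x Model, following
# the build_* convention used elsewhere in this file. The input shape, kernel
# and class count below are placeholders; a fixed spatial size is used because
# the Deconvolution2D layers rely on concrete _keras_shape values.
def build_inception_seg(img_shape=(3, 224, 224), nclasses=8):
    inputs = Input(img_shape)
    predictions = inceptionSeg(inputs, kernel=3, concat_axis=1, n_classes=nclasses)
    model = Model(input=inputs, output=predictions)
    return model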