def fcn_VGG16_32s(INPUT_SIZE, nb_classes):
    """Return a Keras FCN-32s model built on a VGG16 backbone.

    Args:
        INPUT_SIZE: side length of the square RGB input image.
        nb_classes: number of segmentation classes.

    Returns:
        A Keras ``Model`` mapping (INPUT_SIZE, INPUT_SIZE, 3) images to
        per-pixel softmax scores over ``nb_classes`` classes.
    """
    # Input and output layers for FCN:
    inputs = Input(shape=(INPUT_SIZE, INPUT_SIZE, 3))
    # Start from ImageNet-pretrained VGG16 convolutional layers.
    vgg16 = VGG16(weights='imagenet', include_top=False, input_tensor=inputs)

    # Score from the top vgg16 layer:
    score7 = vgg16.output
    # score7 = Dropout(0.5)(score7)  # (optional)
    # 1x1 conv to reduce channels to nb_classes.
    score7c = Conv2D(filters=nb_classes, kernel_size=(1, 1), name='score7c')(score7)

    # Upsample x32 back to input resolution; the transposed-conv kernel is
    # initialized with fixed bilinear-interpolation weights.
    score7c_upsample_32 = Conv2DTranspose(filters=nb_classes,
                                          kernel_size=(64, 64),
                                          strides=(32, 32),
                                          padding='same',
                                          activation=None,
                                          kernel_initializer=Constant(bilinear_upsample_weights(32, nb_classes)),
                                          name="score_pool7c_upsample_32")(score7c)
    fcn_output = (Activation('softmax'))(score7c_upsample_32)
    # fcn_output = score7c_upsample_32

    # BUG FIX: the keyword was `output=`; the Keras 2 Model API expects
    # `outputs=` (as used by the sibling functions in this file).
    model = Model(inputs=inputs, outputs=fcn_output, name='fcn_VGG16_32s')

    # Fixing weights in lower layers:
    # for layer in model.layers[:15]:  # sometimes I use it, sometimes not.
    #     layer.trainable = False
    return model
def fcn_RESNET50_32s(INPUT_SIZE, nb_classes):
    """Return a Keras FCN-32s model built on a ResNet50 backbone.

    Args:
        INPUT_SIZE: side length of the square RGB input image.
        nb_classes: number of segmentation classes.

    Returns:
        A Keras ``Model`` mapping (INPUT_SIZE, INPUT_SIZE, 3) images to
        per-pixel softmax scores over ``nb_classes`` classes.
    """
    # Input and output layers for FCN:
    inputs = Input(shape=(INPUT_SIZE, INPUT_SIZE, 3))
    # Start from ImageNet-pretrained ResNet50 layers.
    resnet50 = ResNet50(weights='imagenet', include_top=False, input_tensor=inputs)

    # Score from the top resnet50 layer
    # (equivalent to: resnet50.get_layer('activation_49').output):
    act49 = resnet50.output
    act49 = Dropout(0.5)(act49)  # (optional)

    # Add classifier (1x1 conv to nb_classes channels):
    pred32 = Conv2D(filters=nb_classes, kernel_size=(1, 1), name='pred_32')(act49)

    # Add upsampler: x32 transposed conv with bilinear-initialized kernel.
    score_pred32_upsample = Conv2DTranspose(filters=nb_classes,
                                            kernel_size=(64, 64),
                                            strides=(32, 32),
                                            padding='same',
                                            activation=None,
                                            kernel_initializer=Constant(bilinear_upsample_weights(32, nb_classes)),
                                            name="score_pred32_upsample")(pred32)
    output = (Activation('softmax'))(score_pred32_upsample)
    model = Model(inputs=inputs, outputs=output, name='fcn_RESNET50_32s')

    # Fine-tune: names of layers to keep trainable when freezing the rest.
    # BUG FIX: a missing comma after 'score_pred32_upsample' caused implicit
    # string concatenation ('score_pred32_upsamplebn5c_branch2c'), silently
    # dropping both layer names from the list.
    train_layers = ['pred_32',
                    'score_pred32_upsample',
                    'bn5c_branch2c', 'res5c_branch2c',
                    'bn5c_branch2b', 'res5c_branch2b',
                    'bn5c_branch2a', 'res5c_branch2a',
                    'bn5b_branch2c', 'res5b_branch2c',
                    'bn5b_branch2b', 'res5b_branch2b',
                    'bn5b_branch2a', 'res5b_branch2a',
                    'bn5a_branch2c', 'res5a_branch2c',
                    'bn5a_branch2b', 'res5a_branch2b',
                    'bn5a_branch2a', 'res5a_branch2a']
    # for l in model.layers:
    #     if l.name in train_layers:
    #         l.trainable = True
    #     else:
    #         l.trainable = False
    return model
def fcn_32s_orig(nb_classes):
    """Original FCN-32s variant: VGG16 backbone, variable input size,
    single x32 transposed-conv upsampler with sigmoid activation.

    Args:
        nb_classes: number of output channels (segmentation classes).

    Returns:
        A Keras ``Model`` with the lower backbone layers frozen.
    """
    image_in = Input(shape=(None, None, 3))
    backbone = VGG16(weights='imagenet', include_top=False, input_tensor=image_in)
    # 1x1 conv reduces backbone features to nb_classes score maps.
    scores = Conv2D(filters=nb_classes, kernel_size=(1, 1))(backbone.output)
    # x32 upsample back to input resolution; kernel starts as bilinear weights.
    upsampled = Conv2DTranspose(
        filters=nb_classes,
        kernel_size=(64, 64),
        strides=(32, 32),
        padding='same',
        activation='sigmoid',
        kernel_initializer=Constant(bilinear_upsample_weights(32, nb_classes)),
    )(scores)
    net = Model(inputs=image_in, outputs=upsampled)
    # Freeze the lower backbone layers so only the head is trained.
    for frozen_layer in net.layers[:15]:
        frozen_layer.trainable = False
    return net
def fcn_VGG16_8s(INPUT_SIZE, nb_classes):  # previous name: fcn8s_take2
    """Return a Keras FCN-8s model definition.

    VGG16 backbone with skip connections from pool3 and pool4, fused with
    progressively upsampled top-layer scores (x2, x2, then x8 back to the
    input resolution).

    Args:
        INPUT_SIZE: side length of the square RGB input image.
        nb_classes: number of segmentation classes.

    Returns:
        A Keras ``Model`` producing per-pixel softmax scores.
    """
    fcn32_flag = False  # kept from the original; not read below
    inputs = Input(shape=(INPUT_SIZE, INPUT_SIZE, 3))
    # Start from ImageNet-pretrained VGG16 layers.
    vgg16 = VGG16(weights='imagenet', include_top=False, input_tensor=inputs)

    # Skip connection from pool3 (256 channels), reduced to nb_classes
    # channels by a 1x1 conv.
    pool3_tap = Model(inputs=vgg16.input, outputs=vgg16.get_layer('block3_pool').output)
    score_pool3c = Conv2D(filters=nb_classes, kernel_size=(1, 1),
                          name="score_pool3c")(pool3_tap.output)

    # Skip connection from pool4 (512 channels), likewise reduced.
    pool4_tap = Model(inputs=vgg16.input, outputs=vgg16.get_layer('block4_pool').output)
    score_pool4c = Conv2D(filters=nb_classes, kernel_size=(1, 1))(pool4_tap.output)

    # Score from the top vgg16 layer:
    top_scores = vgg16.output
    # top_scores = Dropout(0.5)(top_scores)  # (optional)

    # Fully-convolutional fc6/fc7 equivalents (n = 4096), each followed by
    # dropout as regularization.
    head = Conv2D(filters=4096, kernel_size=(7, 7), padding='same',
                  name="conv6")(top_scores)
    head = Dropout(0.5)(head)
    head = Conv2D(filters=4096, kernel_size=(1, 1), padding='same',
                  name="conv7")(head)
    head = Dropout(0.5)(head)
    # head = Conv2D(filters=nb_classes, kernel_size=(1, 1))(head)

    # x2 upsample of the head scores (bilinear-initialized kernel),
    # then fuse with the pool4 skip.
    head_up2 = Conv2DTranspose(filters=nb_classes,
                               kernel_size=(4, 4),
                               strides=(2, 2),
                               padding='same',
                               activation=None,
                               kernel_initializer=Constant(bilinear_upsample_weights(2, nb_classes)),
                               name="score_pool7c_upsample")(head)
    fuse_7_4 = Add()([head_up2, score_pool4c])

    # x2 upsample again, then fuse with the pool3 skip.
    fuse_7_4_up = Conv2DTranspose(filters=nb_classes,
                                  kernel_size=(4, 4),
                                  strides=(2, 2),
                                  padding='same',
                                  activation=None,
                                  kernel_initializer=Constant(bilinear_upsample_weights(2, nb_classes)),
                                  name="score_7_4_up")(fuse_7_4)
    fuse_7_4_3 = Add()([fuse_7_4_up, score_pool3c])

    # Final x8 upsample back to input resolution.
    final_up = Conv2DTranspose(filters=nb_classes,
                               kernel_size=(16, 16),
                               strides=(8, 8),
                               padding='same',
                               activation=None,
                               kernel_initializer=Constant(bilinear_upsample_weights(8, nb_classes)),
                               name="score_7_4_3_up")(fuse_7_4_3)

    # Batch Normalization (optional) -- another way to tame large weights
    # and exploding gradients.
    final_up = BatchNormalization()(final_up)
    output = (Activation('softmax'))(final_up)

    # -- There's another way to match the tensor sizes from earlier layers,
    # using a Cropping2D layer --
    # e.g., for fcn-16, crop layer 'score_pool4c' to the size of 'score_7c':
    # score_pool4c_cropped = Cropping2D((5+3, 5+3))(score_pool4c)
    # # fuse layers,
    # score_7_4_cropped = Add()([score7c, score_pool4c_cropped])
    # # then upsample to input size:
    # x = Conv2DTranspose(filters=nb_classes,
    #                     kernel_size=(64, 64),
    #                     strides=(32+2, 32+2),
    #                     padding='same',
    #                     activation='sigmoid',
    #                     kernel_initializer=Constant(bilinear_upsample_weights(32, nb_classes)))(score_7_4_cropped)

    # Creating the model:
    model = Model(inputs=inputs, outputs=output, name='fcn_VGG16_8s')

    # Fixing weights in lower layers:
    # for layer in model.layers[:15]:  # sometimes I use it, sometimes not.
    #     layer.trainable = False
    return model