def build_resnet(input_shape, block_fn, repetitions, plot_model_f):
    """Builds a custom ResNet-like feature extractor.

    Args:
        input_shape: The input shape in the form (nb_channels, nb_rows, nb_cols).
        block_fn: The block function to use. This is either `basic_block` or
            `bottleneck`. The original paper used basic_block for layers < 50.
        repetitions: Number of repetitions of various block units. At each
            block unit, the number of filters are doubled and the input size
            is halved.
        plot_model_f: Currently unused; kept for signature compatibility with
            existing callers (the plot_model call below is commented out).

    Returns:
        The resnet part of the model: a Model mapping the input tensor to the
        final average-pooled feature map (no classifier head attached).

    Raises:
        ValueError: If `input_shape` is not a 3-tuple.
    """
    kr._handle_dim_ordering()
    if len(input_shape) != 3:
        # ValueError is the precise type for a bad argument; callers that
        # caught the old bare Exception still catch this.
        raise ValueError(
            "Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")

    # Permute dimension order if necessary: the documented shape is
    # channels-first, TensorFlow ordering expects channels-last.
    if K.image_dim_ordering() == 'tf':
        input_shape = (input_shape[1], input_shape[2], input_shape[0])

    # Load function from str if needed.
    block_fn = kr._get_block(block_fn)

    # Renamed from `input` to avoid shadowing the builtin.
    input_tensor = Input(shape=input_shape)
    conv1 = kr._conv_bn_relu(filters=64, kernel_size=(7, 7),
                             strides=(2, 2))(input_tensor)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                         padding="same")(conv1)

    # Stack of residual stages: filters double each stage, spatial size halves
    # (downsampling happens inside _residual_block except for the first stage).
    block = pool1
    filters = 64
    for i, r in enumerate(repetitions):
        block = kr._residual_block(block_fn, filters=filters, repetitions=r,
                                   is_first_layer=(i == 0))(block)
        filters *= 2

    # Last activation
    block = kr._bn_relu(block)

    # Classifier block: average-pool over the full remaining spatial extent.
    block_shape = K.int_shape(block)
    pool2 = AveragePooling2D(pool_size=(block_shape[kr.ROW_AXIS],
                                        block_shape[kr.COL_AXIS]),
                             strides=(1, 1))(block)

    model = Model(inputs=input_tensor, outputs=pool2)
    # plot_model(model, to_file='../model_resnet_inner.png', show_shapes=True)
    return model
def focusnet():
    """Builds and compiles the FocusNet segmentation model.

    Two coupled branches: a U-shaped encoder/decoder whose decoder outputs
    gate (via sigmoid + elementwise multiply) a second, squeeze-excite
    augmented encoder, which feeds its own decoder producing four
    side outputs (out1..out4) at increasing resolution that are fused into
    the final single-channel sigmoid mask.

    Input is (192, 256, 3); output is a single-channel mask. Compiled with
    SGD + focal Tversky loss and project-defined metrics from `losses`.

    NOTE(review): the `#512 / #256 / ...` comments appear to annotate spatial
    resolution and look inherited from a 512x512-input variant — they do not
    match the 192x256 input; TODO confirm against the training pipeline.

    Returns:
        The compiled keras Model.
    """
    input = Input((192, 256, 3))

    # ---- First branch: U-Net-style encoder (downsampling path) ----
    conv1 = initial_conv_block(input)
    #512
    pool1 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=False)(conv1)
    #256
    conv2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(pool1)
    #256
    pool2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(conv2)
    #128
    conv3 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(pool2)
    #128
    pool3 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=False)(conv3)
    #64
    conv4 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(pool3)
    #64
    drop4 = Dropout(0.2)(conv4)

    # ---- First branch: decoder with skip concatenations from the encoder ----
    up5 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop4))
    #128
    merge5 = keras.layers.Concatenate()([conv3,up5])
    conv5 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(merge5)
    #128
    up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv5))
    #256
    merge6 = keras.layers.Concatenate()([conv2,up6])
    conv6 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(merge6)
    #256
    up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    #512
    merge7 = keras.layers.Concatenate()([conv1,up7])
    conv7 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(merge7)
    #512

    # ---- Second branch: encoder gated by the first branch's decoder.
    # At each scale: squeeze-excite the features, sigmoid-gate them with the
    # corresponding decoder output (conv7/conv6/conv5), squeeze-excite again,
    # then downsample with a strided residual block.
    conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(1, 1))(input)
    #512
    block1 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(conv1r)
    #512
    se1 = squeeze_excite_block(block1)
    gate1 = Activation('sigmoid')(conv7)
    block1concat = keras.layers.Multiply()([se1, gate1])
    #512
    block1se = squeeze_excite_block(block1concat)
    block1b = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(block1se)
    #256
    block2 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(block1b)
    #256
    se2 = squeeze_excite_block(block2)
    gate2 = Activation('sigmoid')(conv6)
    block2concat = keras.layers.Multiply()([se2, gate2])
    #256
    block2se = squeeze_excite_block(block2concat)
    block2b = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=False)(block2se)
    #128
    block3 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(block2b)
    #128
    se3 = squeeze_excite_block(block3)
    gate3 = Activation('sigmoid')(conv5)
    block3concat = keras.layers.Multiply()([se3, gate3])
    #128
    block3se = squeeze_excite_block(block3concat)
    block3b = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=False)(block3se)
    # 64
    # Deepest stage has no decoder gate (the first branch has no matching scale).
    block4 = _residual_block(basic_block, filters=512, repetitions=1, is_first_layer=True)(block3b)
    #64
    block4se = squeeze_excite_block(block4)
    block4b = _residual_block(basic_block, filters=512, repetitions=1, is_first_layer=False)(block4se)
    #32

    # ---- Second branch: decoder with deep-supervision side outputs.
    # Each side output (out1..out3) is upsampled back to full resolution
    # (8x, 4x, 2x respectively) so all four can be concatenated at the end.
    up2_5 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(block4b))
    #64
    merge2_5 = keras.layers.Concatenate()([block3b,up2_5])
    conv2_5 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(merge2_5)
    #64
    out1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv2_5))
    out1 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out1)
    up2_6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_5))
    #128
    merge2_6 = keras.layers.Concatenate()([block2b,up2_6])
    conv2_6 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(merge2_6)
    #128
    out2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (4,4))(conv2_6))
    out2 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out2)
    up2_7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_6))
    #256
    merge2_7 = keras.layers.Concatenate()([block1b,up2_7])
    conv2_7 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(merge2_7)
    #256
    out3 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_7))
    out3 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out3)
    up2_8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_7))
    #512
    merge2_8 = keras.layers.Concatenate()([conv1r,up2_8])
    conv2_8 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=True)(merge2_8)
    #512
    # Taper channels 32 -> 16 -> 4 before the final 1x1 side output.
    conv2_8 = _residual_block(basic_block, filters=16, repetitions=1, is_first_layer=True)(conv2_8)
    conv2_8 = _residual_block(basic_block, filters=4, repetitions=1, is_first_layer=True)(conv2_8)
    out4 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(conv2_8)

    # ---- Fuse the four side outputs into the final mask ----
    out_concat = keras.layers.Concatenate()([out1, out2, out3, out4])
    out_concat = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(out_concat)
    out = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out_concat)

    model = Model(inputs=input, outputs=out)
    model.compile(optimizer = SGD(lr=0.0005, momentum=0.9, nesterov=True), loss = losses.focal_tversky, metrics = [losses.tp, losses.tn, losses.dsc, losses.jacard_coef, 'accuracy'])
    model.summary()
    return model
up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv3')(UpSampling2D(size = (2,2), name='Up3')(conv7_1)) merge8 = keras.layers.Concatenate(name='Concat3')([conv2,up8]) conv8 = Residual15(96, 32, merge8) conv8_1 = Residual16(32, 16, conv8) up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv4')(UpSampling2D(size = (2,2), name='Up4')(conv8_1)) merge9 = keras.layers.Concatenate(name='Concat4')([conv1,up9]) conv9 = Residual17(48, 16, merge9) conv10 = Residual18(16, 2, conv9) conv10 = Residual19(2, 1, conv10) conv11 = Conv2D(1, 1, activation = 'sigmoid', name='Output')(conv10) conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(1, 1))(input) block1 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(conv1r) block1concat = keras.layers.Concatenate()([block1, conv9]) block1se = squeeze_excite_block(block1concat) block1conv1 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block1se) block1conv1 = BatchNormalization(axis=CHANNEL_AXIS)(block1conv1) block1conv1 = layers.LeakyReLU()(block1conv1) block1conv2 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block1conv1) block1conv2 = BatchNormalization(axis=CHANNEL_AXIS)(block1conv2) block1conv2 = layers.LeakyReLU()(block1conv2) block1b = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(block1conv2) block2 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(block1b) block2concat = keras.layers.Concatenate()([block2, conv8]) block2se = squeeze_excite_block(block2concat) block2conv1 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block2se) block2conv1 = BatchNormalization(axis=CHANNEL_AXIS)(block2conv1)
def get_unet():
    """Builds and compiles a gated residual U-Net variant (single output).

    Same two-branch layout as `focusnet`: a U-shaped encoder/decoder whose
    decoder outputs gate (sigmoid + elementwise multiply) a second,
    squeeze-excite augmented encoder; that second branch's decoder produces
    one full-resolution single-channel sigmoid mask (no deep-supervision
    side outputs, unlike `focusnet`).

    Input is (256, 256, 3). Compiled with Adam(1e-4) + `dice_coef_loss` and
    dice/jaccard/accuracy metrics.

    NOTE(review): the `#512 / #256 / ...` comments appear to annotate spatial
    resolution from a 512x512-input variant and do not match the 256x256
    input here — TODO confirm.

    Returns:
        The compiled keras Model.
    """
    input = Input((256, 256, 3))

    # ---- First branch: encoder (strided residual blocks downsample) ----
    conv1 = initial_conv_block(input)
    #512
    pool1 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=False)(conv1)
    #256
    conv2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(pool1)
    #256
    pool2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(conv2)
    #128
    conv3 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(pool2)
    #128
    pool3 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=False)(conv3)
    #64
    conv4 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(pool3)
    #64
    drop4 = Dropout(0.2)(conv4)

    # ---- First branch: decoder with encoder skip concatenations ----
    up5 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop4))
    #128
    merge5 = keras.layers.Concatenate()([conv3, up5])
    conv5 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(merge5)
    #128
    up6 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv5))
    #256
    merge6 = keras.layers.Concatenate()([conv2, up6])
    conv6 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(merge6)
    #256
    up7 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    #512
    merge7 = keras.layers.Concatenate()([conv1, up7])
    conv7 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(merge7)
    #512

    # ---- Second branch: encoder gated per scale by the first branch's
    # decoder outputs (conv7/conv6/conv5) through sigmoid gates, with
    # squeeze-excite before and after each gating multiply.
    conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(1, 1))(input)
    #512
    block1 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(conv1r)
    #512
    se1 = squeeze_excite_block(block1)
    gate1 = Activation('sigmoid')(conv7)
    block1concat = keras.layers.Multiply()([se1, gate1])
    #512
    block1se = squeeze_excite_block(block1concat)
    block1b = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(block1se)
    #256
    block2 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(block1b)
    #256
    se2 = squeeze_excite_block(block2)
    gate2 = Activation('sigmoid')(conv6)
    block2concat = keras.layers.Multiply()([se2, gate2])
    #256
    block2se = squeeze_excite_block(block2concat)
    block2b = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=False)(block2se)
    #128
    block3 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(block2b)
    #128
    se3 = squeeze_excite_block(block3)
    gate3 = Activation('sigmoid')(conv5)
    block3concat = keras.layers.Multiply()([se3, gate3])
    #128
    block3se = squeeze_excite_block(block3concat)
    block3b = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=False)(block3se)
    # 64
    # Deepest stage is not gated (the first branch has no matching scale).
    block4 = _residual_block(basic_block, filters=512, repetitions=1, is_first_layer=True)(block3b)
    #64
    block4se = squeeze_excite_block(block4)
    block4b = _residual_block(basic_block, filters=512, repetitions=1, is_first_layer=False)(block4se)
    #32

    # ---- Second branch: decoder with skips from the second encoder ----
    up2_5 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(block4b))
    #64
    merge2_5 = keras.layers.Concatenate()([block3b, up2_5])
    conv2_5 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(merge2_5)
    #64
    up2_6 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv2_5))
    #128
    merge2_6 = keras.layers.Concatenate()([block2b, up2_6])
    conv2_6 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(merge2_6)
    #128
    up2_7 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv2_6))
    #256
    merge2_7 = keras.layers.Concatenate()([block1b, up2_7])
    conv2_7 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(merge2_7)
    #256
    up2_8 = Conv2D(32, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv2_7))
    #512
    merge2_8 = keras.layers.Concatenate()([conv1r, up2_8])
    conv2_8 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=True)(merge2_8)
    #512
    # Taper channels 32 -> 16 -> 4 before the final 1x1 sigmoid output.
    conv2_8 = _residual_block(basic_block, filters=16, repetitions=1, is_first_layer=True)(conv2_8)
    conv2_8 = _residual_block(basic_block, filters=4, repetitions=1, is_first_layer=True)(conv2_8)
    out = Conv2D(1, 1, activation='sigmoid', padding='same', kernel_initializer='he_normal')(conv2_8)

    model = Model(inputs=input, outputs=out)
    model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=[dice_coef, jaccard_coef, 'acc'])
    model.summary()
    return model
def jaccard_coef(y_true, y_pred): intersection = K.sum(y_true * y_pred, axis=[0, -1, -2]) sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2]) jac = (intersection + smooth) / (sum_ - intersection + smooth) return K.mean(jac) with tf.device('/device:GPU:0'): input = Input((192, 192, 3), name='Input') conv1 = _conv_bn_relu(filters=32, kernel_size=(7, 7), strides=(1, 1))(input) conv1 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=True)(conv1) pool1 = MaxPooling2D(pool_size=(2, 2), name='MaxPool1')(conv1) conv2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(pool1) conv2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(conv2) pool2 = MaxPooling2D(pool_size=(2, 2), name='MaxPool2')(conv2) conv3 = _residual_block(basic_block, filters=128,