def fCreateModel_FCN_simple(patchSize, dr_rate=0.0, iPReLU=0, l1_reg=0.0, l2_reg=1e-6):
    """Build a simple fully-convolutional classification network (Total params: 1,223,831).

    The dense classification layer is replaced by a 1x1x1 convolution with
    filters=2 (one per class) followed by global average pooling and softmax.

    Args:
        patchSize: sequence of three spatial extents of the input patch.
        dr_rate: dropout rate applied before the classification convolution.
        iPReLU: PReLU flag forwarded to the down-convolution blocks.
        l1_reg: L1 weight-regularization factor for the final convolution.
        l2_reg: L2 weight-regularization factor used throughout.

    Returns:
        A compiled-ready Keras Model mapping the input patch to class scores.
    """
    strides = fgetStrides()
    kernels = fgetKernelNumber()

    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    # Three conv + down-conv stages; channel count is read off the previous tensor.
    features = fCreateVNet_Block(inp, kernels[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    features = fCreateVNet_DownConv_Block(features, features._keras_shape[1], strides[0],
                                          iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    features = fCreateVNet_Block(features, kernels[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    features = fCreateVNet_DownConv_Block(features, features._keras_shape[1], strides[1],
                                          iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    features = fCreateVNet_Block(features, kernels[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    features = fCreateVNet_DownConv_Block(features, features._keras_shape[2 - 1], strides[2],
                                          iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # Fully-convolutional head: 1x1x1 conv with one filter per class.
    dropped = Dropout(dr_rate)(features)
    scores = Conv3D(
        2,
        kernel_size=(1, 1, 1),
        kernel_initializer='he_normal',
        weights=None,
        padding='valid',
        strides=(1, 1, 1),
        kernel_regularizer=l1_l2(l1_reg, l2_reg),
    )(dropped)
    scores = GlobalAveragePooling3D()(scores)
    outp = Activation('softmax')(scores)

    return Model(inputs=inp, outputs=outp)
def fpathway(input_t, dr_rate=0.0, iPReLU=0, l2_reg=1e-6):
    """Build one encoder pathway: three (VNet block, down-conv) stages.

    Args:
        input_t: input tensor the pathway is attached to.
        dr_rate: dropout rate forwarded to every block.
        iPReLU: PReLU flag forwarded to every block.
        l2_reg: L2 weight-regularization factor forwarded to every block.

    Returns:
        The tensor produced by the third down-convolution stage.
    """
    strides = fgetStrides()
    kernels = fgetKernelNumber()

    # Stage 1
    t = fCreateVNet_Block(input_t, kernels[0], type=fgetLayerNumUnscaled(),
                          iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    t = fCreateVNet_DownConv_Block(t, t._keras_shape[1], strides[0],
                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    # Stage 2
    t = fCreateVNet_Block(t, kernels[1], type=fgetLayerNumUnscaled(),
                          iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    t = fCreateVNet_DownConv_Block(t, t._keras_shape[1], strides[1],
                                   iPReLU=iPReLU, l2_reg=l2_reg, dr_rate=dr_rate)
    # Stage 3
    t = fCreateVNet_Block(t, kernels[2], type=fgetLayerNumUnscaled(),
                          iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    t = fCreateVNet_DownConv_Block(t, t._keras_shape[1], strides[2],
                                   iPReLU=iPReLU, l2_reg=l2_reg, dr_rate=dr_rate)
    return t
def fCreateModel_SPP(patchSize, dr_rate=0.0, iPReLU=0, l2_reg=1e-6):
    """Build a classification network with an SPP head (Total params: 1,036,856).

    The third down-sampling convolutional layer is replaced by a spatial
    pyramid pooling (SPP) module with three levels.

    Args:
        patchSize: sequence of three spatial extents of the input patch.
        dr_rate: dropout rate applied before the dense classification layer.
        iPReLU: PReLU flag forwarded to the down-convolution blocks.
        l2_reg: L2 weight-regularization factor used throughout.

    Returns:
        A Keras Model mapping the input patch to two-class softmax scores.
    """
    strides = fgetStrides()
    kernels = fgetKernelNumber()

    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    net = fCreateVNet_Block(inp, kernels[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    net = fCreateVNet_DownConv_Block(net, net._keras_shape[1], strides[0],
                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    net = fCreateVNet_Block(net, kernels[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    net = fCreateVNet_DownConv_Block(net, net._keras_shape[1], strides[1],
                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    net = fCreateVNet_Block(net, kernels[2], type=fgetLayerNumConv(), l2_reg=l2_reg)

    # SPP module stands in for the third down-sampling convolution.
    net = fSPP(net, level=3)

    net = Dropout(dr_rate)(net)
    net = Dense(units=2, kernel_initializer='normal', kernel_regularizer=l2(l2_reg))(net)
    outp = Activation('softmax')(net)

    return Model(inputs=inp, outputs=outp)
def fCreateModel_Inception_Archi2(patchSize, dr_rate=0.0, iPReLU=0, l2_reg=1e-6):
    """Build the second Inception architecture (Total params: 2,883,791).

    The three higher convolution layers (except down-sampling) are replaced by
    inception blocks while the three lower convolution layers are kept as
    plain convolutions. Following GoogLeNet, starting inception modules only
    at higher layers is beneficial for memory efficiency during training.

    Args:
        patchSize: sequence of three spatial extents of the input patch.
        dr_rate: dropout rate applied before the dense classification layer.
        iPReLU: PReLU flag forwarded to the blocks.
        l2_reg: L2 weight-regularization factor used throughout.

    Returns:
        A Keras Model mapping the input patch to two-class softmax scores.
    """
    strides = fgetStrides()
    kernels = fgetKernelNumber()

    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    # Lowest stage: conventional convolution block.
    net = fCreateVNet_Block(inp, kernels[0], type=fgetLayerNumIncep(),
                            iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    net = fCreateVNet_DownConv_Block(net, net._keras_shape[1], strides[0],
                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    # Middle stage: convolution-plus-inception block.
    net = fConvIncep(net, KB=kernels[1], layernum=fgetLayerNumIncep(), l2_reg=l2_reg)
    net = fCreateVNet_DownConv_Block(net, net._keras_shape[1], strides[1],
                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    # Highest stage: chained inception blocks.
    net = fIncepChain(net, layernum=fgetLayerNumIncep(), l2_reg=l2_reg, iPReLU=iPReLU)
    net = fCreateVNet_DownConv_Block(net, net._keras_shape[1], strides[2],
                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # Dense classification head.
    net = Flatten()(net)
    net = Dropout(dr_rate)(net)
    net = Dense(units=2, kernel_initializer='normal', kernel_regularizer=l2(l2_reg))(net)
    outp = Activation('softmax')(net)

    return Model(inputs=inp, outputs=outp)
def fCreateModel_FCN_MultiFM(patchSize, dr_rate=0.0, iPReLU=0, l1_reg=0, l2_reg=1e-6):
    """Build a fully-convolutional network with multi-scale feature-map fusion
    (Total params: 1,420,549).

    The dense layer is replaced by a 1x1x1 convolution with filters=2 for the
    two classes. The feature map (FM) from the third down-scaled convolutional
    stage is upsampled by transposed convolution and added to the FM from the
    second down-scaled stage; the combined FM goes through another two-filter
    convolution. The two per-scale predictions are averaged as the final result.

    Args:
        patchSize: sequence of three spatial extents of the input patch.
        dr_rate: dropout rate applied before each classification convolution.
        iPReLU: PReLU flag forwarded to the down-convolution blocks.
        l1_reg: L1 weight-regularization factor for the classification convs.
        l2_reg: L2 weight-regularization factor used throughout.

    Returns:
        A Keras Model mapping the input patch to averaged two-class softmax scores.
    """
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()

    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    after_Conv_1 = fCreateVNet_Block(inp, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1, after_Conv_1._keras_shape[1], Strides[0],
                                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_Conv_2 = fCreateVNet_Block(after_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2, after_Conv_2._keras_shape[1], Strides[1],
                                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_Conv_3 = fCreateVNet_Block(after_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3, after_Conv_3._keras_shape[1], Strides[2],
                                                  iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # Fully-convolutional prediction over the FM from the deepest level.
    dropout_out1 = Dropout(dr_rate)(after_DownConv_3)
    fclayer1 = Conv3D(
        2,
        kernel_size=(1, 1, 1),
        kernel_initializer='he_normal',
        weights=None,
        padding='valid',
        strides=(1, 1, 1),
        kernel_regularizer=l1_l2(l1_reg, l2_reg),
    )(dropout_out1)
    fclayer1 = GlobalAveragePooling3D()(fclayer1)

    # Upsample the deepest-level FM and add it to the level-2 FM.
    # The transposed convolution must produce the same channel count as
    # after_DownConv_2 for the element-wise add, so derive it from the tensor
    # instead of hard-coding it (was a magic constant 97), consistent with how
    # every other block in this file reads channel counts via _keras_shape[1].
    UpedFM_Level3 = Conv3DTranspose(filters=after_DownConv_2._keras_shape[1],
                                    kernel_size=(3, 3, 1),
                                    strides=(2, 2, 1),
                                    padding='same')(after_DownConv_3)
    combined_FM_Level23 = add([UpedFM_Level3, after_DownConv_2])
    fclayer2 = Conv3D(
        2,
        kernel_size=(1, 1, 1),
        kernel_initializer='he_normal',
        weights=None,
        padding='valid',
        strides=(1, 1, 1),
        kernel_regularizer=l1_l2(l1_reg, l2_reg),
    )(combined_FM_Level23)
    fclayer2 = GlobalAveragePooling3D()(fclayer2)

    # Combine the two predictions by averaging, then softmax.
    fcl_aver = average([fclayer1, fclayer2])
    predict = Activation('softmax')(fcl_aver)

    cnn_fcl_msfm = Model(inputs=inp, outputs=predict)
    return cnn_fcl_msfm