Example #1
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4))

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = conv(x)
    return x
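
A minimal usage sketch for this helper (assuming TensorFlow 2.x Keras and that the imports below are the layers resnet_layer relies on): chain it to build a small conv-BN-ReLU stack. The input shape and filter counts are illustrative only.

# Hedged usage sketch for resnet_layer; shapes and filter counts are illustrative.
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

inputs = Input(shape=(32, 32, 3))
x = resnet_layer(inputs)                        # conv -> BN -> ReLU, 16 filters
x = resnet_layer(x, num_filters=32, strides=2)  # downsampling block
model = Model(inputs, x)
model.summary()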
Example #2
    def build_discriminator(self):

        model = Sequential()

        model.add(Dense(78, activation="relu", input_dim=self.input_shape))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(56, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(32, activation="relu"))
        model.add(Dropout(rate=0.3))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(28, activation="relu"))
        model.add(Dropout(rate=0.3))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(10, activation="relu"))

        model.summary()

        img = Input(shape=self.img_shape)

        features = model(img)
        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes + 1, activation="softmax")(features)

        return Model(img, [valid, label])
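
Because build_discriminator returns a model with two heads (a real/fake validity score and a class label over num_classes + 1 categories), it would typically be compiled with one loss per output. A hedged sketch placed inside the class's __init__; the loss choices and optimizer settings are assumptions, not taken from the source.

        # Hedged sketch: compile the two-headed discriminator (assumed losses/optimizer).
        from tensorflow.keras.optimizers import Adam

        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=["binary_crossentropy", "categorical_crossentropy"],
                                   optimizer=Adam(0.0002, 0.5),
                                   metrics=["accuracy"])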
Example #3
 def feature_extractor_network(self):
     # input
     in_image = Input(shape = in_shape)
     # C1 Layer
     nett = Conv2D(32,(5,5))(in_image)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # M2 Layer
     nett = MaxPooling2D(pool_size = (3,3))(nett)
     # C3 Layer
     nett = Conv2D(64,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # L4 Layer
     nett = LocallyConnected2D(128,(3,3))(nett)
     # L5 Layer
     nett = LocallyConnected2D(256,(3,3))(nett)
     # F6 Layer
     nett = Dense(512,activation='relu')(nett)
     nett = Dropout(0.2)(nett)
     # F7 Layer 
     out_features = Dense(512, activation='tanh')(nett)  # the unit count is an assumption; the original omitted it
     # output
     model = Model(inputs = in_image, outputs = out_features)
     return model
Example #4
def projection_block_3D(input_tensor,
                        filters,
                        kernel_size=(3, 3, 3),
                        stage=0,
                        block=0,
                        se_enabled=False,
                        se_ratio=16):

    numFilters1, numFilters2 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    # downsampling directly by convolution with stride 2
    x = Conv3D(filters=numFilters1,
               kernel_size=kernel_size,
               strides=(2, 2, 2),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)

    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Conv3D(filters=numFilters2,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)

    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block_3D(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    # projection shortcut convolution
    x_shortcut = Conv3D(filters=numFilters2,
                        kernel_size=(2, 2, 2),
                        strides=(2, 2, 2),
                        padding='same',
                        kernel_initializer='he_normal',
                        name=conv_name_base + '1')(input_tensor)
    x_shortcut = BatchNormalization(axis=bn_axis,
                                    name=bn_name_base + '1')(x_shortcut)

    # addition of shortcut
    x = Add()([x, x_shortcut])

    x = LeakyReLU(alpha=0.01)(x)

    return x
Example #5
def zero_padding_block(input_tensor,
                       filters,
                       stage,
                       block,
                       se_enabled=False,
                       se_ratio=16):
    numFilters1, numFilters2 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    # downsampling directly by convolution with stride 2
    x = Conv2D(numFilters1, (3, 3),
               strides=(2, 2),
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(numFilters2, (3, 3),
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)
    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    # zero padding and downsampling with 1x1 conv shortcut connection
    x_shortcut = Conv2D(1, (1, 1),
                        strides=(2, 2),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '1')(input_tensor)
    x_shortcut2 = MaxPooling2D(pool_size=(1, 1),
                               strides=(2, 2),
                               padding='same')(input_tensor)
    x_shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(x_shortcut)

    x_shortcut = BatchNormalization(axis=bn_axis,
                                    name=bn_name_base + '1')(x_shortcut)

    # addition of shortcut
    x = Add()([x, x_shortcut])
    x = Activation('relu')(x)

    return x
Example #6
def projection_bottleneck_block(input_tensor,
                                filters,
                                stage,
                                block,
                                se_enabled=False,
                                se_ratio=16):
    numFilters1, numFilters2, numFilters3 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    x = Conv2D(numFilters1, (1, 1),
               strides=(2, 2),
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(numFilters2, (3, 3),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(numFilters3, (1, 1),
               kernel_initializer='he_normal',
               name=conv_name_base + '2c')(x)
    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    # projection shortcut
    x_shortcut = Conv2D(numFilters3, (1, 1),
                        strides=(2, 2),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '1')(input_tensor)
    x_shortcut = BatchNormalization(axis=bn_axis,
                                    name=bn_name_base + '1')(x_shortcut)

    x = Add()([x, x_shortcut])
    x = Activation('relu')(x)

    return x
Example #7
def main():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Compile model
    model = Sequential()
    model.add(BatchNormalization(input_shape=(num_features, )))
    model.add(Dense(10, use_bias=True))
    model.add(Activation('relu'))
    model.add(Dense(1, use_bias=True))
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(lr=learning_rates[0])
    model.compile(loss='mean_absolute_error',
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100
              #,callbacks=[kp.plot_losses]
              )

    return model
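
The test split returned by data_preparation() is loaded but never used in main(); a short hedged follow-up evaluating it could look like the sketch below (it assumes data_preparation() returns the same split when called again).

# Hedged usage sketch: evaluate the trained model on the held-out test split.
if __name__ == '__main__':
    trained_model = main()
    _, _, _, _, test_data, test_label = data_preparation()
    test_loss, test_mae = trained_model.evaluate(test_data, test_label)
    print('Test MAE = {:.4f}'.format(test_mae))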
Example #8
def identity_block_3D(input_tensor,
                      filters,
                      kernel_size=(3, 3, 3),
                      stage=0,
                      block=0,
                      se_enabled=False,
                      se_ratio=16):

    numFilters1, numFilters2 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    x = Conv3D(filters=numFilters1,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)

    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Conv3D(filters=numFilters2,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)

    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block_3D(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    x = Add()([x, input_tensor])

    x = LeakyReLU(alpha=0.01)(x)

    return x
Example #9
    def build_generator(self):

        model = Sequential()

        model.add(Dense(78, activation="relu", input_dim=self.latent_dim))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(56, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(32, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(28, activation="tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
Example #10
def conv_bn_relu(inputs):
    out = Conv2D(24,
                 3,
                 1,
                 "same",
                 kernel_initializer='he_normal',
                 bias_initializer='zeros')(inputs)
    out = BatchNormalization()(out)
    out = ReLU()(out)
    return out
Example #11
 def generator_network(self):
     # input
     in_latents = Input(shape = (self.latent_dim,))
     # Conv2DTranspose expects a 4-D tensor, so the flat latent vector is reshaped first
     # (the 1x1 spatial reshape is an assumption; the original fed the vector in directly)
     nett = Reshape((1, 1, self.latent_dim))(in_latents)
     #DC1
     nett = Conv2DTranspose(512,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC2
     nett = Conv2DTranspose(128,(3,3))(nett)	
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC3
     nett = Conv2DTranspose(64,(3,3))(nett)
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     #DC4
     nett = Conv2DTranspose(32,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # a tanh output activation is assumed here; the original `Dense(alpha = 0.2)` is not valid Keras
     out_image = Activation('tanh')(nett)
     #output
     model = Model(inputs = in_latents, outputs = out_image)
     return model
Example #12
 def discriminator_network(self):
     # input
     in_image = Input(shape=self.img_shape)
     # C1 layer
     nett = Conv2D(64,(5,5))(in_image)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     # C2 layer
     nett = Conv2D(128,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     nett = Dropout(0.2)(nett)
     # C3 layer
     nett = Conv2D(256,(5,5))(nett)		
     nett = BatchNormalization()(nett)
     nett = LeakyReLU(alpha = 0.2)(nett)
     nett = Dropout(0.2)(nett)
     # F4 layer
     nett = Flatten()(nett)
     validity = Dense(1, activation = 'sigmoid')(nett)  # sigmoid validity output assumed; `alpha` is not a Dense argument
     #output
     model =  Model(inputs = in_image, outputs = validity)
     return model
Example #13
def createModel(patchSize, numClasses, usingClassification=False):

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    input_tensor = Input(shape=(patchSize[0], patchSize[1], patchSize[2], 1))

    # first stage
    x = Conv3D(filters=16,
               kernel_size=(5, 5, 5),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(input_tensor)
    x = BatchNormalization(axis=bn_axis)(x)
    x_after_stage_1 = LeakyReLU(alpha=0.01)(x)

    #x_after_stage_1 = Add()([input_tensor, x])

    # first down convolution
    x_down_conv_1 = projection_block_3D(x_after_stage_1,
                                        filters=(32, 32),
                                        kernel_size=(2, 2, 2),
                                        stage=1,
                                        block=1,
                                        se_enabled=True,
                                        se_ratio=4)

    # second stage
    x = identity_block_3D(x_down_conv_1,
                          filters=(32, 32),
                          kernel_size=(3, 3, 3),
                          stage=2,
                          block=1,
                          se_enabled=True,
                          se_ratio=4)
    x_after_stage_2 = identity_block_3D(x,
                                        filters=(32, 32),
                                        kernel_size=(3, 3, 3),
                                        stage=2,
                                        block=2,
                                        se_enabled=True,
                                        se_ratio=4)

    # second down convolution
    x_down_conv_2 = projection_block_3D(x_after_stage_2,
                                        filters=(64, 64),
                                        kernel_size=(2, 2, 2),
                                        stage=2,
                                        block=3,
                                        se_enabled=True,
                                        se_ratio=8)

    # third stage
    x = identity_block_3D(x_down_conv_2,
                          filters=(64, 64),
                          kernel_size=(3, 3, 3),
                          stage=3,
                          block=1,
                          se_enabled=True,
                          se_ratio=8)
    x_after_stage_3 = identity_block_3D(x,
                                        filters=(64, 64),
                                        kernel_size=(3, 3, 3),
                                        stage=3,
                                        block=2,
                                        se_enabled=True,
                                        se_ratio=8)
    #x = identity_block_3D(x, filters=(64, 64), kernel_size=(3, 3, 3), stage=3, block=3, se_enabled=False, se_ratio=16)

    # third down convolution
    x_down_conv_3 = projection_block_3D(x_after_stage_3,
                                        filters=(128, 128),
                                        kernel_size=(2, 2, 2),
                                        stage=3,
                                        block=4,
                                        se_enabled=True,
                                        se_ratio=16)

    # fourth stage
    x = identity_block_3D(x_down_conv_3,
                          filters=(128, 128),
                          kernel_size=(3, 3, 3),
                          stage=4,
                          block=1,
                          se_enabled=True,
                          se_ratio=16)
    x_after_stage_4 = identity_block_3D(x,
                                        filters=(128, 128),
                                        kernel_size=(3, 3, 3),
                                        stage=4,
                                        block=2,
                                        se_enabled=True,
                                        se_ratio=16)
    #x = identity_block_3D(x, filters=(128, 128), kernel_size=(3, 3, 3), stage=4, block=3, se_enabled=False, se_ratio=16)

    ### end of encoder path

    if usingClassification:
        # use x_after_stage_4 as quantification output
        # global average pooling
        x_class = GlobalAveragePooling3D(
            data_format=K.image_data_format())(x_after_stage_4)

        # fully-connected layer
        classification_output = Dense(units=numClasses,
                                      activation='softmax',
                                      kernel_initializer='he_normal',
                                      name='classification_output')(x_class)

    ### decoder path

    # first 3D upsampling
    x = UpSampling3D(size=(2, 2, 2),
                     data_format=K.image_data_format())(x_after_stage_4)
    x = Conv3D(filters=64,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = concatenate([x, x_after_stage_3], axis=bn_axis)

    # first decoder stage
    x = identity_block_3D(x,
                          filters=(128, 128),
                          kernel_size=(3, 3, 3),
                          stage=6,
                          block=1,
                          se_enabled=True,
                          se_ratio=16)
    x = identity_block_3D(x,
                          filters=(128, 128),
                          kernel_size=(3, 3, 3),
                          stage=6,
                          block=2,
                          se_enabled=True,
                          se_ratio=16)

    # second 3D upsampling
    x = UpSampling3D(size=(2, 2, 2), data_format=K.image_data_format())(x)
    x = Conv3D(filters=32,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = concatenate([x, x_after_stage_2], axis=bn_axis)

    # second decoder stage
    x = identity_block_3D(x,
                          filters=(64, 64),
                          kernel_size=(3, 3, 3),
                          stage=7,
                          block=1,
                          se_enabled=True,
                          se_ratio=8)
    x = identity_block_3D(x,
                          filters=(64, 64),
                          kernel_size=(3, 3, 3),
                          stage=7,
                          block=2,
                          se_enabled=True,
                          se_ratio=8)

    # third 3D upsampling
    x = UpSampling3D(size=(2, 2, 2), data_format=K.image_data_format())(x)
    x = Conv3D(filters=16,
               kernel_size=(3, 3, 3),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = concatenate([x, x_after_stage_1], axis=bn_axis)

    # third decoder stage
    x = identity_block_3D(x,
                          filters=(32, 32),
                          kernel_size=(3, 3, 3),
                          stage=9,
                          block=1,
                          se_enabled=True,
                          se_ratio=4)
    #x = identity_block_3D(x, filters=(32, 32), kernel_size=(3, 3, 3), stage=9, block=2, se_enabled=True, se_ratio=4)

    ### End of decoder

    ### final segmentation layers
    # a 1x1x1 Conv3D produces 2 feature maps for the probabilistic segmentation of foreground and background
    x = Conv3D(filters=2,
               kernel_size=(1, 1, 1),
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name='conv_veryEnd')(x)
    #x = BatchNormalization(axis=bn_axis)(x) # why LeakyReLU before softmax?
    #x = LeakyReLU(alpha=0.01)(x)

    segmentation_output = Softmax(axis=bn_axis, name='segmentation_output')(x)
    #segmentation_output = keras.layers.activations.sigmoid(x)

    # create model
    if usingClassification:
        cnn = Model(inputs=[input_tensor],
                    outputs=[segmentation_output, classification_output],
                    name='3D-VResFCN-Classification')
        sModelName = cnn.name
    else:
        cnn = Model(inputs=[input_tensor],
                    outputs=[segmentation_output],
                    name='3D-VResFCN')
        sModelName = cnn.name

    return cnn, sModelName
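
With usingClassification=True the returned network has two named outputs, so it can be compiled with one loss per output. A minimal sketch; the patch size, losses, loss weights, and optimizer settings here are assumptions, not taken from the source.

# Hedged compilation sketch for the two-output 3D-VResFCN (assumed hyperparameters).
from tensorflow.keras.optimizers import Adam

cnn, sModelName = createModel(patchSize=(64, 64, 16), numClasses=2, usingClassification=True)
cnn.compile(optimizer=Adam(1e-4),
            loss={'segmentation_output': 'categorical_crossentropy',
                  'classification_output': 'categorical_crossentropy'},
            loss_weights={'segmentation_output': 1.0, 'classification_output': 0.5},
            metrics={'segmentation_output': 'accuracy',
                     'classification_output': 'accuracy'})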
Example #14
    out = BatchNormalization()(out)
    out = ReLU()(out)
    return out


##################################### model structure #########################################
#---------------------------------------- encoder --------------------------------------------#
inputs = Input(shape=(512, 512, 2))

a1 = Conv2D(24,
            3,
            1,
            "same",
            kernel_initializer='he_normal',
            bias_initializer='zeros')(inputs)
a1 = BatchNormalization()(a1)
a1 = ReLU()(a1)

a2 = Conv2D(24,
            3,
            1,
            "same",
            kernel_initializer='he_normal',
            bias_initializer='zeros')(a1)
a2 = BatchNormalization()(a2)
a2 = ReLU()(a2)
a2 = Merge
#---------------------------------------------
b1 = MaxPooling2D((2, 2), padding='valid')(a2)

b1 = Conv2D(48,