Example No. 1
    def get_model_attntype():
        if database == 3:
            input_layer = Input(shape=(
                input.shape[1],
                x_train_cv.shape[2],
            ))
            flatten_layer = Flatten(input_shape=(
                x_train_cv.shape[1],
                x_train_cv.shape[2],
            ))(input_layer)
            dense = Dense(no_neurons, activation='relu')(flatten_layer)
        else:
            input_layer = Input(shape=(input.shape[1], ))
            dense = Dense(no_neurons, activation='relu')(input_layer)
        if bn_active:
            batchnorm = bn()(dense)

            for i in range(no_layers - 2):
                dense = Dense(no_neurons, activation='relu')(batchnorm)
                batchnorm = bn()(dense)

            dense = Dense(no_neurons, activation='relu')(batchnorm)
        else:
            for i in range(no_layers - 2):
                dense = Dense(no_neurons, activation='relu')(dense)
            dense = Dense(no_neurons, activation='relu')(dense)
        conf_layer = Dense(num_classes, activation='sigmoid')(dense)
        if database == 3:
            attention_probs = Dense(num_classes,
                                    activation='softmax',
                                    name='attention_probs')(flatten_layer)
        else:
            attention_probs = Dense(num_classes,
                                    activation='softmax',
                                    name='attention_probs')(input_layer)
        attention_mul = multiply([conf_layer, attention_probs],
                                 name='attention_mul')

        model = Model(inputs=[input_layer], outputs=attention_mul)
        adam = optimizers.Adam(lr=0.001,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               decay=0.0,
                               amsgrad=False)
        if database in [0, 1, 2, 6]:
            model.compile(optimizer=adam,
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
        elif database in [4, 5]:
            model.compile(optimizer=adam, loss='mean_squared_error')
        elif database in [3]:
            model.compile(optimizer=adam,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

        model.summary()
        return model
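
The closure above pulls several names from its enclosing scope (database, x_train_cv, no_neurons, no_layers, num_classes, bn_active) plus the bn alias for BatchNormalization. A minimal sketch of that surrounding context for the flat-feature case (database != 3); the values and the random data are illustrative assumptions, not part of the original:

import numpy as np
from keras.layers import Input, Dense, Flatten, multiply
from keras.layers.normalization import BatchNormalization as bn
from keras.models import Model
from keras import optimizers

# Illustrative placeholders for the globals the closure expects.
database = 0                              # selects the binary_crossentropy branch
num_classes = 2
no_neurons = 64
no_layers = 4
bn_active = True
x_train_cv = np.random.rand(100, 20)      # (samples, features)
input = x_train_cv                        # the snippet reads input.shape[1]

model = get_model_attntype()              # builds, compiles and prints the summary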
Example No. 2
 def onecnn_buildnetwork(self):
     x = self.input
     for i in range(0, self.layernum):
         # uses the default kernel_initializer='random_normal' with the given stddev
         # a layer instance is callable on a tensor, and returns a tensor
         x = cnn1(self.featuremapnum*(2**i), self.Convolutionkernel, kernel_regularizer=regularizers.l2(0.01),
                   kernel_initializer=initializers.random_normal(stddev=0.01),
                   bias_initializer='zeros')(x)
         x = bn()(x)
         x = Activation('relu')(x)
         x = Dropout(0.25)(x)
         x = pool()(x)  # default pool_size is 2
     x = Flatten()(x)
     x = Dense(self.featuremapnum*(2**i))(x)
     x = bn()(x)
     x = Activation('relu')(x)
     x = Dropout(0.5)(x)
     predictions = Dense(1, activation='linear')(x)
     return predictions
Example No. 3
	def compile(self,load=False):
		model = Sequential()
		model.add(Dense(n_hidden,input_shape=(4,)))
		model.add(bn())
		model.add(Activation('relu'))
		model.add(Dense(n_hidden))
		model.add(bn())
		model.add(Activation('relu'))
		model.add(Dense(3))
		model.add(Activation('softmax'))
		adam = optimizers.Adam(lr=self.lr)
		model.compile(loss='categorical_crossentropy',
				optimizer=adam,
				metrics=['accuracy'])
		self.model = model
		plot_model(model,show_shapes=True,to_file='model.png',rankdir='TB')
		model.summary()
		if load == True:
			model.load_weights(self.save_path)
			print("Loaded weights from {0}.".format(self.save_path))
Example No. 5
    def buildnetwork(self,Cardinalnumber):
        x = self.input
        for i in range(0, self.layernum):
            x = Dense(self.Neuronsnum-i*Cardinalnumber,
                      kernel_regularizer=regularizers.l2(0.01),
                      kernel_initializer=initializers.random_normal(stddev=0.01),
                      bias_initializer='zeros', weights=self.list[i])(x)  # uses the default kernel_initializer='random_normal' with the given stddev
            #x = Dense(self.Neuronsnum - i * Cardinalnumber, weights=self.list[i])(x)
            # debug snippet
            #print('list',self.list[i])

            x = bn()(x)
            x = Activation('relu')(x)
            x = Dropout(0.5)(x)
        #predictions = Dense(1, activation='linear')(x)
        predictions = Dense(1, activation='linear')(x)
        # the final output has no activation function
        #predictions = Dense(1)(x)
        return predictions


# from FCAutoANN.layers import layer
# from keras.layers.core import Dense, Activation
#
# class network():
#     def __init__(self,layernum,Neuronsnum,input):
#         self.layernum = layernum
#         self.Neuronsnum = Neuronsnum
#         self.input = input
#
#     def buildnetwork(self,list):
#         x = self.input
#         for i in range(0, self.layernum):
#             layers = layer(self.Neuronsnum-i*16,x,list)
#             x = layers.buildlayer()
#         predictions = Dense(1, activation='linear')(x)
#         return predictions
Example No. 6
def UNet(input_shape, learn_rate=1e-3):
    l2_lambda = 0.0002
    DropP = 0.3
    kernel_size = 3

    inputs = Input(input_shape)

    conv1a = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(inputs)

    conv1a = bn()(conv1a)

    conv1b = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv1a)

    conv1b = bn()(conv1b)

    merge1 = concatenate([conv1a, conv1b])

    conv1c = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

    conv1c = bn()(conv1c)

    merge2 = concatenate([conv1a, conv1b, conv1c])

    conv1d = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

    conv1d = bn()(conv1d)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1d)

    pool1 = Dropout(DropP)(pool1)

    #############################

    conv2a = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool1)

    conv2a = bn()(conv2a)

    conv2b = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv2a)

    conv2b = bn()(conv2b)

    merge1 = concatenate([conv2a, conv2b])

    conv2c = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

    conv2c = bn()(conv2c)

    merge2 = concatenate([conv2a, conv2b, conv2c])

    conv2d = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

    conv2d = bn()(conv2d)

    merge3 = concatenate([conv2a, conv2b, conv2c, conv2d])

    conv2e = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

    conv2e = bn()(conv2e)

    merge4 = concatenate([conv2a, conv2b, conv2c, conv2d, conv2e])

    conv2f = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

    conv2f = bn()(conv2f)

    merge5 = concatenate([conv2a, conv2b, conv2c, conv2d, conv2e, conv2f])

    conv2g = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

    conv2g = bn()(conv2g)

    merge6 = concatenate(
        [conv2a, conv2b, conv2c, conv2d, conv2e, conv2f, conv2g])

    conv2h = Conv2D(64, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

    conv2h = bn()(conv2h)

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2h)

    pool2 = Dropout(DropP)(pool2)

    #############################

    conv3a = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool2)

    conv3a = bn()(conv3a)

    conv3b = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv3a)

    conv3b = bn()(conv3b)

    merge1 = concatenate([conv3a, conv3b])

    conv3c = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

    conv3c = bn()(conv3c)

    merge2 = concatenate([conv3a, conv3b, conv3c])

    conv3d = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

    conv3d = bn()(conv3d)

    merge3 = concatenate([conv3a, conv3b, conv3c, conv3d])

    conv3e = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

    conv3e = bn()(conv3e)

    merge4 = concatenate([conv3a, conv3b, conv3c, conv3d, conv3e])

    conv3f = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

    conv3f = bn()(conv3f)

    merge5 = concatenate([conv3a, conv3b, conv3c, conv3d, conv3e, conv3f])

    conv3g = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

    conv3g = bn()(conv3g)

    merge6 = concatenate(
        [conv3a, conv3b, conv3c, conv3d, conv3e, conv3f, conv3g])

    conv3h = Conv2D(128, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

    conv3h = bn()(conv3h)

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3h)

    pool3 = Dropout(DropP)(pool3)

    #############################
    conv4a = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool3)

    conv4a = bn()(conv4a)

    conv4b = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv4a)

    conv4b = bn()(conv4b)

    merge1 = concatenate([conv4a, conv4b])

    conv4c = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

    conv4c = bn()(conv4c)

    merge2 = concatenate([conv4a, conv4b, conv4c])

    conv4d = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

    conv4d = bn()(conv4d)

    merge3 = concatenate([conv4a, conv4b, conv4c, conv4d])

    conv4e = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

    conv4e = bn()(conv4e)

    merge4 = concatenate([conv4a, conv4b, conv4c, conv4d, conv4e])

    conv4f = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

    conv4f = bn()(conv4f)

    merge5 = concatenate([conv4a, conv4b, conv4c, conv4d, conv4e, conv4f])

    conv4g = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

    conv4g = bn()(conv4g)

    merge6 = concatenate(
        [conv4a, conv4b, conv4c, conv4d, conv4e, conv4f, conv4g])

    conv4h = Conv2D(256, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

    conv4h = bn()(conv4h)

    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4h)

    pool4 = Dropout(DropP)(pool4)

    #############################
    conv5a = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool4)

    conv5a = bn()(conv5a)

    conv5b = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv5a)

    conv5b = bn()(conv5b)

    merge1 = concatenate([conv5a, conv5b])

    conv5c = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

    conv5c = bn()(conv5c)

    merge2 = concatenate([conv5a, conv5b, conv5c])

    conv5d = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

    conv5d = bn()(conv5d)

    merge3 = concatenate([conv5a, conv5b, conv5c, conv5d])

    conv5e = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

    conv5e = bn()(conv5e)

    merge4 = concatenate([conv5a, conv5b, conv5c, conv5d, conv5e])

    conv5f = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

    conv5f = bn()(conv5f)

    merge5 = concatenate([conv5a, conv5b, conv5c, conv5d, conv5e, conv5f])

    conv5g = Conv2D(12, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

    conv5g = bn()(conv5g)

    merge6 = concatenate(
        [conv5a, conv5b, conv5c, conv5d, conv5e, conv5f, conv5g])

    conv5h = Conv2D(512, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

    conv5h = bn()(conv5h)

    flatten_block = Flatten()(conv5h)

    final_op = Dense(15000, activation='softmax',
                     name='final_op')(flatten_block)
    model = Model(inputs=inputs, outputs=final_op)
    model.compile(optimizer='adadelta',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    return model
Example No. 7
from keras.layers import Input, Dense
from keras.models import Model
from keras.layers.core import Flatten, Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization as bn
from keras.layers.pooling import MaxPooling1D as pool
from keras.layers.convolutional import Conv1D as cnn1
import numpy as np

# This returns a tensor
inputs = Input(shape=(72, 1))
import keras.utils

# a layer instance is callable on a tensor, and returns a tensor
x = cnn1(64, 3)(inputs)
x = bn()(x)
x = Activation('relu')(x)
x = cnn1(64, 3)(x)
x = bn()(x)
x = Activation('relu')(x)
x = pool()(x)
x = cnn1(128, 3)(x)
x = bn()(x)
x = Activation('relu')(x)
x = cnn1(128, 3)(x)
x = bn()(x)
x = Activation('relu')(x)
x = pool()(x)
x = cnn1(256, 3)(x)
x = bn()(x)
x = Activation('relu')(x)
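
The stack above ends at an activation and is never wrapped into a Model. One way to finish it, sketched under the assumption that a global-max-pool regression head is wanted (this head is not part of the original snippet):

from keras.layers import GlobalMaxPooling1D, Dense
from keras.models import Model

# Assumed head: pool over the remaining time steps and regress a single value.
x = GlobalMaxPooling1D()(x)
outputs = Dense(1, activation='linear')(x)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()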
    def train(self, x_train, y_train, epochs=1200, batch_size=4):
        """
        this is the training function for
        :param x_train:
        :param y_train:
        :return:
        """
        l2_lambda = 0.0002
        DropP = 0.3
        kernel_size = 3
        input_shape = (512, 512, 1)
        inputs = Input(input_shape)
        input_prob = Input(input_shape)
        input_prob_inverse = Input(input_shape)
        conv1 = Conv2D(32, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(inputs)
        conv1 = bn()(conv1)
        conv1 = Conv2D(32, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv1)
        conv1 = bn()(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        pool1 = Dropout(DropP)(pool1)

        conv2 = Conv2D(64, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(pool1)
        conv2 = bn()(conv2)
        conv2 = Conv2D(64, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv2)
        conv2 = bn()(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        pool2 = Dropout(DropP)(pool2)

        conv3 = Conv2D(128, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(pool2)
        conv3 = bn()(conv3)
        conv3 = Conv2D(128, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv3)
        conv3 = bn()(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        pool3 = Dropout(DropP)(pool3)

        conv4 = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(pool3)
        conv4 = bn()(conv4)
        conv4 = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv4)
        conv4 = bn()(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
        pool4 = Dropout(DropP)(pool4)

        conv5 = Conv2D(512, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(pool4)
        conv5 = bn()(conv5)
        conv5 = Conv2D(512, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv5)
        conv5 = bn()(conv5)

        up6 = concatenate([
            Conv2DTranspose(256, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4
        ],
                          name='up6',
                          axis=3)
        up6 = Dropout(DropP)(up6)
        conv6 = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(up6)
        conv6 = bn()(conv6)
        conv6 = Conv2D(256, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv6)

        conv6 = bn()(conv6)
        up7 = concatenate([
            Conv2DTranspose(128, (2, 2), strides=(2, 2),
                            padding='same')(conv6), conv3
        ],
                          name='up7',
                          axis=3)
        up7 = Dropout(DropP)(up7)
        conv7 = Conv2D(128, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(up7)
        conv7 = bn()(conv7)
        conv7 = Conv2D(128, (3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv7)
        conv7 = bn()(conv7)

        up8 = concatenate([
            Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7),
            conv2
        ],
                          name='up8',
                          axis=3)
        up8 = Dropout(DropP)(up8)
        conv8 = Conv2D(64, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(up8)
        conv8 = bn()(conv8)
        conv8 = Conv2D(64, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv8)
        conv8 = bn()(conv8)

        up9 = concatenate([
            Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8),
            conv1
        ],
                          name='up9',
                          axis=3)
        up9 = Dropout(DropP)(up9)
        conv9 = Conv2D(32, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(up9)
        conv9 = bn()(conv9)
        conv9 = Conv2D(32, (kernel_size, kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(l2_lambda))(conv9)
        conv9 = bn()(conv9)

        conv10 = Conv2D(1, (1, 1), activation='sigmoid', name='conv10')(conv9)

        model = Model(inputs=[inputs], outputs=[conv10])
        model.compile(optimizer=Adam(lr=1e-5),
                      loss=UNET_Classifier.dice_coef_loss,
                      metrics=[UNET_Classifier.dice_coef])
        print(model.summary())

        # training network
        model.fit([x_train], [y_train],
                  batch_size=batch_size,
                  epochs=epochs,
                  shuffle=True)

        # set as class's model to be used for prediction
        self.trained_model = model

        return model
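
A hedged usage sketch for the train() method above, assuming it belongs to the UNET_Classifier class referenced in its loss arguments and that images and masks arrive as (N, 512, 512, 1) float arrays; the random data is filler only:

import numpy as np

x_train = np.random.rand(4, 512, 512, 1).astype('float32')            # images
y_train = (np.random.rand(4, 512, 512, 1) > 0.5).astype('float32')    # masks

# clf = UNET_Classifier()                      # constructor signature not shown above
# model = clf.train(x_train, y_train, epochs=1, batch_size=2)
# preds = model.predict(x_train[:2])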
Example No. 9
    def unet(self, input_shape, learn_rate=1e-3):
        """
        Creates a U-Net model with its corresponding convolutional layer network

        Arguments
        ---------
            input_shape : tuple
                Shape of the input images
            learn_rate : float
                Learning rate of the model

        Returns
        -------
            model : U-Net model
        """
        inputs = Input(input_shape)

        conv1 = Conv2D(32, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(inputs)
        conv1 = bn()(conv1)
        conv1 = Conv2D(32, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv1)
        conv1 = bn()(conv1)

        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        pool1 = Dropout(self.DropP)(pool1)

        conv2 = Conv2D(64, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(pool1)
        conv2 = bn()(conv2)

        conv2 = Conv2D(64, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv2)
        conv2 = bn()(conv2)

        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        pool2 = Dropout(self.DropP)(pool2)

        conv3 = Conv2D(128, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(pool2)
        conv3 = bn()(conv3)
        conv3 = Conv2D(128, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv3)
        conv3 = bn()(conv3)

        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        pool3 = Dropout(self.DropP)(pool3)

        conv4 = Conv2D(256, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(pool3)
        conv4 = bn()(conv4)
        conv4 = Conv2D(256, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv4)
        conv4 = bn()(conv4)

        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
        pool4 = Dropout(self.DropP)(pool4)

        conv5 = Conv2D(512, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(pool4)
        conv5 = bn()(conv5)
        conv5 = Conv2D(512, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv5)
        conv5 = bn()(conv5)

        up6 = concatenate([
            Conv2DTranspose(256, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4
        ],
                          name='up6',
                          axis=3)
        up6 = Dropout(self.DropP)(up6)

        conv6 = Conv2D(256, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(self.l2_lambda))(up6)
        conv6 = bn()(conv6)
        conv6 = Conv2D(256, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv6)
        conv6 = bn()(conv6)

        up7 = concatenate([
            Conv2DTranspose(128, (2, 2), strides=(2, 2),
                            padding='same')(conv6), conv3
        ],
                          name='up7',
                          axis=3)
        up7 = Dropout(self.DropP)(up7)

        conv7 = Conv2D(128, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(self.l2_lambda))(up7)
        conv7 = bn()(conv7)
        conv7 = Conv2D(128, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv7)
        conv7 = bn()(conv7)

        up8 = concatenate([
            Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7),
            conv2
        ],
                          name='up8',
                          axis=3)
        up8 = Dropout(self.DropP)(up8)

        conv8 = Conv2D(64, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(self.l2_lambda))(up8)
        conv8 = bn()(conv8)
        conv8 = Conv2D(64, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv8)
        conv8 = bn()(conv8)

        up9 = concatenate([
            Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8),
            conv1
        ],
                          name='up9',
                          axis=3)
        up9 = Dropout(self.DropP)(up9)

        conv9 = Conv2D(32, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(self.l2_lambda))(up9)
        conv9 = bn()(conv9)
        conv9 = Conv2D(32, (self.kernel_size, self.kernel_size),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=regularizers.l2(
                           self.l2_lambda))(conv9)
        conv9 = bn()(conv9)

        conv10 = Conv2D(3, (1, 1), activation='sigmoid', name='conv10')(conv9)

        model = Model(inputs=inputs, outputs=conv10)
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=dice_coef_loss,
                      metrics=[dice_coef])
        model.summary()

        return model
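
The unet() method reads self.kernel_size, self.l2_lambda and self.DropP, and relies on dice_coef and dice_coef_loss being available at module level. A short harness sketch with assumed attribute values (the class name is a placeholder):

class UNetBuilder:
    """Hypothetical owner of the unet() method shown above."""
    def __init__(self, kernel_size=3, l2_lambda=0.0002, DropP=0.3):
        self.kernel_size = kernel_size
        self.l2_lambda = l2_lambda
        self.DropP = DropP

# builder = UNetBuilder()
# model = builder.unet(input_shape=(256, 256, 1))   # spatial dims must be divisible by 16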
Example No. 10
    def get_model_seq():
        model = Sequential()

        if convLayer > 0:
            model.add(
                Convolution1D(convLayer,
                              3,
                              padding='same',
                              input_shape=(x_train_cv.shape[1],
                                           x_train_cv.shape[2])))
            model.add(AveragePooling1D(pool_size=3))
            #model.add(Conv1D(1, 3, padding='same', input_shape=(x_train_cv.shape[1], x_train_cv.shape[2])))
            model.add(Flatten())
        if csfLayer > 0:
            model.add(
                CSF3var(force_unitcenter=True,
                        input_shape=(x_train_cv.shape[1], x_train_cv.shape[2],
                                     x_train_cv.shape[3])))
            model.add(AveragePooling1D(pool_size=(3)))
            model.add(
                AveragePooling1D(pool_size=(3), data_format='channels_first'))
            model.add(Flatten())

        if database == 3:
            model.add(
                Flatten(input_shape=(
                    x_train_cv.shape[1],
                    x_train_cv.shape[2],
                )))
            model.add(
                Dense(no_neurons,
                      activation='relu',
                      input_shape=(int(x_train_cv.shape[1] *
                                       x_train_cv.shape[2]), )))
        else:
            model.add(
                Dense(no_neurons,
                      activation='relu',
                      input_shape=(x_train_cv.shape[1], )))
        if bn_active:
            model.add(bn())

        for i in range(no_layers - 2):
            model.add(Dense(no_neurons, activation='relu'))
            if bn_active:
                model.add(bn())

        model.add(Dense(no_neurons, activation='relu'))
        model.add(Dense(num_classes, activation='sigmoid'))

        adam = optimizers.Adam(lr=0.001,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               decay=0.0,
                               amsgrad=False)
        if database in [0, 1, 2, 6]:
            model.compile(optimizer=adam,
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
        elif database in [4, 5]:
            model.compile(optimizer=adam, loss='mean_squared_error')
        else:
            model.compile(optimizer=adam,
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
        model.summary()

        return model
Example No. 11
def UNet(input_shape,learn_rate=1e-3):
    l2_lambda = 0.0002
    DropP = 0.3
    kernel_size=3

    inputs = Input(input_shape)

    conv1 = Conv2D( 32, (kernel_size, kernel_size), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(inputs)
    
    
    conv1 = bn()(conv1)
    
    conv1 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv1)

    conv1 = bn()(conv1)
    
    
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    pool1 = Dropout(DropP)(pool1)





    conv2 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(pool1)
    
    conv2 = bn()(conv2)

    conv2 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv2)

    conv2 = bn()(conv2)
    
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    pool2 = Dropout(DropP)(pool2)



    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(pool2)

    conv3 = bn()(conv3)
    
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv3)
    
    conv3 = bn()(conv3)

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    pool3 = Dropout(DropP)(pool3)



    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(pool3)
    conv4 = bn()(conv4)
    
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv4)
    
    conv4 = bn()(conv4)
    
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    pool4 = Dropout(DropP)(pool4)



    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(pool4)
    
    conv5 = bn()(conv5)

    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv5)

    conv5 = bn()(conv5)
    
    up6 = concatenate([Conv2DTranspose(256,(2, 2), strides=(2, 2), padding='same')(conv5), conv4],name='up6', axis=3)

    up6 = Dropout(DropP)(up6)


    conv6 = Conv2D(256,(3, 3), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(up6)
    
    conv6 = bn()(conv6)

    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv6)

    conv6 = bn()(conv6)

    up7 = concatenate([Conv2DTranspose(128,(2, 2), strides=(2, 2), padding='same')(conv6), conv3],name='up7', axis=3)

    up7 = Dropout(DropP)(up7)

    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(up7)

    conv7 = bn()(conv7)
    
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv7)

    conv7 = bn()(conv7)

    up8 = concatenate([Conv2DTranspose(64,(2, 2), strides=(2, 2), padding='same')(conv7), conv2],name='up8', axis=3)

    up8 = Dropout(DropP)(up8)

    conv8 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same', 
                   kernel_regularizer=regularizers.l2(l2_lambda) )(up8)

    conv8 = bn()(conv8)

    
    conv8 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv8)

    conv8 = bn()(conv8)

    up9 = concatenate([Conv2DTranspose(32,(2, 2), strides=(2, 2), padding='same')(conv8), conv1],name='up9',axis=3)

    up9 = Dropout(DropP)(up9)

    conv9 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(up9)
    
    conv9 = bn()(conv9)

    conv9 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda) )(conv9)
   
    conv9 = bn()(conv9)
   
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def Comp_U_net(input_shape, learn_rate=1e-3):

    l2_lambda = 0.0002
    DropP = 0.3
    kernel_size = 3

    inputs = Input(input_shape, name='ip0')

    conv0a = Conv3D(64, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(inputs)

    conv0a = bn()(conv0a)

    conv0b = Conv3D(64, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv0a)

    conv0b = bn()(conv0b)

    pool0 = MaxPooling3D(pool_size=(2, 2, 2))(conv0b)

    pool0 = Dropout(DropP)(pool0)

    conv1a = Conv3D(128, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool0)

    conv1a = bn()(conv1a)

    conv1b = Conv3D(128, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv1a)

    conv1b = bn()(conv1b)

    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1b)

    pool1 = Dropout(DropP)(pool1)

    conv2a = Conv3D(256, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool1)

    conv2a = bn()(conv2a)

    conv2b = Conv3D(256, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv2a)

    conv2b = bn()(conv2b)

    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2b)

    pool2 = Dropout(DropP)(pool2)

    conv5b = Conv3D(512, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool2)

    conv5b = bn()(conv5b)

    up6 = concatenate([
        Conv3DTranspose(256,
                        (2, 2, 2), strides=(2, 2, 2), padding='same')(conv5b),
        (conv2b)
    ],
                      name='up6',
                      axis=3)

    up6 = Dropout(DropP)(up6)

    conv6a = Conv3D(256, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up6)

    conv6a = bn()(conv6a)

    up7 = concatenate([
        Conv3DTranspose(128,
                        (2, 2, 2), strides=(2, 2, 2), padding='same')(conv6a),
        (conv1b)
    ],
                      name='up7',
                      axis=3)

    up7 = Dropout(DropP)(up7)
    #add second output here

    conv7a = Conv3D(128, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up7)

    conv7a = bn()(conv7a)

    up8 = concatenate([
        Conv3DTranspose(64,
                        (2, 2, 2), strides=(2, 2, 2), padding='same')(conv7a),
        (conv0b)
    ],
                      name='up8',
                      axis=3)

    up8 = Dropout(DropP)(up8)

    conv8a = Conv3D(64, (kernel_size, kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up8)

    conv8a = bn()(conv8a)

    final_op = Conv3D(1, (1, 1, 1), activation='sigmoid',
                      name='final_op')(conv8a)

    #----------------------------------------------------------------------------------------------------------------------------------

    #second branch - brain
    xup6 = concatenate([
        Conv3DTranspose(256,
                        (2, 2, 2), strides=(2, 2, 2), padding='same')(conv5b),
        (conv2b)
    ],
                       name='xup6',
                       axis=3)

    xup6 = Dropout(DropP)(xup6)

    xconv6a = Conv3D(256, (kernel_size, kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup6)

    xconv6a = bn()(xconv6a)

    xup7 = concatenate([
        Conv3DTranspose(128,
                        (2, 2, 2), strides=(2, 2, 2), padding='same')(xconv6a),
        (conv1b)
    ],
                       name='xup7',
                       axis=3)

    xup7 = Dropout(DropP)(xup7)

    xconv7a = Conv3D(128, (kernel_size, kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup7)

    xconv7a = bn()(xconv7a)

    xup8 = concatenate([
        Conv3DTranspose(64,
                        (2, 2, 2), strides=(2, 2, 2), padding='same')(xconv7a),
        (conv0b)
    ],
                       name='xup8',
                       axis=3)

    xup8 = Dropout(DropP)(xup8)
    # add third xoutput here

    xconv8a = Conv3D(64, (kernel_size, kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup8)

    xconv8a = bn()(xconv8a)

    xfinal_op = Conv3D(1, (1, 1, 1), activation='sigmoid',
                       name='xfinal_op')(xconv8a)

    #-----------------------------third branch

    #Concatenation fed to the reconstruction layer of all 3

    x_u_net_op0 = keras.layers.concatenate(
        [final_op, xfinal_op,
         keras.layers.add([final_op, xfinal_op])],
        name='res_a')

    #multiply with input

    res_1_conv0a = Conv3D(
        64, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(x_u_net_op0)

    res_1_conv0a = bn()(res_1_conv0a)

    res_1_conv0b = Conv3D(
        64, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv0a)

    res_1_conv0b = bn()(res_1_conv0b)

    res_1_pool0 = MaxPooling3D(pool_size=(2, 2, 2))(res_1_conv0b)

    res_1_pool0 = Dropout(DropP)(res_1_pool0)

    res_1_conv1a = Conv3D(
        128, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool0)

    res_1_conv1a = bn()(res_1_conv1a)

    res_1_conv1b = Conv3D(
        128, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv1a)

    res_1_conv1b = bn()(res_1_conv1b)

    res_1_pool1 = MaxPooling3D(pool_size=(2, 2, 2))(res_1_conv1b)

    res_1_pool1 = Dropout(DropP)(res_1_pool1)

    res_1_conv2a = Conv3D(
        256, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool1)

    res_1_conv2a = bn()(res_1_conv2a)

    res_1_conv2b = Conv3D(
        256, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv2a)

    res_1_conv2b = bn()(res_1_conv2b)

    res_1_pool2 = MaxPooling3D(pool_size=(2, 2, 2))(res_1_conv2b)

    res_1_pool2 = Dropout(DropP)(res_1_pool2)

    res_1_conv5b = Conv3D(
        512, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool2)

    res_1_conv5b = bn()(res_1_conv5b)

    res_1_up6 = concatenate([
        Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(res_1_conv5b), (res_1_conv2b)
    ],
                            name='res_1_up6',
                            axis=3)

    res_1_up6 = Dropout(DropP)(res_1_up6)

    res_1_conv6a = Conv3D(
        256, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up6)

    res_1_conv6a = bn()(res_1_conv6a)

    res_1_up7 = concatenate([
        Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(res_1_conv6a), (res_1_conv1b)
    ],
                            name='res_1_up7',
                            axis=3)

    res_1_up7 = Dropout(DropP)(res_1_up7)
    #add second res_1_output here
    res_1_conv7a = Conv3D(
        128, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up7)

    res_1_conv7a = bn()(res_1_conv7a)

    res_1_up8 = concatenate([
        Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(res_1_conv7a), (res_1_conv0b)
    ],
                            name='res_1_up8',
                            axis=3)

    res_1_up8 = Dropout(DropP)(res_1_up8)
    # add third output here
    res_1_conv8a = Conv3D(
        64, (kernel_size, kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up8)

    res_1_conv8a = bn()(res_1_conv8a)

    res_1_final_op = Conv3D(1, (1, 1, 1),
                            activation='sigmoid',
                            name='res_1_final_op')(res_1_conv8a)

    model = Model(inputs=[inputs],
                  outputs=[
                      final_op,
                      xfinal_op,
                      res_1_final_op,
                  ])
    #res_2_final_op,
    #res_2_xfinal_op,
    #res_3_final_op,])
    #sgd = optimizers.SGD(lr=0.01, decay=1e-8, momentum=0.8, nesterov=True)
    model.compile(optimizer=keras.optimizers.Adam(lr=5e-5),
                  loss={
                      'final_op': neg_dice_coef_loss,
                      'xfinal_op': dice_coef_loss,
                      'res_1_final_op': 'mse'
                  })
    #'res_2_final_op':neg_dice_coef_loss,
    #'res_2_xfinal_op':dice_coef_loss,
    #'res_3_final_op':'mse'})
    print(model.summary())
    return model
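
Comp_U_net builds a three-output model (final_op, xfinal_op, res_1_final_op) and already compiles it with one loss per head, so fitting it needs one target array per output; dice_coef_loss and neg_dice_coef_loss must exist at module scope for the function to run. A hedged sketch that reads the target shapes off the built model rather than assuming them:

import numpy as np

model = Comp_U_net(input_shape=(64, 64, 64, 1))   # spatial dims divisible by 8

x = np.random.rand(2, 64, 64, 64, 1).astype('float32')
# One random filler target per named output, shaped to match that head.
targets = {name: np.random.rand(2, *shape[1:]).astype('float32')
           for name, shape in zip(model.output_names, model.output_shape)}
# model.fit(x, targets, batch_size=1, epochs=1)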
Example No. 13
    def UNet(self, input_shape, learn_rate=1e-3):
        l2_lambda = 0.0002
        DropP = 0.3
        kernel_size = 3

        inputs = Input(input_shape)

        conv1a = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(inputs)

        conv1a = bn()(conv1a)

        conv1b = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(conv1a)

        conv1b = bn()(conv1b)

        merge1 = concatenate([conv1a, conv1b])

        conv1c = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

        conv1c = bn()(conv1c)

        merge2 = concatenate([conv1a, conv1b, conv1c])

        conv1d = Conv2D(32, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

        conv1d = bn()(conv1d)

        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1d)

        pool1 = Dropout(DropP)(pool1)

        #############################

        conv2a = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(pool1)

        conv2a = bn()(conv2a)

        conv2b = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(conv2a)

        conv2b = bn()(conv2b)

        merge1 = concatenate([conv2a, conv2b])

        conv2c = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

        conv2c = bn()(conv2c)

        merge2 = concatenate([conv2a, conv2b, conv2c])

        conv2d = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

        conv2d = bn()(conv2d)

        merge3 = concatenate([conv2a, conv2b, conv2c, conv2d])

        conv2e = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

        conv2e = bn()(conv2e)

        merge4 = concatenate([conv2a, conv2b, conv2c, conv2d, conv2e])

        conv2f = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

        conv2f = bn()(conv2f)

        merge5 = concatenate([conv2a, conv2b, conv2c, conv2d, conv2e, conv2f])

        conv2g = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

        conv2g = bn()(conv2g)

        merge6 = concatenate(
            [conv2a, conv2b, conv2c, conv2d, conv2e, conv2f, conv2g])

        conv2h = Conv2D(64, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

        conv2h = bn()(conv2h)

        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2h)

        pool2 = Dropout(DropP)(pool2)

        #############################

        conv3a = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(pool2)

        conv3a = bn()(conv3a)

        conv3b = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(conv3a)

        conv3b = bn()(conv3b)

        merge1 = concatenate([conv3a, conv3b])

        conv3c = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

        conv3c = bn()(conv3c)

        merge2 = concatenate([conv3a, conv3b, conv3c])

        conv3d = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

        conv3d = bn()(conv3d)

        merge3 = concatenate([conv3a, conv3b, conv3c, conv3d])

        conv3e = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

        conv3e = bn()(conv3e)

        merge4 = concatenate([conv3a, conv3b, conv3c, conv3d, conv3e])

        conv3f = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

        conv3f = bn()(conv3f)

        merge5 = concatenate([conv3a, conv3b, conv3c, conv3d, conv3e, conv3f])

        conv3g = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

        conv3g = bn()(conv3g)

        merge6 = concatenate(
            [conv3a, conv3b, conv3c, conv3d, conv3e, conv3f, conv3g])

        conv3h = Conv2D(128, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

        conv3h = bn()(conv3h)

        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3h)

        pool3 = Dropout(DropP)(pool3)

        #############################
        conv4a = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(pool3)

        conv4a = bn()(conv4a)

        conv4b = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(conv4a)

        conv4b = bn()(conv4b)

        merge1 = concatenate([conv4a, conv4b])

        conv4c = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

        conv4c = bn()(conv4c)

        merge2 = concatenate([conv4a, conv4b, conv4c])

        conv4d = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

        conv4d = bn()(conv4d)

        merge3 = concatenate([conv4a, conv4b, conv4c, conv4d])

        conv4e = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

        conv4e = bn()(conv4e)

        merge4 = concatenate([conv4a, conv4b, conv4c, conv4d, conv4e])

        conv4f = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

        conv4f = bn()(conv4f)

        merge5 = concatenate([conv4a, conv4b, conv4c, conv4d, conv4e, conv4f])

        conv4g = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

        conv4g = bn()(conv4g)

        merge6 = concatenate(
            [conv4a, conv4b, conv4c, conv4d, conv4e, conv4f, conv4g])

        conv4h = Conv2D(256, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

        conv4h = bn()(conv4h)

        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4h)

        pool4 = Dropout(DropP)(pool4)

        #############################
        conv5a = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(pool4)

        conv5a = bn()(conv5a)

        conv5b = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(conv5a)

        conv5b = bn()(conv5b)

        merge1 = concatenate([conv5a, conv5b])

        conv5c = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge1)

        conv5c = bn()(conv5c)

        merge2 = concatenate([conv5a, conv5b, conv5c])

        conv5d = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge2)

        conv5d = bn()(conv5d)

        merge3 = concatenate([conv5a, conv5b, conv5c, conv5d])

        conv5e = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge3)

        conv5e = bn()(conv5e)

        merge4 = concatenate([conv5a, conv5b, conv5c, conv5d, conv5e])

        conv5f = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge4)

        conv5f = bn()(conv5f)

        merge5 = concatenate([conv5a, conv5b, conv5c, conv5d, conv5e, conv5f])

        conv5g = Conv2D(12, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge5)

        conv5g = bn()(conv5g)

        merge6 = concatenate(
            [conv5a, conv5b, conv5c, conv5d, conv5e, conv5f, conv5g])

        conv5h = Conv2D(512, (kernel_size, kernel_size),
                        activation='relu',
                        padding='same',
                        kernel_regularizer=regularizers.l2(l2_lambda))(merge6)

        conv5h = bn()(conv5h)

        flatten_block = Flatten()(conv5h)

        #####################################
        #branch 2
        inputtwo = Input(shape=(1, ), dtype='float32', name='inputtwo')
        #xmerge1=concatenate([flatten_block,inputtwo])
        #####################################
        #branch 3
        xinputtwo = Input(shape=(1000, ), dtype='float32', name='xinputtwo')
        xlayerone = Dense(32, activation='relu')(xinputtwo)
        xlayertwo = Dense(64, activation='relu')(xlayerone)

        xlayerthree = Dense(128, activation='relu')(xlayertwo)

        xlayerfour = Dense(256, activation='relu')(xlayerthree)
        ########################################
        final_merge = concatenate([flatten_block, inputtwo, xlayerfour])
        # merge the three branches: flattened conv features, the scalar input, and the dense branch

        after_merger_layers_1 = Dense(32, activation='relu')(final_merge)
        after_merger_layers_2 = Dense(64,
                                      activation='relu')(after_merger_layers_1)
        after_merger_layers_3 = Dense(128,
                                      activation='relu')(after_merger_layers_2)
        after_merger_layers_4 = Dense(256,
                                      activation='relu')(after_merger_layers_3)

        final_op = Dense(15000, activation='softmax',
                         name='final_op')(after_merger_layers_4)

        model = Model(inputs=[inputs, inputtwo, xinputtwo], outputs=final_op)
        model.compile(optimizer='adagrad',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()

        return model
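# Minimal sketch of feeding the three-input model above with fit(); the image
# shape and sample count below are illustrative assumptions, not taken from
# this example (the real image shape is whatever was passed to the Input()
# layer named `inputs` earlier).
import numpy as np

n_samples = 8
x_img = np.random.rand(n_samples, 64, 64, 1).astype('float32')     # placeholder image shape
x_scalar = np.random.rand(n_samples, 1).astype('float32')          # matches inputtwo
x_vector = np.random.rand(n_samples, 1000).astype('float32')       # matches xinputtwo
y = np.zeros((n_samples, 15000), dtype='float32')                  # one-hot targets
y[np.arange(n_samples), np.random.randint(0, 15000, size=n_samples)] = 1.0

# `model` is the Model returned above; the list order must match
# [inputs, inputtwo, xinputtwo].
model.fit([x_img, x_scalar, x_vector], y, batch_size=4, epochs=1)
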
def u_net(input_shape, dropout_rate, l2_lambda):

    # Encoder
    input = Input(shape=input_shape, name="input")
    conv1_1 = Conv2D(32, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv1_1")(input)
    conv1_1 = bn(name="conv1_1_bn")(conv1_1)
    conv1_2 = Conv2D(32, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv1_2")(conv1_1)
    conv1_2 = bn(name="conv1_2_bn")(conv1_2)
    pool1 = MaxPooling2D(name="pool1")(conv1_2)
    drop1 = Dropout(dropout_rate)(pool1)

    conv2_1 = Conv2D(64, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv2_1")(pool1)
    conv2_1 = bn(name="conv2_1_bn")(conv2_1)
    conv2_2 = Conv2D(64, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv2_2")(conv2_1)
    conv2_2 = bn(name="conv2_2_bn")(conv2_2)
    pool2 = MaxPooling2D(name="pool2")(conv2_2)
    drop2 = Dropout(dropout_rate)(pool2)

    conv3_1 = Conv2D(128, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv3_1")(pool2)
    conv3_1 = bn(name="conv3_1_bn")(conv3_1)
    conv3_2 = Conv2D(128, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv3_2")(conv3_1)
    conv3_2 = bn(name="conv3_2_bn")(conv3_2)
    pool3 = MaxPooling2D(name="pool3")(conv3_2)
    drop3 = Dropout(dropout_rate)(pool3)

    conv4_1 = Conv2D(256, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv4_1")(pool3)
    conv4_1 = bn(name="conv4_1_bn")(conv4_1)
    conv4_2 = Conv2D(256, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv4_2")(conv4_1)
    conv4_2 = bn(name="conv4_2_bn")(conv4_2)
    pool4 = MaxPooling2D(name="pool4")(conv4_2)
    drop4 = Dropout(dropout_rate)(pool4)

    conv5_1 = Conv2D(512, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv5_1")(pool4)
    conv5_1 = bn(name="conv5_1_bn")(conv5_1)
    conv5_2 = Conv2D(512, (3, 3),
                     padding="same",
                     activation='relu',
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv5_2")(conv5_1)
    conv5_2 = bn(name="conv5_2_bn")(conv5_2)

    # Decoder
    upconv6 = Conv2DTranspose(256, (2, 2), strides=(2, 2),
                              padding='same')(conv5_2)
    upconv6 = Dropout(dropout_rate)(upconv6)
    concat6 = concatenate([conv4_2, upconv6], name="concat6")
    conv6_1 = Conv2D(256, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv6_1")(concat6)
    conv6_1 = bn(name="conv6_1_bn")(conv6_1)
    conv6_2 = Conv2D(256, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv6_2")(conv6_1)
    conv6_2 = bn(name="conv6_2_bn")(conv6_2)

    upconv7 = Conv2DTranspose(128, (2, 2), strides=(2, 2),
                              padding='same')(conv6_2)
    upconv7 = Dropout(dropout_rate)(upconv7)
    concat7 = concatenate([conv3_2, upconv7], name="concat7")
    conv7_1 = Conv2D(128, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv7_1")(concat7)
    conv7_1 = bn(name="conv7_1_bn")(conv7_1)
    conv7_2 = Conv2D(128, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv7_2")(conv7_1)
    conv7_2 = bn(name="conv7_2_bn")(conv7_2)

    upconv8 = Conv2DTranspose(64, (2, 2), strides=(2, 2),
                              padding='same')(conv7_2)
    upconv8 = Dropout(dropout_rate)(upconv8)
    concat8 = concatenate([conv2_2, upconv8], name="concat8")
    conv8_1 = Conv2D(64, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv8_1")(concat8)
    conv8_1 = bn(name="conv8_1_bn")(conv8_1)
    conv8_2 = Conv2D(64, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv8_2")(conv8_1)
    conv8_2 = bn(name="conv8_2_bn")(conv8_2)

    upconv9 = Conv2DTranspose(32, (2, 2), strides=(2, 2),
                              padding='same')(conv8_2)
    upconv9 = Dropout(dropout_rate)(upconv9)
    concat9 = concatenate([conv1_2, upconv9], name="concat9")
    conv9_1 = Conv2D(32, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv9_1")(concat9)
    conv9_1 = bn(name="conv9_1_bn")(conv9_1)
    conv9_2 = Conv2D(32, (3, 3),
                     padding="same",
                     kernel_regularizer=regularizers.l2(l2_lambda),
                     name="conv9_2")(conv9_1)
    conv9_2 = bn(name="conv9_2_bn")(conv9_2)
    dropout = Dropout(dropout_rate)(conv9_2)

    conv10 = Conv2D(1, (1, 1),
                    padding="same",
                    activation='sigmoid',
                    name="conv10")(dropout)

    model = Model(input, conv10)

    return model
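# Minimal usage sketch for u_net (the shape, learning settings, and data names
# below are illustrative assumptions, not taken from this example). The input
# sides should be divisible by 16 because of the four pooling stages.
seg_model = u_net(input_shape=(256, 256, 1), dropout_rate=0.3, l2_lambda=0.0002)
seg_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
seg_model.summary()
# seg_model.fit(x_images, y_masks, batch_size=8, epochs=50)
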
Exemplo n.º 15
0
def Comp_U_Net(input_shape, learn_rate=1e-3):

    l2_lambda = 0.0002
    DropP = 0.3
    kernel_size = 3

    inputs = Input(input_shape, name='ip0')

    conv0a = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(inputs)

    conv0a = bn()(conv0a)

    conv0b = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv0a)

    conv0b = bn()(conv0b)

    pool0 = MaxPooling2D(pool_size=(2, 2))(conv0b)

    pool0 = Dropout(DropP)(pool0)

    conv1a = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool0)

    conv1a = bn()(conv1a)

    conv1b = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv1a)

    conv1b = bn()(conv1b)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1b)

    pool1 = Dropout(DropP)(pool1)

    conv2a = Conv2D(64, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool1)

    conv2a = bn()(conv2a)

    conv2b = Conv2D(64, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv2a)

    conv2b = bn()(conv2b)

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2b)

    pool2 = Dropout(DropP)(pool2)

    conv3a = Conv2D(128, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool2)

    conv3a = bn()(conv3a)

    conv3b = Conv2D(128, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv3a)

    conv3b = bn()(conv3b)

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3b)

    pool3 = Dropout(DropP)(pool3)

    conv4a = Conv2D(256, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool3)

    conv4a = bn()(conv4a)

    conv4b = Conv2D(256, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv4a)

    conv4b = bn()(conv4b)

    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4b)

    pool4 = Dropout(DropP)(pool4)

    conv5a = Conv2D(512, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(pool4)

    conv5a = bn()(conv5a)

    conv5b = Conv2D(512, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv5a)

    conv5b = bn()(conv5b)

    up6 = concatenate([
        Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5b),
        (conv4b)
    ],
                      name='up6',
                      axis=3)

    up6 = Dropout(DropP)(up6)

    conv6a = Conv2D(256, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up6)

    conv6a = bn()(conv6a)

    conv6b = Conv2D(256, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv6a)

    conv6b = bn()(conv6b)

    up7 = concatenate([
        Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6b),
        (conv3b)
    ],
                      name='up7',
                      axis=3)

    up7 = Dropout(DropP)(up7)
    #add second output here

    conv7a = Conv2D(128, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up7)

    conv7a = bn()(conv7a)

    conv7b = Conv2D(128, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv7a)

    conv7b = bn()(conv7b)

    up8 = concatenate([
        Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7b),
        (conv2b)
    ],
                      name='up8',
                      axis=3)

    up8 = Dropout(DropP)(up8)

    conv8a = Conv2D(64, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up8)

    conv8a = bn()(conv8a)

    conv8b = Conv2D(64, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv8a)

    conv8b = bn()(conv8b)

    up9 = concatenate([
        Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8b),
        (conv1b)
    ],
                      name='up9',
                      axis=3)

    conv9a = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(up9)

    conv9a = bn()(conv9a)

    conv9b = Conv2D(32, (kernel_size, kernel_size),
                    activation='relu',
                    padding='same',
                    kernel_regularizer=regularizers.l2(l2_lambda))(conv9a)

    conv9b = bn()(conv9b)

    up10 = concatenate([
        Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv9b),
        (conv0b)
    ],
                       name='up10',
                       axis=3)

    conv10a = Conv2D(32, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(up10)

    conv10a = bn()(conv10a)

    conv10b = Conv2D(32, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(conv10a)

    conv10b = bn()(conv10b)

    final_op = Conv2D(1, (1, 1), activation='sigmoid',
                      name='final_op')(conv10b)

    #----------------------------------------------------------------------------------------------------------------------------------

    #second branch - brain
    xup6 = concatenate([
        Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5b),
        (conv4b)
    ],
                       name='xup6',
                       axis=3)

    xup6 = Dropout(DropP)(xup6)

    xconv6a = Conv2D(256, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup6)

    xconv6a = bn()(xconv6a)

    xconv6b = Conv2D(256, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xconv6a)

    xconv6b = bn()(xconv6b)

    xup7 = concatenate([
        Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(xconv6b),
        (conv3b)
    ],
                       name='xup7',
                       axis=3)

    xup7 = Dropout(DropP)(xup7)

    xconv7a = Conv2D(128, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup7)

    xconv7a = bn()(xconv7a)

    xconv7b = Conv2D(128, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xconv7a)

    xconv7b = bn()(xconv7b)

    xup8 = concatenate([
        Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(xconv7b),
        (conv2b)
    ],
                       name='xup8',
                       axis=3)

    xup8 = Dropout(DropP)(xup8)
    # add third output here

    xconv8a = Conv2D(64, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup8)

    xconv8a = bn()(xconv8a)

    xconv8b = Conv2D(64, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xconv8a)

    xconv8b = bn()(xconv8b)

    xup9 = concatenate([
        Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(xconv8b),
        (conv1b)
    ],
                       name='xup9',
                       axis=3)

    xup9 = Dropout(DropP)(xup9)

    xconv9a = Conv2D(32, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xup9)

    xconv9a = bn()(xconv9a)

    xconv9b = Conv2D(32, (kernel_size, kernel_size),
                     activation='relu',
                     padding='same',
                     kernel_regularizer=regularizers.l2(l2_lambda))(xconv9a)

    xconv9b = bn()(xconv9b)

    xup10 = concatenate([
        Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(xconv9b),
        (conv0b)
    ],
                        name='xup10',
                        axis=3)

    xup10 = Dropout(DropP)(xup10)

    xconv10a = Conv2D(32, (kernel_size, kernel_size),
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l2(l2_lambda))(xup10)

    xconv10a = bn()(xconv10a)

    xconv10b = Conv2D(32, (kernel_size, kernel_size),
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l2(l2_lambda))(xconv10a)

    xconv10b = bn()(xconv10b)

    xfinal_op = Conv2D(1, (1, 1), activation='sigmoid',
                       name='xfinal_op')(xconv10b)

    #----------------------------- third branch: reconstruction

    # The reconstruction branch takes the concatenation of both segmentation
    # outputs and their element-wise sum.

    x_u_net_op0 = keras.layers.concatenate(
        [final_op, xfinal_op,
         keras.layers.add([final_op, xfinal_op])],
        name='res_a')

    res_1_conv0a = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(x_u_net_op0)

    res_1_conv0a = bn()(res_1_conv0a)

    res_1_conv0b = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv0a)

    res_1_conv0b = bn()(res_1_conv0b)

    res_1_pool0 = MaxPooling2D(pool_size=(2, 2))(res_1_conv0b)

    res_1_pool0 = Dropout(DropP)(res_1_pool0)

    res_1_conv1a = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool0)

    res_1_conv1a = bn()(res_1_conv1a)

    res_1_conv1b = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv1a)

    res_1_conv1b = bn()(res_1_conv1b)

    res_1_pool1 = MaxPooling2D(pool_size=(2, 2))(res_1_conv1b)

    res_1_pool1 = Dropout(DropP)(res_1_pool1)

    res_1_conv2a = Conv2D(
        64, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool1)

    res_1_conv2a = bn()(res_1_conv2a)

    res_1_conv2b = Conv2D(
        64, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv2a)

    res_1_conv2b = bn()(res_1_conv2b)

    res_1_pool2 = MaxPooling2D(pool_size=(2, 2))(res_1_conv2b)

    res_1_pool2 = Dropout(DropP)(res_1_pool2)

    res_1_conv3a = Conv2D(
        128, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool2)

    res_1_conv3a = bn()(res_1_conv3a)

    res_1_conv3b = Conv2D(
        128, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv3a)

    res_1_conv3b = bn()(res_1_conv3b)

    res_1_pool3 = MaxPooling2D(pool_size=(2, 2))(res_1_conv3b)

    res_1_pool3 = Dropout(DropP)(res_1_pool3)

    res_1_conv4a = Conv2D(
        256, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool3)

    res_1_conv4a = bn()(res_1_conv4a)

    res_1_conv4b = Conv2D(
        256, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv4a)

    res_1_conv4b = bn()(res_1_conv4b)

    res_1_pool4 = MaxPooling2D(pool_size=(2, 2))(res_1_conv4b)

    res_1_pool4 = Dropout(DropP)(res_1_pool4)

    res_1_conv5a = Conv2D(
        512, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_pool4)

    res_1_conv5a = bn()(res_1_conv5a)

    res_1_conv5b = Conv2D(
        512, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv5a)

    res_1_conv5b = bn()(res_1_conv5b)

    res_1_up6 = concatenate([
        Conv2DTranspose(256,
                        (2, 2), strides=(2, 2), padding='same')(res_1_conv5b),
        (res_1_conv4b)
    ],
                            name='res_1_up6',
                            axis=3)

    res_1_up6 = Dropout(DropP)(res_1_up6)

    res_1_conv6a = Conv2D(
        256, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up6)

    res_1_conv6a = bn()(res_1_conv6a)

    res_1_conv6b = Conv2D(
        256, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv6a)

    res_1_conv6b = bn()(res_1_conv6b)

    res_1_up7 = concatenate([
        Conv2DTranspose(128,
                        (2, 2), strides=(2, 2), padding='same')(res_1_conv6b),
        (res_1_conv3b)
    ],
                            name='res_1_up7',
                            axis=3)

    res_1_up7 = Dropout(DropP)(res_1_up7)
    #add second res_1_output here
    res_1_conv7a = Conv2D(
        128, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up7)

    res_1_conv7a = bn()(res_1_conv7a)

    res_1_conv7b = Conv2D(
        128, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv7a)

    res_1_conv7b = bn()(res_1_conv7b)

    res_1_up8 = concatenate([
        Conv2DTranspose(64,
                        (2, 2), strides=(2, 2), padding='same')(res_1_conv7b),
        (res_1_conv2b)
    ],
                            name='res_1_up8',
                            axis=3)

    res_1_up8 = Dropout(DropP)(res_1_up8)
    # add third output here
    res_1_conv8a = Conv2D(
        64, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up8)

    res_1_conv8a = bn()(res_1_conv8a)

    res_1_conv8b = Conv2D(
        64, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv8a)

    res_1_conv8b = bn()(res_1_conv8b)

    res_1_up9 = concatenate([
        Conv2DTranspose(32,
                        (2, 2), strides=(2, 2), padding='same')(res_1_conv8b),
        (res_1_conv1b)
    ],
                            name='res_1_up9',
                            axis=3)

    res_1_up9 = Dropout(DropP)(res_1_up9)

    res_1_conv9a = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up9)

    res_1_conv9a = bn()(res_1_conv9a)

    res_1_conv9b = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv9a)

    res_1_conv9b = bn()(res_1_conv9b)

    res_1_up10 = concatenate([
        Conv2DTranspose(32,
                        (2, 2), strides=(2, 2), padding='same')(res_1_conv9b),
        (res_1_conv0b)
    ],
                             name='res_1_up10',
                             axis=3)

    res_1_up10 = Dropout(DropP)(res_1_up10)

    res_1_conv10a = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_up10)

    res_1_conv10a = bn()(res_1_conv10a)

    res_1_conv10b = Conv2D(
        32, (kernel_size, kernel_size),
        activation='relu',
        padding='same',
        kernel_regularizer=regularizers.l2(l2_lambda))(res_1_conv10a)

    res_1_conv10b = bn()(res_1_conv10b)

    res_1_final_op = Conv2D(1, (1, 1),
                            activation='sigmoid',
                            name='res_1_final_op')(res_1_conv10b)

    model = Model(inputs=[inputs],
                  outputs=[
                      final_op,
                      xfinal_op,
                      res_1_final_op,
                  ])

    #print("Training using multiple GPUs...")
    #model = multi_gpu_model(model, gpus=1)

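    # Note: the learn_rate argument of Comp_U_Net is not used below; the
    # optimizer's learning rate is hard-coded to 1e-5.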
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-5),
                  loss={
                      'final_op': dice_coef_loss,
                      'xfinal_op': neg_dice_coef_loss,
                      'res_1_final_op': 'mse'
                  })

    return model
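# The compile() call above refers to dice_coef_loss and neg_dice_coef_loss, which
# are defined elsewhere in the original code. A common Keras-backend formulation
# looks like the sketch below; this is an assumption, not necessarily the
# author's exact definition.
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient on flattened masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)

def neg_dice_coef_loss(y_true, y_pred):
    # Presumably the negation of dice_coef_loss, used for the complementary branch.
    return dice_coef(y_true, y_pred)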
Exemplo n.º 16
0
from keras.models import Sequential
from keras.layers import Activation, Flatten, Dense, Dropout
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization as bn
import numpy as np

labels_new = []
for i in range(len(labels)):
    if int(labels[i]) < 20:
        labels_new.append(0)
    elif int(labels[i]) < 30:
        labels_new.append(1)
    elif int(labels[i]) < 40:
        labels_new.append(2)

model = Sequential()
model.add(Dense(12, input_dim=3, kernel_initializer='uniform', activation='relu'))
model.add(bn())
model.add(Dropout(0.4))

model.add(Dense(10, kernel_initializer='uniform', activation='relu'))
model.add(bn())
model.add(Dropout(0.4))
model.add(Dense(6, kernel_initializer='uniform', activation='relu'))
model.add(bn())
model.add(Dropout(0.4))
model.add(Dense(3, kernel_initializer='uniform', activation='softmax'))
# Compile model
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print(model.predict(np.array([[13.0706, 77.7250, 2]])))
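# The snippet above calls predict() on an untrained network; a fit() call is
# needed first. The feature matrix is not shown in this example, so the array
# below is a hypothetical stand-in with the expected (n_samples, 3) shape.
X_dummy = np.random.rand(len(labels_new), 3)
y_dummy = np.array(labels_new)
model.fit(X_dummy, y_dummy, epochs=10, batch_size=16, verbose=0)
print(model.predict(np.array([[13.0706, 77.7250, 2]])))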
Exemplo n.º 17
0
    def train(self,
              x_train,
              y_train,
              v_train,
              of_train,
              of_mag_train,
              epochs=1200,
              batch_size=4):
        """
        this is the training function for
        :param x_train:
        :param y_train:
        :param v_train : the variance image for the sample
        :param of_train : the optical flow for the sample 
        :return: 
        """
        l2_lambda = 0.0002
        DropP = 0.3
        kernel_size = 3
        input_shape = (640, 640, 1)

        main_input = Input(input_shape, name='main_input')
        var_input = Input(input_shape, name='v_input')
        of_input = Input((640, 640, 3), name='of_input')
        of_mag_input = Input(input_shape, name='of_mag_input')

        input_prob_inverse = Input(input_shape)  # defined but not used by the model below
        if self.trained_model is None:
            merger = concatenate(
                [main_input, var_input, of_input, of_mag_input])

            conv1 = Conv2D(
                32, (kernel_size, kernel_size),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(merger)
            conv1 = bn()(conv1)
            conv1 = Conv2D(
                32, (kernel_size, kernel_size),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv1)
            conv1 = bn()(conv1)
            pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
            pool1 = Dropout(DropP)(pool1)

            conv2 = Conv2D(
                64, (kernel_size, kernel_size),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(pool1)
            conv2 = bn()(conv2)
            conv2 = Conv2D(
                64, (kernel_size, kernel_size),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv2)
            conv2 = bn()(conv2)
            pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
            pool2 = Dropout(DropP)(pool2)

            conv3 = Conv2D(
                128, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(pool2)
            conv3 = bn()(conv3)
            conv3 = Conv2D(
                128, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv3)
            conv3 = bn()(conv3)
            pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
            pool3 = Dropout(DropP)(pool3)

            conv4 = Conv2D(
                256, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(pool3)
            conv4 = bn()(conv4)
            conv4 = Conv2D(
                256, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv4)
            conv4 = bn()(conv4)
            pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
            pool4 = Dropout(DropP)(pool4)

            conv5 = Conv2D(
                512, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(pool4)
            conv5 = bn()(conv5)
            conv5 = Conv2D(
                512, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv5)
            conv5 = bn()(conv5)

            up6 = concatenate([
                Conv2DTranspose(256, (2, 2), strides=(2, 2),
                                padding='same')(conv5), conv4
            ],
                              name='up6',
                              axis=3)
            up6 = Dropout(DropP)(up6)
            conv6 = Conv2D(256, (3, 3),
                           activation='relu',
                           padding='same',
                           kernel_regularizer=regularizers.l2(l2_lambda))(up6)
            conv6 = bn()(conv6)
            conv6 = Conv2D(
                256, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv6)

            conv6 = bn()(conv6)
            up7 = concatenate([
                Conv2DTranspose(128, (2, 2), strides=(2, 2),
                                padding='same')(conv6), conv3
            ],
                              name='up7',
                              axis=3)
            up7 = Dropout(DropP)(up7)
            conv7 = Conv2D(128, (3, 3),
                           activation='relu',
                           padding='same',
                           kernel_regularizer=regularizers.l2(l2_lambda))(up7)
            conv7 = bn()(conv7)
            conv7 = Conv2D(
                128, (3, 3),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv7)
            conv7 = bn()(conv7)

            up8 = concatenate([
                Conv2DTranspose(64, (2, 2), strides=(2, 2),
                                padding='same')(conv7), conv2
            ],
                              name='up8',
                              axis=3)
            up8 = Dropout(DropP)(up8)
            conv8 = Conv2D(64, (kernel_size, kernel_size),
                           activation='relu',
                           padding='same',
                           kernel_regularizer=regularizers.l2(l2_lambda))(up8)
            conv8 = bn()(conv8)
            conv8 = Conv2D(
                64, (kernel_size, kernel_size),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv8)
            conv8 = bn()(conv8)

            up9 = concatenate([
                Conv2DTranspose(32, (2, 2), strides=(2, 2),
                                padding='same')(conv8), conv1
            ],
                              name='up9',
                              axis=3)
            up9 = Dropout(DropP)(up9)
            conv9 = Conv2D(32, (kernel_size, kernel_size),
                           activation='relu',
                           padding='same',
                           kernel_regularizer=regularizers.l2(l2_lambda))(up9)
            conv9 = bn()(conv9)
            conv9 = Conv2D(
                32, (kernel_size, kernel_size),
                activation='relu',
                padding='same',
                kernel_regularizer=regularizers.l2(l2_lambda))(conv9)
            conv9 = bn()(conv9)

            conv10 = Conv2D(1, (1, 1), activation='sigmoid',
                            name='conv10')(conv9)

            model = Model(
                inputs=[main_input, var_input, of_input, of_mag_input],
                outputs=[conv10])
            model.compile(optimizer=Adam(lr=1e-5),
                          loss=Four_Input_UNET_Segmenter.dice_coef_loss,
                          metrics=[Four_Input_UNET_Segmenter.dice_coef])

        else:
            # in case the model was loaded from file, use it as the starting point
            model = self.trained_model

        print(model.summary())

        # training network
        model.fit([x_train, v_train, of_train, of_mag_train], [y_train],
                  batch_size=batch_size,
                  epochs=epochs,
                  shuffle=True)

        # set as class's model to be used for prediction
        self.trained_model = model

        return model
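# Illustrative input shapes for train(), matching the Input() layers above. The
# arrays are dummies; the real data pipeline and the class constructor are not
# part of this example.
import numpy as np

n = 4
x      = np.random.rand(n, 640, 640, 1).astype('float32')   # main_input
v      = np.random.rand(n, 640, 640, 1).astype('float32')   # v_input (variance image)
of     = np.random.rand(n, 640, 640, 3).astype('float32')   # of_input (optical flow)
of_mag = np.random.rand(n, 640, 640, 1).astype('float32')   # of_mag_input
y      = np.random.randint(0, 2, size=(n, 640, 640, 1)).astype('float32')  # masks
# segmenter.train(x, y, v, of, of_mag, epochs=1, batch_size=2)  # `segmenter` is an
# instance of the surrounding class; its constructor is not shown here.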
Exemplo n.º 18
0
2. Using the Keras functional API
'''

# load dataset
dataframe = pandas.read_csv("bj_housing.csv", header=0)
dataset = dataframe.values
# split into input (X) and output (Y) variables
train = dataset[:len(dataset) * 9 // 10]
X_train = np.array([[row[i] for i in range(0, len(row)) if i != 1]
                    for row in train])
Y_train = np.array([[x[1]] for x in train])  # target is column 1
test = dataset[len(dataset) * 9 // 10:]
X_test = np.array([[row[i] for i in range(0, len(row)) if i != 1]
                   for row in test])
Y_test = np.array([[x[1]] for x in test])  # target is column 1
'''
Functional API: each layer is called on a tensor, e.g. x = Dense(...)(x)
bn(): BatchNormalization
'''
inputs = Input(shape=(6, ))
x = Dense(20)(inputs)
x = bn()(x)
x = Activation('relu')(x)
x = Dense(40)(x)
x = bn()(x)
x = Activation('relu')(x)
x = Dense(60)(x)
x = bn()(x)
x = Activation('relu')(x)
x = Dense(40)(x)
x = bn()(x)
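# The example is truncated here. A typical way to finish this functional-API
# regression model (an assumption, not the original author's code) would be:
x = Activation('relu')(x)
outputs = Dense(1)(x)  # single regression output
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='mean_squared_error')
# model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=50, batch_size=32)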
Exemplo n.º 19
0
def dilatedCNN(input_shape, l2_lambda=0.0001, dropP=0.1):
    """
    Implement the multi-scale context aggregation by dilated convolution by Yu et. al. in ICLR 2016
    
    Argument: 
        input_shape: input data shape
        l2_lambda: l2 normalization parameter
        dropP: drop_out rate during training
    
    """

    inputs = Input(input_shape)

    conv1 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=1,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(inputs)

    conv1 = bn()(conv1)

    conv1 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=1,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv1)

    conv1 = bn()(conv1)

    conv2 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=1,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv1)

    conv2 = bn()(conv2)

    conv3 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=2,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv2)

    conv3 = bn()(conv3)

    conv4 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=4,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv3)

    conv4 = bn()(conv4)

    conv5 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=8,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv4)

    conv5 = bn()(conv5)

    conv6 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=16,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv5)

    conv6 = bn()(conv6)

    conv7 = Conv2D(N_featuremaps, (3, 3),
                   dilation_rate=1,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv6)

    conv7 = bn()(conv7)

    conv8 = Conv2D(192, (1, 1),
                   dilation_rate=1,
                   activation='relu',
                   padding='same',
                   kernel_regularizer=regularizers.l2(l2_lambda),
                   kernel_initializer=initializers.he_normal())(conv7)

    conv8 = bn()(conv8)

    conv9 = Conv2D(1, (1, 1), activation='sigmoid')(conv8)

    model = Model(inputs=inputs, outputs=conv9)
    return model