Example #1
def getnet1(inshape):
    print("Building model 1...")
    print("Input shape is", inshape)
    # inshape is (160, 448, 500) = 35 840 000
    model = Sequential()
    model.add(
        Convolution3D(1, 5, 9, 21, input_shape=inshape, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (78, 220, 240) = 4 118 400
    model.add(Convolution3D(1, 7, 11, 21, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (36, 105, 110) = 415 800
    model.add(Convolution3D(1, 7, 6, 11, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (15, 50, 50) = 37 500
    model.add(Convolution3D(1, 6, 9, 9, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (5, 21, 21) = 2 205
    model.add(Flatten())
    model.add(Dense(120, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))

    print("Compiling model...")
    model.compile(RMSprop(lr=0.01), "mse")
    return model
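The examples on this page are shown without their imports. For Example #1, which uses the Keras 1.x-style Convolution3D(filters, dim1, dim2, dim3) signature, a plausible import block (an assumption, not part of the original) would be:

from keras.models import Sequential
from keras.layers import Convolution3D, MaxPooling3D, Flatten, Dense
from keras.optimizers import RMSprop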
	def __init__(self, IMG_FRAMES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, IMG_CLASSES):
		#build the keras model of unet
		#source: U-Net: Convolutional Networks for Biomedical Image Segmentation, by Ronneberger et al
		#make a tensor for the inputs
		self.inputs = Input((IMG_FRAMES, IMG_HEIGHT,IMG_WIDTH,IMG_CHANNELS))

		#keras defines layers of a model, such as lambda, conv2d, etc..
		s = Lambda(lambda x: x/255)(self.inputs) #128x128x1

		#conv2d(filters, kernel_size,options)

		c1 = Conv3D(32,(3,3,3),activation='relu',kernel_initializer='Orthogonal',padding='same')(s) #3*3*3 weights * 1 channel * 32 filters + 32 biases = 896 parameters
		p1 = MaxPooling3D((2,2,2))(c1)

		c2 = Conv3D(64,(3,3,3),activation='relu', kernel_initializer='Orthogonal', padding='same') (p1)#3*3*3 * 32 * 64 + 64 = 55360 parameters
		p2 = MaxPooling3D((2, 2, 2)) (c2)

		c3 = Conv3D(128, (3,3, 3), activation='relu', kernel_initializer='Orthogonal', padding='same') (p2)
		p3 = MaxPooling3D((2,2, 2)) (c3)

		r = Reshape((32,32,128))(p3)
		
		fc = Conv2D(512, (3, 3), kernel_initializer='Orthogonal', padding='same') (r)#16x16x512
		
		u4 = UpSampling2D(size=(2,2))(fc)
		c4 = Conv2DTranspose(128, (3, 3), activation='relu', kernel_initializer='Orthogonal', padding='same') (u4)

		u5 = UpSampling2D(size=(2,2))(c4)
		c5 = Conv2DTranspose(64, (3, 3), activation='relu', kernel_initializer='Orthogonal', padding='same') (u5)

		u6 = UpSampling2D(size=(2,2))(c5)
		c6 = Conv2DTranspose(32, (3, 3), activation='relu', kernel_initializer='Orthogonal', padding='same') (u6)

		self.outputs = Conv2D(IMG_CLASSES, (3, 3), activation='relu', kernel_initializer='Orthogonal', padding='same') (c6)
Example #3
def net3(V_1, V_2, V_3):
    inputs = Input((V_1, V_2, V_3, 1))

    p1 = MaxPooling3D((2, 2, 1))(inputs)

    c1 = Conv3D(64, (2, 2, 1),
                strides=(2, 2, 1),
                activation='relu',
                padding='same')(p1)

    c1 = MaxPooling3D((2, 2, 1))(c1)

    c2 = Conv3D(512, (2, 2, 1),
                strides=(1, 1, 1),
                activation='relu',
                padding='same')(c1)

    c2 = Conv3D(512, (2, 2, 1),
                strides=(2, 2, 1),
                activation='relu',
                padding='same')(c2)

    c3 = MaxPooling3D((2, 2, 3))(c2)

    c4 = Dense(128, activation='linear')(c3)
    c5 = Dense(56, activation='linear')(c4)

    outputs = c5

    return Model(inputs=[inputs], outputs=[outputs])
Example #4
def build_model(dropout_rate=0.2):
    input_image = Input(shape=IMAGE_SHAPE, dtype='float32', name=INPUT_IMAGE)
    x = MaxPooling2D()(input_image)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    x = Conv2D(32, kernel_size=3, strides=(2, 2))(x)
    x = MaxPooling2D()(x)
    x = Conv2D(32, kernel_size=3, strides=(2, 2))(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    image_out = Flatten()(x)
    # image_out = Dense(32, activation='relu')(conv)

    input_lidar_panorama = Input(shape=PANORAMA_SHAPE,
                                 dtype='float32',
                                 name=INPUT_LIDAR_PANORAMA)
    x = pool_and_conv(input_lidar_panorama)
    x = pool_and_conv(x)
    x = Dropout(dropout_rate)(x)
    panorama_out = Flatten()(x)

    input_lidar_slices = Input(shape=SLICES_SHAPE,
                               dtype='float32',
                               name=INPUT_LIDAR_SLICES)
    x = MaxPooling3D(pool_size=(2, 2, 1))(input_lidar_slices)
    x = Conv3D(32, kernel_size=3, strides=(2, 2, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 1))(x)
    x = Dropout(dropout_rate)(x)
    x = Conv3D(32, kernel_size=2, strides=(2, 2, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 1))(x)
    x = Dropout(dropout_rate)(x)
    slices_out = Flatten()(x)

    x = keras.layers.concatenate([image_out, panorama_out, slices_out])

    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)

    pose_output = Dense(9, name=OUTPUT_POSE)(x)

    model = Model(
        inputs=[input_image, input_lidar_panorama, input_lidar_slices],
        outputs=[pose_output])

    # Fix error with TF and Keras
    import tensorflow as tf
    tf.python.control_flow_ops = tf

    model.compile(loss='mean_squared_error', optimizer='adam')

    return model
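pool_and_conv, as well as IMAGE_SHAPE, PANORAMA_SHAPE, SLICES_SHAPE and the INPUT_*/OUTPUT_* names, are defined elsewhere in this project. A minimal sketch of pool_and_conv consistent with how it is called above (an assumption, not the author's definition; the filter count is a guess):

from keras.layers import MaxPooling2D, Conv2D

def pool_and_conv(x):
    # 2x2 max pooling followed by a strided 3x3 convolution,
    # mirroring the camera-image branch above
    x = MaxPooling2D()(x)
    x = Conv2D(32, kernel_size=3, strides=(2, 2))(x)
    return x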
Example #5
def Unet_3d(input_img, n_filters=8, dropout=0.2, batch_norm=True):

    c1 = conv_block(input_img, n_filters, 3, batch_norm)
    p1 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c1)
    p1 = Dropout(dropout)(p1)

    c2 = conv_block(p1, n_filters * 2, 3, batch_norm)
    p2 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c2)
    p2 = Dropout(dropout)(p2)

    c3 = conv_block(p2, n_filters * 4, 3, batch_norm)
    p3 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c3)
    p3 = Dropout(dropout)(p3)

    c4 = conv_block(p3, n_filters * 8, 3, batch_norm)
    p4 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c4)
    p4 = Dropout(dropout)(p4)

    c5 = conv_block(p4, n_filters * 16, 3, batch_norm)

    u6 = Conv3DTranspose(n_filters * 8, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = conv_block(u6, n_filters * 8, 3, batch_norm)
    c6 = Dropout(dropout)(c6)
    u7 = Conv3DTranspose(n_filters * 4, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c6)

    u7 = concatenate([u7, c3])
    c7 = conv_block(u7, n_filters * 4, 3, batch_norm)
    c7 = Dropout(dropout)(c7)
    u8 = Conv3DTranspose(n_filters * 2, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c7)
    u8 = concatenate([u8, c2])

    c8 = conv_block(u8, n_filters * 2, 3, batch_norm)
    c8 = Dropout(dropout)(c8)
    u9 = Conv3DTranspose(n_filters, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c8)

    u9 = concatenate([u9, c1])

    c9 = conv_block(u9, n_filters, 3, batch_norm)
    outputs = Conv3D(4, (1, 1, 1), activation='softmax')(c9)
    print("!!!!!!!!!!!!!!!!!!!")
    print(outputs.shape)
    model = Model(inputs=input_img, outputs=outputs)

    return model
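conv_block is not shown in this example. A sketch that matches the conv_block(tensor, n_filters, kernel_size, batch_norm) calls above (an assumption about the helper, not the author's code):

from keras.layers import Conv3D, BatchNormalization, Activation

def conv_block(x, n_filters, kernel_size=3, batch_norm=True):
    # two 3D convolutions, each optionally followed by batch normalization
    for _ in range(2):
        x = Conv3D(n_filters, (kernel_size,) * 3, padding='same')(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x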
Example #6
def convLSTM_Model_3():

    model = Sequential()
    model.add(
        ConvLSTM2D(filters=32,
                   strides=(1, 1),
                   kernel_size=(7, 7),
                   recurrent_activation='hard_sigmoid',
                   input_shape=(None, None, None, 1),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(
        ConvLSTM2D(filters=16,
                   strides=(1, 1),
                   kernel_size=(7, 7),
                   recurrent_activation='hard_sigmoid',
                   input_shape=(None, None, None, 1),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(
        ConvLSTM2D(filters=16,
                   strides=(1, 1),
                   kernel_size=(7, 7),
                   recurrent_activation='hard_sigmoid',
                   input_shape=(None, None, None, 1),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(
        ConvLSTM2D(filters=16,
                   strides=(1, 1),
                   kernel_size=(7, 7),
                   recurrent_activation='hard_sigmoid',
                   input_shape=(None, None, None, 1),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    #model.add(Conv3D(filters=1, kernel_size=(3, 3, 3), activation='sigmoid',  padding='same', data_format='channels_last'))
    model.add(
        Conv3D(filters=1,
               kernel_size=(1, 1, 1),
               activation='sigmoid',
               padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(5, 1, 1), padding='same'))
    #model.add(Reshape((None, None, 1), input_shape=(None,None,None,1)))
    print(model.summary())
    return model
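A quick smoke test with made-up sizes: ConvLSTM2D with input_shape=(None, None, None, 1) expects 5-D batches of (samples, time, rows, cols, channels), and the two MaxPooling3D layers reduce the time axis by a factor of 10:

import numpy as np

model = convLSTM_Model_3()
clips = np.random.rand(2, 10, 64, 64, 1).astype('float32')  # 2 clips, 10 frames of 64x64x1
print(model.predict(clips).shape)  # (2, 1, 64, 64, 1)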
Example #7
 def inputArch_4conv_batch():
     modelX = Sequential()
     modelX.add(BatchNormalization(input_shape=(10, 10, 10, 1)))
     modelX.add(
         Convolution3D(32,
                       3,
                       3,
                       3,
                       border_mode='valid',
                       init='he_normal',
                       W_regularizer=l2(.05)))
     modelX.add(BatchNormalization())
     modelX.add(
         Convolution3D(32,
                       3,
                       3,
                       3,
                       border_mode='same',
                       init='he_normal',
                       W_regularizer=l2(.05)))
     modelX.add(MaxPooling3D())
     modelX.add(BatchNormalization())
     modelX.add(
         Convolution3D(64,
                       3,
                       3,
                       3,
                       border_mode='same',
                       init='he_normal',
                       W_regularizer=l2(.05)))
     modelX.add(BatchNormalization())
     modelX.add(
         Convolution3D(64,
                       3,
                       3,
                       3,
                       border_mode='same',
                       init='he_normal',
                       W_regularizer=l2(.05)))
     modelX.add(MaxPooling3D())
     modelX.add(Activation('relu'))
     modelX.add(Flatten())
     modelX.add(BatchNormalization())
     modelX.add(Dropout(0.2))
     modelX.add(
         Dense(512,
               init='glorot_normal',
               activation='relu',
               W_regularizer=l2(.05)))
     return modelX
Example #8
 def inputArch_2conv_batch(defineModel_args):
     modelX=Sequential()
     modelX.add(BatchNormalization(input_shape=(10,10,10,1)))
     modelX.add(Convolution3D(gridkernelsLayer1,3,3,3, init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     modelX.add(MaxPooling3D((2,2,2)))
     modelX.add(Convolution3D(gridkernelsLayer1*2,3,3,3, init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     modelX.add(MaxPooling3D((2,2,2)))
     modelX.add(Flatten())
     modelX.add(Dense(gridneuronsLayer1*2, activation='relu'))
     modelX.add(BatchNormalization())
     return modelX
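The grid* identifiers used in Examples #8, #11 and #14 are module-level hyperparameters from a grid search that are not shown on this page. Illustrative placeholder values (assumptions only):

gridkernelsLayer1 = 32             # filters in the first conv layer
gridneuronsLayer1 = 256            # units in the first dense layer
gridConvInit = 'he_normal'         # conv kernel initializer
gridNeuronsInit = 'glorot_normal'  # dense kernel initializer
gridConvActivations = 'relu'
gridFullyActivations = 'relu'
gridFullyL2penal1 = 0.05           # L2 penalty on the dense layer
gridDropout1 = 0.3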
Example #9
def my_small_model():
    model = Sequential()

    model.add(
        Convolution3D(32,
                      kernel_dim1=5,
                      kernel_dim2=5,
                      kernel_dim3=5,
                      border_mode='same',
                      activation='relu',
                      dim_ordering='tf',
                      input_shape=(1, 32, 32, 32)))
    model.add(Dropout(0.2))
    model.add(
        Convolution3D(32,
                      kernel_dim1=3,
                      kernel_dim2=3,
                      kernel_dim3=3,
                      border_mode='same',
                      activation='relu',
                      dim_ordering='tf'))
    model.add(
        MaxPooling3D(pool_size=(3, 3, 3),
                     border_mode='same',
                     dim_ordering='tf'))  #(strides=None)
    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(16, init='normal', activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10, init='normal'))
    model.add(Activation('softmax'))
    return model
def conv_block(x_input,
               num_filters,
               pool=True,
               activation='relu',
               init='orthogonal'):

    x1 = Convolution3D(num_filters,
                       3,
                       3,
                       3,
                       border_mode='same',
                       W_regularizer=l2(1e-5),
                       init=init)(x_input)
    x1 = BatchNormalization(axis=1, momentum=0.99)(x1)
    # x1 = Activation('relu')(x1)
    x1 = PReLU(shared_axes=[2, 3, 4], trainable=True)(x1)
    #x1 = LeakyReLU(.01)(x1)
    #x1 = Convolution3D(num_filters / 2,3,3,3, border_mode='same',W_regularizer=l2(1e-4))(x1)
    #x1 = BatchNormalization(axis=1)(x1)
    #x1 = LeakyReLU(.1)(x1)

    if pool:
        x1 = MaxPooling3D()(x1)
    x_out = x1
    return x_out
Example #11
 def inputArch_3conv_batch_drop(defineModel_args):
     modelX=Sequential()
     modelX.add(BatchNormalization(input_shape=(10,10,10,1)))
     modelX.add(Convolution3D(gridkernelsLayer1,3,3,3, init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     #modelX.add(MaxPooling3D())
     modelX.add(Convolution3D(gridkernelsLayer1*2,3,3,3, init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     #modelX.add(MaxPooling3D())
     modelX.add(Convolution3D(gridkernelsLayer1*2,3,3,3, init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     modelX.add(MaxPooling3D())
     modelX.add(Flatten())
     modelX.add(Dense(gridneuronsLayer1*2))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridFullyActivations))
     modelX.add(Dropout(gridDropout1))
     modelX.add(Dense(gridneuronsLayer1*2))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridFullyActivations))
     modelX.add(Dropout(gridDropout1))
     return modelX
Example #12
def conv_block(x_input,
               num_filters,
               pool=True,
               activation='relu',
               init='orthogonal'):

    x1 = Convolution3D(num_filters,
                       3,
                       3,
                       3,
                       border_mode='same',
                       W_regularizer=l2(1e-4),
                       init=init)(x_input)
    x1 = BatchNormalization(axis=1, momentum=0.995)(x1)
    if activation == 'crelu':
        x1 = Lambda(leakyCReLU, output_shape=leakyCReLUShape)(x1)
    else:
        x1 = LeakyReLU(.01)(x1)
    # x1 = Convolution3D(num_filters,3,3,3, border_mode='same',W_regularizer=l2(1e-4))(x1)
    # x1 = BatchNormalization(axis=1)(x1)
    # x1 = LeakyReLU(.1)(x1)

    if pool:
        x1 = MaxPooling3D()(x1)
    x_out = x1
    return x_out
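Example #12 references leakyCReLU and leakyCReLUShape without defining them. One plausible definition of a leaky concatenated ReLU and its output-shape helper (an assumption; the channels-first axis=1 matches the BatchNormalization(axis=1) calls in the block):

from keras import backend as K

def leakyCReLU(x, alpha=0.01):
    # concatenate a leaky ReLU of x and of -x along the channel axis
    return K.concatenate([K.relu(x, alpha=alpha), K.relu(-x, alpha=alpha)], axis=1)

def leakyCReLUShape(input_shape):
    # the concatenation doubles the number of channels (axis 1)
    shape = list(input_shape)
    shape[1] *= 2
    return tuple(shape)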
def conv_block(x_input,
               num_filters,
               pool=True,
               activation='relu',
               init='orthogonal'):

    x1 = Convolution3D(num_filters,
                       3,
                       3,
                       3,
                       border_mode='same',
                       W_regularizer=l2(1e-5),
                       init=init)(x_input)
    x1 = BatchNormalization(axis=1, momentum=0.99)(x1)

    x1 = LeakyReLU(.1)(x1)

    # experimental activation, similar to a double-sided leaky ReLU
    # x1 = Activation('srelu_fixed')(x1)

    # x1 = Convolution3D(num_filters,3,3,3, border_mode='same',W_regularizer=l2(1e-4))(x1)
    # x1 = BatchNormalization(axis=1)(x1)
    # x1 = LeakyReLU(.1)(x1)

    if pool:
        x1 = MaxPooling3D()(x1)
    x_out = x1
    return x_out
Example #14
 def inputArch_3conv_batch_drop(defineModel_args, size):
     modelX = Sequential()
     modelX.add(BatchNormalization(input_shape=(size, size, size, 1)))
     modelX.add(Convolution3D(gridkernelsLayer1, 3, 3, 3,
                              init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     #modelX.add(MaxPooling3D())
     modelX.add(Convolution3D(gridkernelsLayer1, 3, 3, 3,
                              init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     #modelX.add(MaxPooling3D())
     modelX.add(Convolution3D(gridkernelsLayer1, 3, 3, 3,
                              init=gridConvInit))
     modelX.add(BatchNormalization())
     modelX.add(Activation(gridConvActivations))
     modelX.add(MaxPooling3D())
     modelX.add(Flatten())
     modelX.add(
         Dense(gridneuronsLayer1,
               init=gridNeuronsInit,
               activation=gridFullyActivations,
               W_regularizer=l2(gridFullyL2penal1)))
     #modelX.add(Dense(gridneuronsLayer1*2))
     #modelX.add(BatchNormalization())
     #modelX.add(Activation(gridFullyActivations))
     #modelX.add(Dropout(gridDropout1))
     #modelX.add(Dense(gridneuronsLayer1*2))
     #modelX.add(BatchNormalization())
     #modelX.add(Activation(gridFullyActivations))
     #modelX.add(Dropout(gridDropout1))
     return modelX
Example #15
def SqueezeModel2(input_tensor):
    droprate = 0.2
    x = Conv3D(filters=96,
               kernel_size=(1, 1, 1),
               strides = (1, 1, 1),
               padding='same',
               activation='relu',
               kernel_initializer=glorot_uniform(seed=1),
               bias_initializer='zeros', name="conv1")(input_tensor)

    x = Conv3D(filters=16, kernel_size=(1, 1, 1), kernel_initializer=glorot_uniform(seed=1), activation='relu', name="fire2_squeeze")(x)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(64, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire2_expand1")(x)
    expand2 = Conv3D(64, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire2_expand2")(x)
    merge1 = concatenate([expand1, expand2], axis=4, name="merge_1")
    x = Conv3D(16, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire3_squeeze")(merge1)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(64, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire3_expand1")(x)
    expand2 = Conv3D(64, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire3_expand2")(x)
    merge2 = concatenate([expand1, expand2], axis=4, name="merge_2")
    x = Conv3D(32, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire4_squeeze")(merge2)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(128, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire4_expand1")(x)
    expand2 = Conv3D(128, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire4_expand2")(x)
    merge3 = concatenate([expand1, expand2], axis=4, name="merge_3")
    maxpool4 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), name="maxpool_4")(merge3)
    x = Conv3D(filters=32, kernel_size=(1, 1, 1), activation='relu', name="fire5_squeeze")(maxpool4)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(128, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire5_expand1")(x)
    expand2 = Conv3D(128, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire5_expand2")(x)
    merge4 = concatenate([expand1, expand2], axis=4, name="merge_4")
    x = Conv3D(48, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire6_squeeze")(merge4)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(192, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire6_expand1")(x)
    expand2 = Conv3D(192, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire6_expand2")(x)
    merge5 = concatenate([expand1, expand2], axis=4, name="merge_5")
    x = Conv3D(48, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire7_squeeze")(merge5)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(192, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire7_expand1")(x)
    expand2 = Conv3D(192, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire7_expand2")(x)
    merge6 = concatenate([expand1, expand2], axis=4, name="merge_6")
    x = Conv3D(64, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire8_squeeze")(merge6)
    x = BatchNormalization()(x)
    x = Dropout(droprate)(x)
    expand1 = Conv3D(256, (1, 1, 1), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire8_expand1")(x)
    expand2 = Conv3D(256, (3, 3, 3), kernel_initializer=glorot_uniform(seed=1), padding='same', activation='relu', name="fire8_expand2")(x)
    merge7 = concatenate([expand1, expand2], axis=4, name="merge_7")
    avgpool = AveragePooling3D(pool_size=(3, 3, 3), padding='same', name="avg8")(merge7)
    flatten = Flatten(name="flatten")(avgpool)
    output = Dense(1, activation='linear', kernel_initializer=glorot_uniform(seed=1))(flatten)

    return output
Example #16
    def __init__(self, frame_count, image_channels=3, image_height=50, image_width=100, max_string=32, output_size=28):
        input_shape = self.get_input_shape(frame_count, image_channels, image_height, image_width)
        self.input_layer = Input(shape=input_shape, dtype='float32', name='input')

        self.zero_1 = ZeroPadding3D(padding=(1, 2, 2), name='zero_1')(self.input_layer)
        self.conv_1 = Conv3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_1')(self.zero_1)
        self.batc_1 = BatchNormalization(name='batc_1')(self.conv_1)
        self.actv_1 = Activation('relu', name='actv_1')(self.batc_1)
        self.pool_1 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_1')(self.actv_1)
        self.drop_1 = SpatialDropout3D(0.5, name='drop_1')(self.pool_1)

        self.zero_2 = ZeroPadding3D(padding=(1, 2, 2), name='zero_2')(self.drop_1)
        self.conv_2 = Conv3D(64, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_2')(self.zero_2)
        self.batc_2 = BatchNormalization(name='batc_2')(self.conv_2)
        self.actv_2 = Activation('relu', name='actv_2')(self.batc_2)
        self.pool_2 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_2')(self.actv_2)
        self.drop_2 = SpatialDropout3D(0.5, name='drop_2')(self.pool_2)

        self.zero_3 = ZeroPadding3D(padding=(1, 1, 1), name='zero_3')(self.drop_2)
        self.conv_3 = Conv3D(96, (3, 3, 3), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_3')(self.zero_3)
        self.batc_3 = BatchNormalization(name='batc_3')(self.conv_3)
        self.actv_3 = Activation('relu', name='actv_3')(self.batc_3)
        self.pool_3 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_3')(self.actv_3)
        self.drop_3 = SpatialDropout3D(0.5, name='drop_3')(self.pool_3)

        self.res = TimeDistributed(Flatten())(self.drop_3)

        self.gru_1 = Bidirectional(GRU(256, return_sequences=True, activation=None, kernel_initializer='Orthogonal', name='gru_1'), merge_mode='concat')(self.res)
        self.gru_1_actv = Activation('relu', name='gru_1_actv')(self.gru_1)
        self.gru_2 = Bidirectional(GRU(256, return_sequences=True, activation=None, kernel_initializer='Orthogonal', name='gru_2'), merge_mode='concat')(self.gru_1_actv)
        self.gru_2_actv = Activation('relu', name='gru_2_actv')(self.gru_2)

        self.dense_1 = Dense(output_size, kernel_initializer='he_normal', name='dense_1')(self.gru_2_actv)
        self.y_pred  = Activation('softmax', name='softmax')(self.dense_1)

        self.input_labels = Input(shape=[max_string], dtype='float32', name='labels')
        self.input_length = Input(shape=[1], dtype='int64', name='input_length')
        self.label_length = Input(shape=[1], dtype='int64', name='label_length')

        self.loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([self.y_pred, self.input_labels, self.input_length, self.label_length])

        self.model = Model(inputs=[self.input_layer, self.input_labels, self.input_length, self.label_length], outputs=self.loss_out)
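ctc_lambda_func is used in the Lambda layer above but not defined in the snippet. The standard definition from Keras CTC examples (assumed to be what this model uses):

from keras import backend as K

def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    # keras.backend.ctc_batch_cost computes the CTC loss for each batch element
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)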
Example #17
    def create_model(
        self,
        rho=0.9,
        decay=0.0,
    ):

        inputs = Input(shape=(self.max_length, self.max_length,
                              self.word_embedding_size, 1))

        masked_inputs = MaskConv(0.0)(inputs)
        masked_seqs = MaskToSeq(MaskConv(0.0), 1)(inputs)

        conv = MaskConvNet(
            Conv3D(
                self.channel_size,
                (2, 2, self.conv_size),
                strides=(1, 1, self.conv_size),
                activation='tanh',
                padding='same',
                use_bias=False,
            ))(masked_inputs)

        pooling = MaskPooling(MaxPooling3D(
            (self.max_length, self.max_length, 1), padding='same'),
                              pool_mode='max')(conv)

        encoded = ConvEncoder()([pooling, masked_seqs])

        decoded = RNNDecoder(
            RNNCell(LSTM(
                self.latent_size,
                return_sequences=True,
                implementation=2,
                unroll=False,
                dropout=0.,
                recurrent_dropout=0.,
            ),
                    Dense(units=self.encoding_size, activation='tanh'),
                    dense_dropout=0.))(encoded)

        outputs = TimeDistributed(
            Dense(self.word_embedding_size, activation='tanh'))(decoded)

        model = Model(inputs, outputs)
        picked = Pick()(encoded)
        encoder = Model(inputs, picked)

        optimizer = RMSprop(
            lr=self.learning_rate,
            rho=rho,
            decay=decay,
        )
        model.compile(loss='cosine_proximity', optimizer=optimizer)
        return model, encoder
def get_net(nb_batch, patch_z, patch_height, patch_width,n_ch):
    inputs = Input((patch_z, patch_height, patch_width, n_ch))

    # conv1 = Conv3D(8, (3, 3, 3), activation='relu', padding='valid')(inputs)
    conv1 = DCNN3D(nb_batch, 1, (3, 3, 3), scope='deformconv1',norm=False)(inputs)

    # conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(conv1)
    conv2 = DCNN3D(nb_batch, 32, (3, 3, 3), scope='deformconv2')(conv1)

    conv2 = BatchNormalization(axis=-1)(conv2)

    pool2 = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv2)

    conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(pool2)


    conv2 = BatchNormalization(axis=-1)(conv2)

    pool2 = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv2)



    conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(pool2)

    conv2 = BatchNormalization(axis=-1)(conv2)

    pool2 = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv2)

    pool3 = Flatten()(pool2)
    fc1 = Dense(128, activation='relu')(pool3)
    fc1 = Dropout(0.2)(fc1)

    out = Dense(10, activation='relu')(fc1)
    out = Lambda(lambda out: softmax(out, axis=-1))(out)
    # model
    model = Model(inputs=[inputs], outputs=[out])

    model.compile(optimizer='Adadelta', loss='categorical_crossentropy',metrics=['categorical_crossentropy', 'accuracy'])


    return model
Example #19
def getnet2(inshape):
    print("Building model 2...")
    # inshape is (160, 448, 500) = 35 840 000
    model = Sequential()
    model.add(
        Convolution3D(1, 61, 49, 101, input_shape=inshape, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (50, 200, 200) = 2 000 000
    model.add(Convolution3D(1, 9, 21, 21, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (21, 90, 90) = 170 100
    model.add(Convolution3D(1, 10, 10, 10, activation="relu"))
    model.add(MaxPooling3D((3, 3, 3)))
    # shape is (4, 27, 27) = 2 916
    model.add(Flatten())
    model.add(Dense(30, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))

    print("Compiling model...")
    model.compile(RMSprop(lr=0.01), "mse")
    return model
def residual_block(x, nb_filter, kernel_size, b, i):

    y = Conv3D(nb_filter,
               kernel_size,
               padding='same',
               name='block%d_conv%d' % (b, i),
               trainable=True)(x)
    # x = BatchNormalization(name='block1_conv1_bn')(x)
    y = Activation('relu', name='block%d_conv%d_act' % (b, i))(y)
    y = Concatenate(axis=-1)([x, y])
    print("y shape:", y.shape)
    y = MaxPooling3D((1, 2, 2), strides=(1, 2, 2),
                     name='pool%d_%d' % (b, i))(y)
    return y
Example #21
def maxpoolModelSimple(Inputs,nclasses,nregressions,dropoutRate=0.05,momentum=0.6):
    
    x=Inputs[1]
    globals=Inputs[0]
    x=BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)
    x=Convolution3D(12,kernel_size=(3,3,3),strides=(1,1,1),padding='valid', 
                    activation='relu',kernel_initializer='lecun_uniform',name='conv3D_0a')(x)
    
    
    x=Convolution3D(12,kernel_size=(3,3,3),strides=(1,1,1),padding='valid', 
                    activation='relu',kernel_initializer='lecun_uniform',name='conv3D_0')(x)
                    
    x = MaxPooling3D((2,2,2))(x)
    
    x=Convolution3D(16,kernel_size=(5,5,5),strides=(1,1,1),padding='same', 
                    activation='relu',kernel_initializer='lecun_uniform',name='conv3D_1')(x)
    
    x = MaxPooling3D((2,2,2))(x)
    x=BatchNormalization(momentum=momentum)(x)
                    
    x=Convolution3D(4,kernel_size=(3,3,5),strides=(1,1,1),padding='same', 
                    activation='relu',kernel_initializer='lecun_uniform',name='conv3D_2')(x)
                    
    x = MaxPooling3D((1,1,3))(x)
    
    x=BatchNormalization(momentum=momentum)(x)
    x=Flatten()(x)
    x=Concatenate()( [globals,x])
    x=Dense(128, activation='relu',kernel_initializer='lecun_uniform')(x)
    
    predictID=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
    predictE=Dense(1, activation='linear',kernel_initializer='lecun_uniform',name='pre_Epred')(x)
    predictions = [predictID,predictE]
                   
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Example #22
    def train(self, num_frames, Xtr, ytr, lr = 1e-3, reg = 0.01, lr_decay = 1e-6, optimizer = 'Adam', \
              bsize = 32, epochs = 100, split_ratio = 0.2, verbose = 0):
        '''
        Temporal feature pooling architecture based on pretrained model
        3D max pooling (T, H, W) + fully-connected + fully_connected + softmax
        
        Xtr: training features data (sample_size, temporal/frames, height, width, filter_num/channels)
        ytr: training true labels (sample_size,)
        lr: learning rate
        reg: regularization 
        lr_decay: learning rate decay
        optimizer: optimizer method
        bsize: minibatch size
        epochs: epoch to train
        split_ratio: validation split ratio
        verbose: boolean, show process stdout (1) or not (0)
        '''
        num_classes = len(np.unique(ytr))
        # create new model

        # Temporal max pooling
        x = Input(shape = (num_frames, 7, 7, 512))
        mp = MaxPooling3D(pool_size=(3, 2, 2), strides=(3, 2, 2), padding='valid', data_format='channels_last')(x)
        mp_flat = Flatten()(mp)
        fc1 = Dense(units = 4096, kernel_regularizer=regularizers.l2(reg))(mp_flat)
        
        # fc2 = Dense(units = 256, kernel_regularizer=regularizers.l2(reg))(fc1)
        
        fc3 = Dense(units = num_classes, kernel_regularizer=regularizers.l2(reg))(fc1)
        sf = Activation('softmax')(fc3)
        add_model = Model(inputs=x, outputs=sf)
        # sgd_m = optimizers.SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
        add_model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        
        ytr = to_categorical(ytr, num_classes = num_classes)

        print('Model is Training...')
        checkpointer = ModelCheckpoint(filepath='maxpool3D_classification_weights.hdf5', verbose=1, save_best_only=True)
        
        # reduce learning rate when on plateau
        reduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
        
        hist = add_model.fit(Xtr, ytr, epochs=epochs, batch_size= bsize, validation_split = split_ratio, \
                             verbose = verbose, callbacks=[checkpointer, reduceLR])

        self.add_model = add_model
        self.hist = hist
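The docstring spells out the expected shapes. A hypothetical call with random data, just to illustrate them (the surrounding class is not shown, so its name here is a placeholder):

import numpy as np

pooler = TemporalMaxPooler()  # hypothetical class exposing the train() method above
Xtr = np.random.rand(16, 6, 7, 7, 512).astype('float32')  # (sample_size, frames, 7, 7, 512)
ytr = np.random.randint(0, 4, size=(16,))                 # (sample_size,)
pooler.train(num_frames=6, Xtr=Xtr, ytr=ytr, epochs=1, bsize=8, verbose=1)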
Example #23
def getnet3(inshape):
    print("Building model 3...")
    print("Input shape is", inshape)
    # inshape is (80, 224, 250) = 4 480 000
    model = Sequential()
    model.add(
        Convolution3D(1, 11, 25, 31, input_shape=inshape, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (35, 100, 110) = 385 000
    model.add(
        Convolution3D(1, 6, 21, 31, input_shape=inshape, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (15, 40, 40) = 24 000
    model.add(
        Convolution3D(4, 6, 21, 21, input_shape=inshape, activation="relu"))
    model.add(MaxPooling3D())
    # shape is (5, 10, 10) = 500
    model.add(Flatten())
    model.add(Dense(30, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))

    print("Compiling model...")
    model.compile(RMSprop(lr=0.01), "binary_crossentropy")
    return model
Example #24
        def d_layer(layer_input, filters, f_size=3, bn=True):
            """Discriminator layer"""
            # d = Conv3D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)

            d = Conv3D(filters, kernel_size=f_size, strides=1, padding='same')(layer_input)
            # d = AveragePooling3D(padding='same', data_format="channels_last")(d)
            d = MaxPooling3D(padding='same', data_format="channels_last")(d)

            d = LeakyReLU(alpha=0.2)(d)
            # add dropout to keep the discriminator from memorizing
            d = Dropout(rate=self.settings['DROPOUT'])(d)

            if bn and self.settings['BATCH_NORMALIZATION']:
                d = BatchNormalization(momentum=0.8)(d)
            return d
Example #25
    def FPM(self, x):
        x1 = MaxPooling3D((2, 2, 2), strides=(1, 1, 1), padding='same')(x)
        x1 = Conv3D(64, 1, padding='same')(x1)

        x2 = Conv3D(64, 3, padding='same')(x)

        x3 = Conv3D(64, 3, padding='same', dilation_rate=4)(x)

        x4 = Conv3D(64, 3, padding='same', dilation_rate=8)(x)

        x5 = Conv3D(64, 3, padding='same', dilation_rate=16)(x)

        x = keras.layers.concatenate([x1, x2, x3, x4, x5], axis=-1)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SpatialDropout3D(0.25)(x)
        return x
def conv_block(x_input, num_filters, pool=True, norm=False, drop_rate=0.0):

    x1 = Convolution3D(num_filters,
                       3,
                       3,
                       3,
                       border_mode='same',
                       W_regularizer=l2(1e-4))(x_input)
    if norm:
        x1 = BatchNormalization(axis=1)(x1)
        #x1 = Lambda(relu_norm)(x1)
    if drop_rate > 0.0:
        x1 = GaussianDropout(drop_rate)(x1)

    x1 = LeakyReLU(.1)(x1)
    if pool:
        x1 = MaxPooling3D()(x1)
    x_out = x1
    return x_out
Example #27
        def conv3d(layer_input, filters, f_size=3, bn=True):
            """Layers used during downsampling"""
            if self.settings['POOLING'] == "MAX":
                d = Conv3D(filters, kernel_size=f_size, strides=1, padding='same')(layer_input)
                d = MaxPooling3D(padding='same', data_format="channels_last")(d)
            elif self.settings['POOLING'] == "AVERAGE":
                d = Conv3D(filters, kernel_size=f_size, strides=1, padding='same')(layer_input)
                d = AveragePooling3D(padding='same', data_format="channels_last")(d)
            elif self.settings['POOLING'] == "NONE":
                d = Conv3D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)

            # GANHACKS: add dropout with specified value
            if self.settings['GANHACKS']:
                d = Dropout(rate=self.settings['DROPOUT'])(d)
            if bn and self.settings['BATCH_NORMALIZATION']:
                d = BatchNormalization(momentum=0.8)(d)
            # GANHACKS: adding gaussian noise to every layer of G (Zhao et. al. EBGAN)
            if self.settings['GANHACKS']:
                d = GaussianNoise(stddev=self.settings['GAUSSIAN_NOISE_TO_G'])(d)
            print('downsampling:\t\t\t', d.shape)
            return d
def get_3d_model(shape):
    model = Sequential()
    model.add(
        Conv3D(36,
               3,
               strides=(2, 2, 2),
               padding="same",
               activation='relu',
               input_shape=shape))
    model.add(BatchNormalization())
    model.add(Conv3D(48, 3, padding="same", activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv3D(64, 3, padding="same", activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv3D(94, 3, padding="same", activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='valid'))
    model.add(Flatten())
    model.add(Dense(1524))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Dense(750))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Dense(370))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Dense(180))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Dense(50))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Dense(2, activation='linear'))
    adam = Adam(lr=0.0005, clipvalue=5)
    model.compile(optimizer=adam, loss='mean_squared_error', metrics=['mae'])
    #backend.get_session().run(tf.initialize_all_variables())
    return model
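A hypothetical invocation, assuming 32x32x32 single-channel input volumes and the two-value regression target implied by the final Dense(2, activation='linear'):

model = get_3d_model((32, 32, 32, 1))
model.summary()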
Example #29
def VIN(sz, k, ch_i, ch_h, ch_q, ch_a):
    map_in = Input(shape=(sz,sz))
    s1 = Input(shape=(1,), dtype='int32')
    s2 = Input(shape=(1,), dtype='int32')
    h = Conv2D(filters=ch_h, 
               kernel_size=(3,3), 
               padding='same', 
               activation='relu')(map_in)
    r = Conv2D(filters=1, 
               kernel_size=(3,3), 
               padding='same',
               bias=False,
               activation=None,
               )(h)
    conv3 = Conv2D(filters=ch_q,
                   kernel_size=(3, 3), 
                   padding='same',
                   bias=False)

    conv3b = Conv2D(filters=ch_q,
                   kernel_size=(3, 3), 
                   padding='same',
                   bias=False)
    
    q = conv3(r)

    for _ in range(k):
        #v = Lambda(lambda x: K.max(x, axis=CHANNEL_AXIS, keepdims=True),
        #           output_shape=(sz, sz, 1))(q)
        v = MaxPooling3D(pool_size=(1,1,ch_q))(q)
        rv = concatenate([r, v], axis=3)
        q = conv3b(rv)

    q_out = attention(q,s1,s2) 

    out = Dense(ch_a, activation='softmax', bias=False)(q_out)
Example #30
def Unet(img_shape, params, path='./'):

    # print message at runtime
    if (img_shape[0] == 64 and np.size(img_shape) == 3):
        print('Create 2D U-Net network with 3 levels...\n')
    elif (img_shape[0] == 128 and np.size(img_shape) == 3):
        print('Create 2D U-Net network with 4 levels...\n')
    elif (img_shape[0] == 64 and np.size(img_shape) == 4):
        print('Create 3D U-Net network with 3 levels...\n')
    elif (img_shape[0] == 128 and np.size(img_shape) == 4):
        print('Create 3D U-Net network with 4 levels...\n')
    else:
        print('ERROR: unsupported input shape')

    def Conv2D_Layers(prev_layer, kernel_size, nr_filts, layer_name):
        # first layer
        a = Conv2D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C1' % layer_name)(prev_layer)
        a = BatchNormalization(name='%s_BN1' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A1' % layer_name)(a)
        # second layer
        a = Conv2D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C2' % layer_name)(a)
        a = BatchNormalization(name='%s_BN2' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A2' % layer_name)(a)
        return a

    def Conv3D_Layers(prev_layer, kernel_size, nr_filts, layer_name):
        # first layer
        a = Conv3D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C1' % layer_name)(prev_layer)
        a = BatchNormalization(name='%s_BN1' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A1' % layer_name)(a)
        # second layer
        a = Conv3D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C2' % layer_name)(a)
        a = BatchNormalization(name='%s_BN2' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A2' % layer_name)(a)
        return a

    img_input = Input(shape=img_shape, name='Image')

    # U-Net Encoder - upper level
    if (np.size(img_shape) == 3):
        # 2-D network
        e1c = Conv2D_Layers(prev_layer=img_input,
                            nr_filts=int(params['coarse_dim'] / 16),
                            kernel_size=params['kernel_size'],
                            layer_name='E1')
        e1 = MaxPooling2D(pool_size=(2, 2), name='E1_P')(e1c)
        e1 = Dropout(params['dropout'] * 0.5, name='E1_D2')(e1)
    elif (np.size(img_shape) == 4):
        # 3-D network
        e1c = Conv3D_Layers(prev_layer=img_input,
                            nr_filts=int(params['coarse_dim'] / 16),
                            kernel_size=(params['kernel_size'],
                                         params['kernel_size'],
                                         params['kernel_size']),
                            layer_name='E1')
        e1 = MaxPooling3D(pool_size=(2, 2, 2), name='E1_P')(e1c)
        e1 = Dropout(params['dropout'] * 0.5, name='E1_D2')(e1)

    # U-Net Encoder - second level
    if (np.size(img_shape) == 3):
        # 2-D network
        e2c = Conv2D_Layers(prev_layer=e1,
                            nr_filts=int(params['coarse_dim'] / 8),
                            kernel_size=params['kernel_size'],
                            layer_name='E2')
        e2 = MaxPooling2D(pool_size=(2, 2), name='E2_P')(e2c)
        e2 = Dropout(params['dropout'], name='E2_D2')(e2)
    elif (np.size(img_shape) == 4):
        # 3-D network
        e2c = Conv3D_Layers(prev_layer=e1,
                            nr_filts=int(params['coarse_dim'] / 8),
                            kernel_size=(params['kernel_size'],
                                         params['kernel_size'],
                                         params['kernel_size']),
                            layer_name='E2')
        e2 = MaxPooling3D(pool_size=(2, 2, 2), name='E2_P')(e2c)
        e2 = Dropout(params['dropout'], name='E2_D2')(e2)

    # U-Net Encoder - third level
    if (np.size(img_shape) == 3):
        # 2-D network
        e3c = Conv2D_Layers(prev_layer=e2,
                            nr_filts=int(params['coarse_dim'] / 4),
                            kernel_size=params['kernel_size'],
                            layer_name='E3')
        e3 = MaxPooling2D(pool_size=(2, 2), name='E3_P')(e3c)
        e3 = Dropout(params['dropout'], name='E3_D2')(e3)
    elif (np.size(img_shape) == 4):
        # 3-D network
        e3c = Conv3D_Layers(prev_layer=e2,
                            nr_filts=int(params['coarse_dim'] / 4),
                            kernel_size=(params['kernel_size'],
                                         params['kernel_size'],
                                         params['kernel_size']),
                            layer_name='E3')
        e3 = MaxPooling3D(pool_size=(2, 2, 2), name='E3_P')(e3c)
        e3 = Dropout(params['dropout'], name='E3_D2')(e3)

    if (img_shape[0] >= 64 and img_shape[0] < 128):
        # U-Net Encoder - bottom level
        if (np.size(img_shape) == 3):
            # 2-D network
            b = Conv2D_Layers(prev_layer=e3,
                              nr_filts=int(params['coarse_dim'] / 2),
                              kernel_size=(params['kernel_size'],
                                           params['kernel_size']),
                              layer_name='B')

            d3 = Conv2DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=(params['kernel_size'],
                                              params['kernel_size']),
                                 strides=(2, 2),
                                 padding='same',
                                 name='D3_DC')(b)
        elif (np.size(img_shape) == 4):
            # 3-D network
            b = Conv3D_Layers(prev_layer=e3,
                              nr_filts=int(params['coarse_dim'] / 2),
                              kernel_size=(params['kernel_size'],
                                           params['kernel_size'],
                                           params['kernel_size']),
                              layer_name='B')

            d3 = Conv3DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=(params['kernel_size'],
                                              params['kernel_size'],
                                              params['kernel_size']),
                                 strides=(2, 2, 2),
                                 padding='same',
                                 name='D3_DC')(b)
    elif (img_shape[0] >= 128):
        if (np.size(img_shape) == 3):
            # 2-D network
            # U-Net Encoder - fourth level
            e4c = Conv2D_Layers(prev_layer=e3,
                                nr_filts=int(params['coarse_dim'] / 2),
                                kernel_size=params['kernel_size'],
                                layer_name='E4')
            e4 = MaxPooling2D(pool_size=(2, 2), name='E4_P')(e4c)
            e4 = Dropout(params['dropout'], name='E4_D2')(e4)

            # U-Net Encoder - bottom level
            b = Conv2D_Layers(prev_layer=e4,
                              nr_filts=params['coarse_dim'],
                              kernel_size=params['kernel_size'],
                              layer_name='B')

            # U-Net Decoder - fourth level
            d4 = Conv2DTranspose(filters=int(params['coarse_dim'] / 2),
                                 kernel_size=params['kernel_size'],
                                 strides=(2, 2),
                                 padding='same',
                                 name='D4_DC')(b)
            d4 = concatenate([d4, e4c], name='merge_layer_E4_A2')
            d4 = Dropout(params['dropout'], name='D4_D1')(d4)
            d4 = Conv2D_Layers(prev_layer=d4,
                               nr_filts=int(params['coarse_dim'] / 2),
                               kernel_size=(params['kernel_size'],
                                            params['kernel_size']),
                               layer_name='D4')

            # U-Net Decoder - third level
            d3 = Conv2DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=params['kernel_size'],
                                 strides=(2, 2),
                                 padding='same',
                                 name='D3_DC')(d4)
        elif (np.size(img_shape) == 4):
            # 3-D network
            # U-Net Encoder - fourth level
            e4c = Conv3D_Layers(prev_layer=e3,
                                nr_filts=int(params['coarse_dim'] / 2),
                                kernel_size=(params['kernel_size'],
                                             params['kernel_size'],
                                             params['kernel_size']),
                                layer_name='E4')
            e4 = MaxPooling3D(pool_size=(2, 2, 2), name='E4_P')(e4c)
            e4 = Dropout(params['dropout'], name='E4_D2')(e4)

            # U-Net Encoder - bottom level
            b = Conv3D_Layers(prev_layer=e4,
                              nr_filts=params['coarse_dim'],
                              kernel_size=(params['kernel_size'],
                                           params['kernel_size'],
                                           params['kernel_size']),
                              layer_name='B')

            # U-Net Decoder - fourth level
            d4 = Conv3DTranspose(filters=int(params['coarse_dim'] / 2),
                                 kernel_size=(params['kernel_size'],
                                              params['kernel_size'],
                                              params['kernel_size']),
                                 strides=(2, 2, 2),
                                 padding='same',
                                 name='D4_DC')(b)
            d4 = concatenate([d4, e4c], name='merge_layer_E4_A2')
            d4 = Dropout(params['dropout'], name='D4_D1')(d4)
            d4 = Conv3D_Layers(prev_layer=d4,
                               nr_filts=int(params['coarse_dim'] / 2),
                               kernel_size=(params['kernel_size'],
                                            params['kernel_size'],
                                            params['kernel_size']),
                               layer_name='D4')

            # U-Net Decoder - third level
            d3 = Conv3DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=(params['kernel_size'],
                                              params['kernel_size'],
                                              params['kernel_size']),
                                 strides=(2, 2, 2),
                                 padding='same',
                                 name='D3_DC')(d4)
    else:
        print('ERROR: input data has the wrong dimensions')

    # U-Net Decoder - third level (continue)
    if (np.size(img_shape) == 3):
        # 2-D network
        d3 = concatenate([d3, e3c], name='merge_layer_E3_A2')
        d3 = Dropout(params['dropout'], name='D3_D1')(d3)
        d3 = Conv2D_Layers(prev_layer=d3,
                           nr_filts=int(params['coarse_dim'] / 2),
                           kernel_size=(params['kernel_size'],
                                        params['kernel_size']),
                           layer_name='D3')
    elif (np.size(img_shape) == 4):
        # 3-D network
        d3 = concatenate([d3, e3c], name='merge_layer_E3_A2')
        d3 = Dropout(params['dropout'], name='D3_D1')(d3)
        d3 = Conv3D_Layers(prev_layer=d3,
                           nr_filts=int(params['coarse_dim'] / 2),
                           kernel_size=(params['kernel_size'],
                                        params['kernel_size'],
                                        params['kernel_size']),
                           layer_name='D3')

    # U-Net Decoder - second level
    if (np.size(img_shape) == 3):
        # 2-D network
        d2 = Conv2DTranspose(filters=int(params['coarse_dim'] / 8),
                             kernel_size=params['kernel_size'],
                             strides=(2, 2),
                             padding='same',
                             name='D2_DC')(d3)
        d2 = concatenate([d2, e2c], name='merge_layer_E2_A2')
        d2 = Dropout(params['dropout'], name='D2_D1')(d2)
        d2 = Conv2D_Layers(prev_layer=d2,
                           nr_filts=int(params['coarse_dim'] / 4),
                           kernel_size=(params['kernel_size'],
                                        params['kernel_size']),
                           layer_name='D2')
    elif (np.size(img_shape) == 4):
        # 3-D network
        d2 = Conv3DTranspose(filters=int(params['coarse_dim'] / 8),
                             kernel_size=(params['kernel_size'],
                                          params['kernel_size'],
                                          params['kernel_size']),
                             strides=(2, 2, 2),
                             padding='same',
                             name='D2_DC')(d3)
        d2 = concatenate([d2, e2c], name='merge_layer_E2_A2')
        d2 = Dropout(params['dropout'], name='D2_D1')(d2)
        d2 = Conv3D_Layers(prev_layer=d2,
                           nr_filts=int(params['coarse_dim'] / 4),
                           kernel_size=(params['kernel_size'],
                                        params['kernel_size'],
                                        params['kernel_size']),
                           layer_name='D2')

    # U-Net Decoder - upper level
    if (np.size(img_shape) == 3):
        d1 = Conv2DTranspose(filters=int(params['coarse_dim'] / 16),
                             kernel_size=params['kernel_size'],
                             strides=(2, 2),
                             padding='same',
                             name='D1_DC')(d2)
        d1 = concatenate([d1, e1c], name='merge_layer_E1_A2')
        d1 = Dropout(params['dropout'], name='D1_D1')(d1)
        d1 = Conv2D_Layers(prev_layer=d1,
                           nr_filts=int(params['coarse_dim'] / 16),
                           kernel_size=(params['kernel_size'],
                                        params['kernel_size']),
                           layer_name='D1')
    elif (np.size(img_shape) == 4):
        d1 = Conv3DTranspose(filters=int(params['coarse_dim'] / 16),
                             kernel_size=(params['kernel_size'],
                                          params['kernel_size'],
                                          params['kernel_size']),
                             strides=(2, 2, 2),
                             padding='same',
                             name='D1_DC')(d2)
        d1 = concatenate([d1, e1c], name='merge_layer_E1_A2')
        d1 = Dropout(params['dropout'], name='D1_D1')(d1)
        d1 = Conv3D_Layers(prev_layer=d1,
                           nr_filts=int(params['coarse_dim'] / 16),
                           kernel_size=(params['kernel_size'],
                                        params['kernel_size'],
                                        params['kernel_size']),
                           layer_name='D1')

    # Output layer
    if (np.size(img_shape) == 3):
        output_image = Conv2D(filters=int(img_shape[-1]),
                              kernel_size=params['kernel_size'],
                              strides=(1, 1),
                              padding='same',
                              name='out_C')(d1)
    elif (np.size(img_shape) == 4):
        output_image = Conv3D(filters=int(img_shape[-1]),
                              kernel_size=(params['kernel_size'],
                                           params['kernel_size'],
                                           params['kernel_size']),
                              strides=(1, 1, 1),
                              padding='same',
                              name='out_C')(d1)

    output_image = Activation("sigmoid", name='sigmoid')(output_image)

    model = Model(inputs=[img_input], outputs=[output_image], name='Unet')

    plot_model(model,
               to_file=path + 'model_visualization.png',
               show_shapes=True,
               show_layer_names=True)

    return model
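An example configuration with the keys the function actually reads from params (the values are illustrative assumptions), building the 3-D, four-level variant; note that plot_model needs pydot and graphviz installed:

params = {
    'coarse_dim': 256,   # bottom-level filter count; upper levels use /16 ... /2 of this
    'kernel_size': 3,
    'activation': 'relu',
    'dropout': 0.2,
}
model = Unet(img_shape=(128, 128, 128, 1), params=params, path='./')
model.summary()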