def generator_model():
    """Build the Sequential generator.

    A 100-d latent vector is projected with a Dense layer, reshaped to a
    (2, 4, 4, 512) volume, and progressively upsampled by strided
    transposed 3D convolutions to a 3-channel tanh output.
    """
    model = Sequential()

    # Latent projection, squashed before normalisation.
    model.add(Dense(input_dim=100, units=2 * 512 * 4 * 4))
    model.add(Activation('tanh'))
    model.add(BatchNormalization())

    # 2x4x4 volume, 512 channels.
    model.add(Reshape((2, 4, 4, 512), input_shape=(2 * 512 * 4 * 4, )))
    model.add(
        Conv3D(filters=512,
               kernel_size=(2, 4, 4),
               strides=(1, 1, 1),
               padding='same',
               data_format="channels_last"))
    model.add(TimeDistributed(BatchNormalization()))
    model.add(Dropout(0.5))

    # Three identical stride-2 upsampling stages:
    # 4x8x8 (256 ch) -> 8x16x16 (128 ch) -> 16x32x32 (64 ch).
    for n_filters in (256, 128, 64):
        model.add(
            Conv3DTranspose(filters=n_filters,
                            kernel_size=(4, 4, 4),
                            strides=(2, 2, 2),
                            padding='same'))
        model.add(TimeDistributed(BatchNormalization()))
        model.add(TimeDistributed(LeakyReLU(0.2)))
        model.add(Dropout(0.5))

    # Final 32x64x64 output with 3 channels in [-1, 1].
    model.add(
        Conv3DTranspose(filters=3,
                        kernel_size=(4, 4, 4),
                        strides=(2, 2, 2),
                        padding='same',
                        activation='tanh'))

    return model
Beispiel #2
0
def Unet_3d(input_img, n_filters=8, dropout=0.2, batch_norm=True):
    """3D U-Net with four encoder/decoder levels and a 4-class softmax head.

    Args:
        input_img: input Keras tensor (5D; channels-last is assumed by the
            default axis of `concatenate` — confirm against callers).
        n_filters: base filter count, doubled at each deeper level.
        dropout: dropout rate applied after each pooling / decoder block.
        batch_norm: forwarded to `conv_block`.

    Returns:
        A Model mapping `input_img` to a 4-channel softmax volume.
    """
    # ---- Contracting path -------------------------------------------------
    c1 = conv_block(input_img, n_filters, 3, batch_norm)
    p1 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c1)
    p1 = Dropout(dropout)(p1)

    c2 = conv_block(p1, n_filters * 2, 3, batch_norm)
    p2 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c2)
    p2 = Dropout(dropout)(p2)

    c3 = conv_block(p2, n_filters * 4, 3, batch_norm)
    p3 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c3)
    p3 = Dropout(dropout)(p3)

    c4 = conv_block(p3, n_filters * 8, 3, batch_norm)
    p4 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c4)
    p4 = Dropout(dropout)(p4)

    # ---- Bridge -----------------------------------------------------------
    c5 = conv_block(p4, n_filters * 16, 3, batch_norm)

    # ---- Expansive path with skip connections -----------------------------
    u6 = Conv3DTranspose(n_filters * 8, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = conv_block(u6, n_filters * 8, 3, batch_norm)
    c6 = Dropout(dropout)(c6)

    u7 = Conv3DTranspose(n_filters * 4, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = conv_block(u7, n_filters * 4, 3, batch_norm)
    c7 = Dropout(dropout)(c7)

    u8 = Conv3DTranspose(n_filters * 2, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = conv_block(u8, n_filters * 2, 3, batch_norm)
    c8 = Dropout(dropout)(c8)

    u9 = Conv3DTranspose(n_filters, (3, 3, 3),
                         strides=(2, 2, 2),
                         padding='same')(c8)
    u9 = concatenate([u9, c1])
    c9 = conv_block(u9, n_filters, 3, batch_norm)

    outputs = Conv3D(4, (1, 1, 1), activation='softmax')(c9)
    # NOTE: removed leftover debug prints of the output shape.
    model = Model(inputs=input_img, outputs=outputs)

    return model
Beispiel #3
0
def unet_model_3d_crop(img_width=128, img_height=128, TIME=10):
    '''
    3D U-Net over (width, height, time) volumes.

    Modified from https://keunwoochoi.wordpress.com/2017/10/11/u-net-on-keras-2-0/

    Pooling and upsampling act on the two spatial dims only (2, 2, 1), so
    TIME is preserved end to end.  Raises ValueError for an unknown image
    data format (previously this case fell through and crashed later with
    a NameError on `input_shape`).
    '''
    n_ch_exps = [3, 4, 5, 6, 7]  # n-th deep channel's exponent, i.e. 2**n -> 8,16,32,64,128
    k_size = (3, 3, 3)           # size of filter kernel
    k_init = 'he_normal'         # kernel initializer

    if K.image_data_format() == 'channels_first':
        ch_axis = 1
        input_shape = (1, TIME, img_width, img_height)
    elif K.image_data_format() == 'channels_last':
        ch_axis = 4
        input_shape = (img_width, img_height, TIME, 1)
    else:
        # Explicit failure instead of an undefined `input_shape` below.
        raise ValueError('Unsupported image data format: %s'
                         % K.image_data_format())

    inp = Input(shape=input_shape)
    encodeds = []

    # Encoder: two convs per level with rate-increasing dropout between
    # them; spatial pooling everywhere except the deepest level.
    enc = inp
    for l_idx, n_ch in enumerate(n_ch_exps):
        enc = Conv3D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(enc)
        enc = Dropout(0.15 * l_idx)(enc)
        enc = Conv3D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(enc)
        encodeds.append(enc)
        if n_ch < n_ch_exps[-1]:  # do not run max pooling on the last encoding/downsampling step
            enc = MaxPooling3D(pool_size=(2, 2, 1))(enc)

    # Decoder: transposed conv, concat the matching encoder output, two convs.
    dec = enc
    decoder_n_chs = n_ch_exps[::-1][1:]
    for l_idx, n_ch in enumerate(decoder_n_chs):
        l_idx_rev = len(n_ch_exps) - l_idx - 2  # index of the matching skip connection
        dec = Conv3DTranspose(filters=2**n_ch, kernel_size=k_size, strides=(2, 2, 1), activation='relu', padding='same', kernel_initializer=k_init)(dec)
        dec = concatenate([dec, encodeds[l_idx_rev]], axis=ch_axis)
        dec = Conv3D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(dec)
        dec = Dropout(0.15 * l_idx)(dec)
        dec = Conv3D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(dec)

    outp = Conv3DTranspose(filters=1, kernel_size=k_size, activation='sigmoid', padding='same', kernel_initializer='glorot_normal')(dec)

    model = Model(inputs=[inp], outputs=[outp])

    return model
Beispiel #4
0
def decoder_model():
    """Build the Sequential decoder.

    Takes a (10, 16, 16, 128) feature volume and upsamples it spatially
    with transposed 3D convolutions to a single-channel tanh output.
    Spatial size per stage (time stays 10): 16 -> 16 -> 32 -> 64 -> 64.
    """
    model = Sequential()

    def _post(act):
        # Shared BatchNorm -> activation -> dropout tail, per time step.
        model.add(TimeDistributed(BatchNormalization()))
        model.add(TimeDistributed(act))
        model.add(TimeDistributed(Dropout(0.5)))

    # Stage 1: stride 1 keeps 10x16x16.
    model.add(
        Conv3DTranspose(filters=64,
                        kernel_size=(3, 5, 5),
                        padding='same',
                        strides=(1, 1, 1),
                        input_shape=(10, 16, 16, 128)))
    _post(LeakyReLU(alpha=0.2))

    # Stage 2: doubles H and W -> 10x32x32.
    model.add(
        Conv3DTranspose(filters=128,
                        kernel_size=(3, 5, 5),
                        padding='same',
                        strides=(1, 2, 2)))
    _post(LeakyReLU(alpha=0.2))

    # Stage 3: doubles H and W -> 10x64x64.
    model.add(
        Conv3DTranspose(filters=64,
                        kernel_size=(3, 5, 5),
                        padding='same',
                        strides=(1, 2, 2)))
    _post(LeakyReLU(alpha=0.2))

    # Output stage: stride 1, single channel -> 10x64x64x1.
    model.add(
        Conv3DTranspose(filters=1,
                        kernel_size=(3, 5, 5),
                        strides=(1, 1, 1),
                        padding='same'))
    _post(Activation('tanh'))

    return model
Beispiel #5
0
def myUpConv3DBlock(input_tensor, filters, kernel_size, strides,
                    kernel_initializer, padding, block_name,
                    kernel_regularizer, use_batch_norm, dropout_rate):
    """Transposed 3D convolution block built with Keras.

    The scheme is:

        ... -> Conv3DTranspose -> [BatchNorm] -> [Dropout] -> ...

    BatchNorm is inserted only when `use_batch_norm` is true, Dropout
    only when `dropout_rate` is positive.  Conv3DTranspose keeps its
    default activation (None), so no non-linearity is applied here.
    """
    x = Conv3DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        kernel_initializer=kernel_initializer,
                        padding=padding,
                        name=block_name,
                        kernel_regularizer=kernel_regularizer)(input_tensor)

    if use_batch_norm:
        x = BatchNormalization(name=block_name + '_bn')(x)

    if dropout_rate > 0.0:
        x = Dropout(rate=dropout_rate, name=block_name + '_dr')(x)

    return x
Beispiel #6
0
def create_convolution_block_up(input_layer,skip_conn, n_filters, batch_normalization=True, kernel_size=(4, 4, 4), activation='relu',
                             padding='same', strides=(2, 2, 2), instance_normalization=False, dropout=True):
    """Upsampling block: Conv3DTranspose -> normalisation -> optional
    SpatialDropout3D -> skip concatenation -> activation.

    Batch normalisation takes precedence; instance normalisation (said
    to perform better than batch normalisation) is applied only when
    `batch_normalization` is false.  Channels-last layout is assumed
    (axis=4 throughout).
    """
    init = RandomNormal(mean=0.0, stddev=0.02)
    layer = Conv3DTranspose(n_filters, kernel_size, padding=padding,
                            kernel_initializer=init,
                            strides=strides)(input_layer)

    if batch_normalization:
        layer = BatchNormalization(axis=4)(layer)
    elif instance_normalization:
        # keras_contrib is an optional dependency; fail with guidance.
        try:
            from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
        except ImportError:
            raise ImportError("Install keras_contrib in order to use instance normalization."
                              "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
        layer = InstanceNormalization(axis=4)(layer)

    if dropout:
        layer = SpatialDropout3D(rate=0.5)(layer)

    layer = concatenate([layer, skip_conn], axis=4)

    return Activation(activation)(layer)
    def f(input):
        # Residual "bottleneck" branch builder (closure).  Free variables
        # (is_first_block_of_first_layer, deconv, filters, strides,
        # kernel_regularizer) and the _bn_relu_conv3d/_shortcut3d helpers
        # come from an enclosing scope not visible in this chunk.
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            if deconv:
                conv_1_1 = Conv3DTranspose(
                    filters=filters,
                    kernel_size=(1, 1, 1),
                    strides=strides,
                    padding="same",
                    kernel_initializer="he_normal",
                    kernel_regularizer=kernel_regularizer)(input)
            else:
                conv_1_1 = Conv3D(filters=filters,
                                  kernel_size=(1, 1, 1),
                                  strides=strides,
                                  padding="same",
                                  kernel_initializer="he_normal",
                                  kernel_regularizer=kernel_regularizer)(input)
        else:
            conv_1_1 = _bn_relu_conv3d(
                filters=filters,
                kernel_size=(1, 1, 1),
                strides=strides,
                kernel_regularizer=kernel_regularizer)(input)

        # 1x1x1 reduce -> 3x3x3 conv -> 1x1x1 expand to 4x filters, then
        # merge with the shortcut path by summation.
        conv_3_3 = _bn_relu_conv3d(
            filters=filters,
            kernel_size=(3, 3, 3),
            kernel_regularizer=kernel_regularizer)(conv_1_1)
        residual = _bn_relu_conv3d(
            filters=filters * 4,
            kernel_size=(1, 1, 1),
            kernel_regularizer=kernel_regularizer)(conv_3_3)

        return _shortcut3d(input, residual)
Beispiel #8
0
def __transition_up_block(ip,
                          nb_filters,
                          type='deconv',
                          weight_decay=1E-4,
                          block_prefix=None):
    '''Adds an upsampling block. Upsampling operation relies on the type parameter.
    # Arguments
        ip: input keras tensor
        nb_filters: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming
    # Input shape
        5D tensor with shape:
        `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
    # Output shape
        5D tensor with `nb_filters` output channels and (for the
        'upsampling' and 'deconv' paths) each spatial dimension doubled.
    # Returns
        a keras tensor
    '''
    with K.name_scope('TransitionUp'):

        if type == 'upsampling':
            x = UpSampling3D(
                name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            x = Conv3D(nb_filters, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=l2(weight_decay),
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D'))(ip)
            x = SubPixelUpscaling(scale_factor=2,
                                  name=name_or_none(block_prefix,
                                                    '_subpixel'))(x)
            # Bug fix: this conv previously reused the '_Conv3D' suffix,
            # producing two layers with identical names, which makes
            # Keras raise whenever block_prefix is set.
            x = Conv3D(nb_filters, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=l2(weight_decay),
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D_2'))(x)
        else:
            x = Conv3DTranspose(nb_filters, (3, 3, 3),
                                activation='relu',
                                padding='same',
                                strides=(2, 2, 2),
                                kernel_initializer='he_normal',
                                kernel_regularizer=l2(weight_decay),
                                name=name_or_none(block_prefix,
                                                  '_Conv3DT'))(ip)
        return x
Beispiel #9
0
def _shortcut3d(input, residual, deconv=False):
    """3D shortcut to match input and residual and merges them with "sum".

    Relies on the legacy ``_keras_shape`` attribute (pre-Keras-2.2) and
    on module-level DIM1_AXIS/DIM2_AXIS/DIM3_AXIS/CHANNEL_AXIS constants
    defined elsewhere in this file.
    """
    if deconv:
        # Upsampling case: residual is larger than input, so the stride of
        # the projection Conv3DTranspose is the size ratio, and any
        # remainder is distributed (floor/ceil) as zero padding per dim.
        stride_dim1 = math.floor(residual._keras_shape[DIM1_AXIS] /
                                 input._keras_shape[DIM1_AXIS])
        stride_dim2 = math.floor(residual._keras_shape[DIM2_AXIS] /
                                 input._keras_shape[DIM2_AXIS])
        stride_dim3 = math.floor(residual._keras_shape[DIM3_AXIS] /
                                 input._keras_shape[DIM3_AXIS])

        padding_dim1 = (
            residual._keras_shape[DIM1_AXIS]) % input._keras_shape[DIM1_AXIS]
        padding_dim2 = (
            residual._keras_shape[DIM2_AXIS]) % input._keras_shape[DIM2_AXIS]
        padding_dim3 = (
            residual._keras_shape[DIM3_AXIS]) % input._keras_shape[DIM3_AXIS]
        padding_dim1 = (math.floor(0.5 * padding_dim1),
                        math.ceil(0.5 * padding_dim1))
        padding_dim2 = (math.floor(0.5 * padding_dim2),
                        math.ceil(0.5 * padding_dim2))
        padding_dim3 = (math.floor(0.5 * padding_dim3),
                        math.ceil(0.5 * padding_dim3))

    else:
        # Downsampling case: input is larger, so the projection Conv3D
        # strides by the (ceil) size ratio.
        stride_dim1 = math.ceil(input._keras_shape[DIM1_AXIS] /
                                residual._keras_shape[DIM1_AXIS])
        stride_dim2 = math.ceil(input._keras_shape[DIM2_AXIS] /
                                residual._keras_shape[DIM2_AXIS])
        stride_dim3 = math.ceil(input._keras_shape[DIM3_AXIS] /
                                residual._keras_shape[DIM3_AXIS])

    equal_channels = residual._keras_shape[CHANNEL_AXIS] == input._keras_shape[
        CHANNEL_AXIS]

    # Identity shortcut unless shapes differ; then project with a 1x1x1
    # (transposed) convolution so the sum is well-defined.
    shortcut = input
    if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 \
            or not equal_channels:

        if deconv:  #output = stride_dim * input_dim + padding_dim
            shortcut = Conv3DTranspose(
                filters=residual._keras_shape[CHANNEL_AXIS],
                kernel_size=(1, 1, 1),
                strides=(stride_dim1, stride_dim2, stride_dim3),
                kernel_initializer="he_normal",
                padding="valid",
                kernel_regularizer=l2(1e-4))(input)

            shortcut = ZeroPadding3D(padding=(padding_dim1, padding_dim2,
                                              padding_dim3))(shortcut)

        else:
            shortcut = Conv3D(filters=residual._keras_shape[CHANNEL_AXIS],
                              kernel_size=(1, 1, 1),
                              strides=(stride_dim1, stride_dim2, stride_dim3),
                              kernel_initializer="he_normal",
                              padding="valid",
                              kernel_regularizer=l2(1e-4))(input)

    return add([shortcut, residual])
Beispiel #10
0
def get_deconv_layer(dimension, input, num_filters) :
    """Apply a kernel-2, stride-2 transposed convolution (2D or 3D)."""
    if dimension == 2:
        size = (2, 2)
        return Conv2DTranspose(num_filters, kernel_size=size, strides=size)(input)
    # Any non-2 dimension falls through to the 3D case, as before.
    size = (2, 2, 2)
    return Conv3DTranspose(num_filters, kernel_size=size, strides=size)(input)
Beispiel #11
0
def ConvolutionTranspose(ndim=2, *args, **kwargs):
    """Dimension-dispatching transposed-convolution factory.

    Returns a Conv2DTranspose for ndim == 2, a Conv3DTranspose for
    ndim == 3, and raises ValueError otherwise.  All other arguments are
    forwarded untouched to the layer constructor.
    """
    if ndim == 2:
        return Conv2DTranspose(*args, **kwargs)
    if ndim == 3:
        return Conv3DTranspose(*args, **kwargs)
    raise ValueError("ndim must be 2 or 3")
Beispiel #12
0
def decoder_model():
    """Functional decoder: (10, 16, 16, 32) features -> (20, 128, 128, 3).

    Three transposed convolutions grow the spatial dims (16 -> 16 -> 32
    -> 64) and the final stride-(2, 2, 2) layer doubles all three axes,
    including time.
    """
    inputs = Input(shape=(10, 16, 16, 32))

    def _post(tensor, act):
        # BatchNorm -> activation -> dropout, each applied per time step.
        out = TimeDistributed(BatchNormalization())(tensor)
        out = TimeDistributed(act)(out)
        return TimeDistributed(Dropout(0.5))(out)

    # 10x16x16 (stride 1 keeps the spatial size)
    conv_1 = Conv3DTranspose(filters=64,
                             kernel_size=(3, 5, 5),
                             padding='same',
                             strides=(1, 1, 1),
                             input_shape=(10, 16, 16, 32))(inputs)
    x = _post(conv_1, LeakyReLU(alpha=0.2))

    # 10x32x32
    conv_2 = Conv3DTranspose(filters=128,
                             kernel_size=(3, 5, 5),
                             padding='same',
                             strides=(1, 2, 2))(x)
    x = _post(conv_2, LeakyReLU(alpha=0.2))

    # 10x64x64
    conv_3 = Conv3DTranspose(filters=64,
                             kernel_size=(3, 5, 5),
                             padding='same',
                             strides=(1, 2, 2))(x)
    x = _post(conv_3, LeakyReLU(alpha=0.2))

    # 20x128x128, 3 channels
    conv_4 = Conv3DTranspose(filters=3,
                             kernel_size=(3, 11, 11),
                             strides=(2, 2, 2),
                             padding='same')(x)
    predictions = _post(conv_4, Activation('tanh'))

    return Model(inputs=inputs, outputs=predictions)
Beispiel #13
0
def transpose_block(input_tensor, skip_tensor, n_filters, kernel_size=3, strides=1):
    """Upsample `input_tensor` to match `skip_tensor` and concatenate.

    The transpose-conv strides are derived from the per-dim size ratio
    of the two tensors, so the `strides` parameter is accepted but
    unused (kept for signature compatibility).  NOTE(review): relies on
    a module-level `alpha` for the LeakyReLU slope — confirm it exists.
    """
    in_shape = K.int_shape(input_tensor)
    skip_shape = K.int_shape(skip_tensor)
    up_strides = tuple(skip_shape[d] // in_shape[d] for d in (1, 2, 3))

    upsampled = Conv3DTranspose(filters=n_filters,
                                kernel_size=kernel_size,
                                padding='same',
                                strides=up_strides,
                                kernel_initializer="he_normal")(input_tensor)
    activated = LeakyReLU(alpha=alpha)(upsampled)
    return Concatenate(axis=4)([activated, skip_tensor])
def unet_block_expand(block_input, numFts, concat_block, conv_kernel):
    """UNET expanding-path block.

    Upsamples spatially with a (2, 2, 1)-strided transposed conv,
    concatenates the skip tensor, then applies two Conv3D -> BatchNorm
    -> ReLU stages.
    """
    x = Conv3DTranspose(numFts, (3, 3, 3), strides=(2, 2, 1),
                        padding='same')(block_input)
    x = concatenate([x, concat_block], axis=4)
    for _ in range(2):
        x = Conv3D(numFts, conv_kernel, padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x
Beispiel #15
0
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
    """Recursive U-Net level builder.

    At depth 0 returns a single conv block (dropout 0.5); otherwise
    builds conv -> downsample -> recurse -> upsample -> skip-concat ->
    conv.  `mp` selects max-pooling vs strided conv for downsampling,
    `up` selects UpSampling3D+conv vs Conv3DTranspose for upsampling.
    """
    if depth <= 0:
        return conv_block(m, dim, acti, bn, res, do=0.5)

    n = conv_block(m, dim, acti, bn, res, do=0)
    if mp:
        m = MaxPooling3D(pool_size=(2, 2, 2))(n)
    else:
        m = Conv3D(dim, 3, strides=2, padding='same')(n)
    m = level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)

    if up:
        m = UpSampling3D(size=(2, 2, 2))(m)
        m = Conv3D(dim, 2, activation=acti, padding='same')(m)
    else:
        m = Conv3DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)

    n = Concatenate()([n, m])
    return conv_block(n, dim, acti, bn, res, do=0)
Beispiel #16
0
def build_generator(img_shape, gf):
    """U-Net Generator"""

    def conv3d(layer_input, filters, f_size=(4, 4, 4), bn=True):
        # Downsampling block: stride-2 convolution via create_convolution_block.
        return create_convolution_block(input_layer=layer_input,
                                        n_filters=filters,
                                        batch_normalization=bn,
                                        strides=(2, 2, 2),
                                        kernel_size=f_size)

    def deconv3d(layer_input, skip_input, filters, f_size=(4, 4, 4), drop=True):
        """Layers used during upsampling"""
        return create_convolution_block_up(input_layer=layer_input,
                                           skip_conn=skip_input,
                                           n_filters=filters,
                                           batch_normalization=True,
                                           strides=(2, 2, 2),
                                           kernel_size=f_size,
                                           dropout=drop)

    # Image input
    d0 = Input(batch_shape=img_shape)

    # Downsampling
    e1 = conv3d(d0, gf, bn=False)  # 64
    e2 = conv3d(e1, gf * 2)        # 128
    e3 = conv3d(e2, gf * 4)        # 256
    e4 = conv3d(e3, gf * 8)        # 512
    e5 = conv3d(e4, gf * 8)        # 512

    # bottleneck
    e6 = bottleneck(e5, gf * 8, batch_normalization=False,
                    kernel_size=(4, 4, 4), activation='relu',
                    padding='same', strides=(2, 2, 2),
                    instance_normalization=False)  # 512

    # Upsampling with skip connections
    u1 = deconv3d(e6, e5, gf * 8, drop=True)
    u2 = deconv3d(u1, e4, gf * 8, drop=True)
    u3 = deconv3d(u2, e3, gf * 4, drop=True)
    u4 = deconv3d(u3, e2, gf * 2, drop=False)
    u5 = deconv3d(u4, e1, gf, drop=False)

    # Final upsample back to the input resolution, then 1x1x1 projection.
    init = RandomNormal(mean=0.0, stddev=0.02)
    u6 = Conv3DTranspose(filters=gf, kernel_size=(4, 4, 4), padding='same',
                         kernel_initializer=init, strides=(2, 2, 2))(u5)
    final_convolution = Conv3D(1, (1, 1, 1))(u6)
    act = Activation('relu')(final_convolution)

    return Model(d0, act)
Beispiel #17
0
 def f(input):
     # Closure applying either a transposed or a regular 3D convolution,
     # then the _bn_relu helper.  Free variables (deconv, filters,
     # kernel_size, strides, kernel_initializer, padding,
     # kernel_regularizer) and _bn_relu come from an enclosing scope not
     # visible in this chunk.
     if deconv:
         conv = Conv3DTranspose(
             filters=filters,
             kernel_size=kernel_size,
             strides=strides,
             kernel_initializer=kernel_initializer,
             padding=padding,
             kernel_regularizer=kernel_regularizer)(input)
     else:
         conv = Conv3D(filters=filters,
                       kernel_size=kernel_size,
                       strides=strides,
                       kernel_initializer=kernel_initializer,
                       padding=padding,
                       kernel_regularizer=kernel_regularizer)(input)
     return _bn_relu(conv)
Beispiel #18
0
def upconvolve(opName,
               inputLayer,
               outputChannel,
               kernelSize,
               stride,
               targetShape,
               stddev=1e-2,
               reuse=False,
               weights_init='glorot_uniform'):
    """Apply a bias-free, linear-activation Conv3DTranspose named `opName`.

    NOTE(review): `targetShape`, `stddev` and `reuse` are accepted for
    signature compatibility but are not used here — presumably leftovers
    from a TensorFlow-native implementation; confirm against callers.
    """
    layer = Conv3DTranspose(outputChannel,
                            kernelSize,
                            strides=stride,
                            padding='same',
                            activation='linear',
                            use_bias=False,
                            name=opName,
                            kernel_initializer=weights_init)
    return layer(inputLayer)
Beispiel #19
0
def myConvTranspose(nf,
                    n_dims,
                    prefix=None,
                    suffix=None,
                    ks=3,
                    strides=1,
                    kernel_initializer=None,
                    bias_initializer=None):
    """Wrapper building a 2D or 3D transposed-convolution layer.

    Args:
        nf: number of output filters.
        n_dims: 2 or 3, selecting Conv2DTranspose vs Conv3DTranspose.
        prefix, suffix: optional pieces of the layer name; non-empty
            parts are joined with '_' around 'conv{n}Dtrans'.
        ks: kernel size.
        strides: int or tuple; an int is broadcast to all n_dims axes.
        kernel_initializer, bias_initializer: default to the Keras conv
            defaults ('glorot_uniform' / 'zeros') when None.

    Raises:
        ValueError: if n_dims is not 2 or 3 (previously this silently
            returned None).
    """
    if n_dims not in (2, 3):
        raise ValueError('n_dims must be 2 or 3, got {}'.format(n_dims))

    if kernel_initializer is None:
        kernel_initializer = 'glorot_uniform'  # keras default for conv kernels
    if bias_initializer is None:
        bias_initializer = 'zeros'  # default for keras conv

    # Build the layer name from whichever of prefix/suffix are non-empty.
    layer_tag = 'conv{}Dtrans'.format(n_dims)
    name = '_'.join(str(part)
                    for part in (prefix, layer_tag, suffix)
                    if part is not None and len(str(part)) > 0)

    if not isinstance(strides, tuple):
        strides = (strides,) * n_dims

    layer_cls = Conv2DTranspose if n_dims == 2 else Conv3DTranspose
    return layer_cls(nf,
                     kernel_size=ks,
                     padding='same',
                     strides=strides,
                     kernel_initializer=kernel_initializer,
                     bias_initializer=bias_initializer,
                     name=name)
Beispiel #20
0
    def f(input):
        # Pre-activation residual unit builder (closure): BN -> ReLU,
        # then either a transposed or a regular 3D convolution.  Free
        # variables (deconv, filters, kernel_size, strides,
        # kernel_initializer, padding, kernel_regularizer) and _bn_relu
        # come from an enclosing scope not visible in this chunk.
        activation = _bn_relu(input)

        if deconv:
            return Conv3DTranspose(
                filters=filters,
                kernel_size=kernel_size,
                strides=strides,
                kernel_initializer=kernel_initializer,
                padding=padding,
                kernel_regularizer=kernel_regularizer)(activation)
        else:
            return Conv3D(filters=filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          kernel_initializer=kernel_initializer,
                          padding=padding,
                          kernel_regularizer=kernel_regularizer)(activation)
def conv_block_decode(block_input,
                      numFt1,
                      numFt2,
                      concat_block,
                      conv_kernel,
                      strides=(2, 2, 1)):
    """Convolutional block in the decoding path of UNET.

    Upsamples with a strided transposed conv, concatenates the skip
    tensor, applies two conv stages at numFt1 filters, a 0.5 dropout,
    then two conv stages at numFt2 filters.
    """
    def _conv_bn_relu(tensor, n_filters):
        # Single Conv3D -> BatchNorm -> ReLU stage.
        tensor = Conv3D(n_filters, conv_kernel, padding='same')(tensor)
        tensor = BatchNormalization()(tensor)
        return Activation('relu')(tensor)

    x = Conv3DTranspose(numFt1, (3, 3, 3), strides=strides,
                        padding='same')(block_input)
    x = concatenate([x, concat_block], axis=4)
    x = _conv_bn_relu(x, numFt1)
    x = _conv_bn_relu(x, numFt1)
    x = Dropout(0.5)(x)
    x = _conv_bn_relu(x, numFt2)
    x = _conv_bn_relu(x, numFt2)
    return x
Beispiel #22
0
def unet_3d(shape, n_filters):
    """Standard 3D U-Net with four levels, padded/cropped to fit.

    The input is zero-padded (pad_to_fit) so every MaxPooling3D halving
    is exact, and the output is cropped back (crop_to_fit) to the
    original shape.  Output is a single-channel sigmoid volume.
    """
    def double_conv(x, filters, kernel=(3, 3, 3), batch_norm=True,
                    padding='same'):
        # Two Conv3D -> (BatchNorm) -> ReLU stages.
        for _ in range(2):
            x = Conv3D(filters=filters, kernel_size=kernel,
                       padding=padding)(x)
            if batch_norm:
                x = BatchNormalization()(x)
            x = Activation('relu')(x)
        return x

    inputs = Input((*shape, 1))
    padded = pad_to_fit(inputs)

    # Contracting path; keep each level's conv output for the skips.
    skips = []
    x = padded
    for level in range(4):
        x = double_conv(x, n_filters * 2 ** level)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)

    # Bridge
    x = double_conv(x, n_filters * 16)

    # Expansive path: upsample, concat the matching skip, convolve.
    for level in reversed(range(4)):
        x = Conv3DTranspose(filters=n_filters * 2 ** level,
                            kernel_size=(3, 3, 3),
                            strides=(2, 2, 2),
                            padding='same')(x)
        x = concatenate([x, skips[level]])
        x = double_conv(x, n_filters * 2 ** level)

    outputs = Conv3D(filters=1, kernel_size=(1, 1, 1),
                     activation='sigmoid')(x)
    outputs = crop_to_fit(inputs, outputs)

    return Model(inputs=[inputs], outputs=[outputs])
Beispiel #23
0
def deconv3D_layer_bn(l0, name=None, filters=32, kernel_size=(2, 2, 2),
                      strides=(2, 2, 2), padding='same', activation='relu',
                      kernel_initializer="he_normal"):
    """Apply a transposed 3D convolution followed by batch normalization.

    # Arguments
        l0: input tensor.
        name: optional name for the Conv3DTranspose layer.
        filters: number of output channels of the transposed convolution.
        kernel_size: size of the 3D deconvolution kernel.
        strides: upsampling strides (defaults double each spatial dim).
        padding: padding mode passed to Conv3DTranspose.
        activation: activation applied inside the transposed convolution.
        kernel_initializer: weight initializer for the deconvolution.
    # Returns
        The batch-normalized output tensor.
    """
    deconv = Conv3DTranspose(filters=filters,
                             name=name,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding=padding,
                             activation=activation,
                             kernel_initializer=kernel_initializer)
    return BatchNormalization()(deconv(l0))
Beispiel #24
0
    def build(input_shape, num_outputs, block_fn, reg_factor):
        """Instantiate a VoxResNet-style Keras segmentation model.

        Builds a two-layer convolutional stem, a residual stack at half
        resolution, and two deconvolution branches (C1 at full resolution,
        C2 upsampled back from half resolution) that are concatenated and
        projected to per-voxel class scores with a softmax.

        # Arguments
            input_shape: Tuple of input shape in the format
                (conv_dim1, conv_dim2, conv_dim3, channels) if
                dim_ordering='tf', or
                (channels, conv_dim1, conv_dim2, conv_dim3) if
                dim_ordering='th'.
            num_outputs: The number of output channels (classes) at the
                final softmax layer.
            block_fn: Unit block to use, e.g. 'basic_block' or
                'bottleneck_block'; resolved through _get_block.
            reg_factor: L2 weight-regularization factor applied to every
                convolution in the network.
        # Returns
            model: a Keras Model that takes a 5D tensor (volumetric images
            in batch) as input and returns a per-voxel softmax volume.
        """
        # Register the custom argmax activation under a stable name so a
        # saved model can be deserialized later.
        get_custom_objects().update(
            {'argmax_activation': Activation(custom_activation)})
        _handle_data_format()
        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1, conv_dim2, conv_dim3, channels) "
                             "for tensorflow as backend or "
                             "(channels, conv_dim1, conv_dim2, conv_dim3) "
                             "for theano as backend")

        block_fn = _get_block(block_fn)
        v_input = Input(shape=input_shape)
        # Stem: two stride-1 conv+BN+ReLU blocks at full input resolution.
        conv1a = _conv_bn_relu3D(filters=32,
                                 kernel_size=(3, 3, 3),
                                 strides=(1, 1, 1),
                                 kernel_regularizer=l2(reg_factor))(v_input)
        conv1b = _conv_bn_relu3D(filters=32,
                                 kernel_size=(3, 3, 3),
                                 strides=(1, 1, 1),
                                 kernel_regularizer=l2(reg_factor))(conv1a)
        # C1 branch: stride-1 transposed conv (resolution unchanged)
        # followed by a 1x1x1 channel projection.
        C1d = Conv3DTranspose(filters=32,
                              kernel_size=(3, 3, 3),
                              strides=(1, 1, 1),
                              kernel_regularizer=l2(reg_factor),
                              padding='same')(conv1b)
        C1 = Conv3D(filters=32,
                    kernel_size=(1, 1, 1),
                    strides=(1, 1, 1),
                    padding='same',
                    kernel_regularizer=l2(reg_factor))(C1d)

        # Downsample by 2 in every spatial dimension before the residual
        # stack.
        conv1c = Conv3D(filters=64,
                        kernel_size=(3, 3, 3),
                        strides=(2, 2, 2),
                        kernel_regularizer=l2(reg_factor),
                        padding='same')(conv1b)

        # voxres block2-3: two repetitions of the residual unit at half
        # resolution.
        filters = 64
        block23 = _residual_block3d(block_fn,
                                    filters=filters,
                                    kernel_regularizer=l2(reg_factor),
                                    repetitions=2,
                                    is_first_layer=False)(conv1c)
        # C2 branch: stride-2 transposed conv upsamples back to the input
        # resolution, then a 1x1x1 channel projection.
        C2d = Conv3DTranspose(filters=filters,
                              kernel_size=(3, 3, 3),
                              strides=(2, 2, 2),
                              kernel_regularizer=l2(reg_factor),
                              padding='same')(block23)

        C2 = Conv3D(filters=filters,
                    kernel_size=(1, 1, 1),
                    strides=(1, 1, 1),
                    padding='same',
                    kernel_regularizer=l2(reg_factor))(C2d)

        # Fuse both branches along the channel axis and map to per-voxel
        # class scores.
        C = Concatenate(axis=-1)([C1, C2])
        C = Conv3D(filters=num_outputs,
                   kernel_size=(1, 1, 1),
                   strides=(1, 1, 1),
                   padding='same',
                   kernel_regularizer=l2(reg_factor))(C)
        #        F = Reshape((input_shape[0]*input_shape[1]*input_shape[2]*num_outputs,))(C)
        C = Activation('softmax')(C)
        #        C = Lambda(custom_activation, output_shape = (input_shape[0],input_shape[1],input_shape[2],1))(C)

        model = Model(inputs=v_input, outputs=C)
        return model
    def create_model(self):
        """Build and compile a 3D U-Net for multi-class voxel segmentation.

        Expects cubic inputs of side self.IMG_SIZE with one channel,
        rescales intensities by 1/255, and produces a softmax volume with
        self.NUM_CLASSES channels. The compiled model is stored on
        self.model (nothing is returned).
        """
        print("Build U-Net model")

        def double_conv(tensor, filters, drop_rate):
            # Two 3x3x3 ELU convolutions with dropout in between —
            # the repeating unit on both sides of the U.
            tensor = Conv3D(filters, (3, 3, 3),
                            activation='elu',
                            kernel_initializer='he_normal',
                            padding='same')(tensor)
            tensor = Dropout(drop_rate)(tensor)
            return Conv3D(filters, (3, 3, 3),
                          activation='elu',
                          kernel_initializer='he_normal',
                          padding='same')(tensor)

        inputs = Input((self.IMG_SIZE, self.IMG_SIZE, self.IMG_SIZE, 1))
        scaled = Lambda(lambda x: x / 255)(inputs)

        # Contracting path: filters grow 8 -> 32 -> 64 -> 128.
        enc1 = double_conv(scaled, 8, 0.1)
        down1 = MaxPooling3D((2, 2, 2))(enc1)
        enc2 = double_conv(down1, 32, 0.1)
        down2 = MaxPooling3D((2, 2, 2))(enc2)
        enc3 = double_conv(down2, 64, 0.2)
        down3 = MaxPooling3D((2, 2, 2))(enc3)
        enc4 = double_conv(down3, 128, 0.2)
        down4 = MaxPooling3D(pool_size=(2, 2, 2))(enc4)

        # Bridge at the bottom of the U.
        bridge = double_conv(down4, 256, 0.3)

        # Expanding path: upsample and concatenate the mirrored encoder
        # feature maps (skip connections).
        up1 = Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2),
                              padding='same')(bridge)
        dec1 = double_conv(concatenate([up1, enc4]), 128, 0.2)

        up2 = Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2),
                              padding='same')(dec1)
        dec2 = double_conv(concatenate([up2, enc3]), 64, 0.2)

        up3 = Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2),
                              padding='same')(dec2)
        dec3 = double_conv(concatenate([up3, enc2]), 32, 0.1)

        up4 = Conv3DTranspose(8, (2, 2, 2), strides=(2, 2, 2),
                              padding='same')(dec3)
        dec4 = double_conv(concatenate([up4, enc1], axis=-1), 8, 0.1)

        # Per-voxel class probabilities.
        outputs = Conv3D(self.NUM_CLASSES, (1, 1, 1),
                         activation='softmax')(dec4)

        self.model = Model(inputs=[inputs], outputs=[outputs])
        #self.model.compile(optimizer='adam', loss=iou_coef_loss, metrics=[ iou_coef_loss, dice_coef_loss, "accuracy" ])
        self.model.compile(optimizer='adam',
                           loss="categorical_crossentropy",
                           metrics=[
                               iou_coef, dice_coef, iou_coef_loss,
                               dice_coef_loss, "accuracy"
                           ])
        self.model.summary()
    def build_generator(self):
        """Build the conditional volume generator.

        Inputs: one 96x96x1 noise plane plus two 96x96x1 conditioning
        planes (x2, x3). Each plane is encoded by an identical Conv2D
        tower into a dense code; the concatenated 64-d latent vector
        (16 + 24 + 24) is reshaped to 4x4x4x1 and progressively upsampled
        by transposed 3D convolutions into a 96x96x96x1 sigmoid volume.
        """
        noise = Input(shape=(96, 96, 1))
        x2 = Input(shape=(96, 96, 1))
        x3 = Input(shape=(96, 96, 1))

        def encode_plane(plane, code_size):
            # Conv2D tower: alternating stride-2/stride-1 conv+BN stages,
            # then a single-channel conv, flatten, and dense projection.
            t = plane
            for filters, stride in ((16, 2), (16, 1), (32, 2), (32, 1),
                                    (64, 2), (64, 1)):
                t = Conv2D(filters, (2, 2), strides=stride, padding='same',
                           activation='relu')(t)
                t = BatchNormalization(momentum=0.9)(t)
            t = Conv2D(1, (2, 2), strides=2, padding='same',
                       activation='relu')(t)
            return Dense(code_size)(Flatten()(t))

        # Conditions are encoded to 24-d codes, the noise to a 16-d code.
        cond2 = encode_plane(x2, 24)
        cond3 = encode_plane(x3, 24)
        noise_code = encode_plane(noise, 16)

        # 64-d latent space (16 + 24 + 24).
        latent = concatenate([noise_code, cond2, cond3], axis=-1)

        # Reconstruct the 3D volume: 4 -> 12 -> 24 -> 48 -> 96 with
        # stride-1 refinement stages interleaved.
        volume = Reshape((4, 4, 4, 1))(latent)
        for filters, kernel, stride in ((64, 7, 3), (64, 3, 1), (32, 4, 2),
                                        (32, 3, 1), (16, 4, 2), (16, 3, 1),
                                        (16, 4, 2)):
            volume = Conv3DTranspose(filters, (kernel,) * 3, strides=stride,
                                     padding='same')(volume)
            volume = BatchNormalization(momentum=0.9)(volume)
            volume = Activation('relu')(volume)

        volume = Conv3DTranspose(1, (1, 1, 1), strides=1,
                                 padding='same')(volume)
        volume = Activation('sigmoid')(volume)

        return Model([noise, x2, x3], volume)
Beispiel #27
0
def Vnet_3d(input_img, n_filters=8, dropout=0.2, batch_norm=True):
    """V-Net style 3D segmentation network with residual head.

    # Arguments
        input_img: 5D input tensor (batch of single-channel volumes).
        n_filters: base channel count; deeper stages use multiples of it.
        dropout: dropout rate applied after each strided stage and each
            decoder conv block.
        batch_norm: kept for API compatibility; the conv blocks below are
            always built with batch normalization enabled.
    # Returns
        A Keras Model producing a 4-class per-voxel softmax.
    """

    def downsample(tensor, filters):
        # Strided 2x2x2 convolution halving every spatial dimension.
        return Conv3D(filters, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                      padding='same')(tensor)

    def upsample(tensor, filters):
        # Transposed 2x2x2 convolution doubling every spatial dimension.
        return Conv3DTranspose(tensor_filters_k(filters), (2, 2, 2),
                               strides=(2, 2, 2), padding='same')(tensor)

    def tensor_filters_k(filters):
        # Identity passthrough kept trivial for readability.
        return filters

    # Encoder ("left side of the V").
    c1 = Conv3D(n_filters, kernel_size=(5, 5, 5), strides=(1, 1, 1),
                padding='same')(input_img)
    c2 = downsample(c1, n_filters * 2)
    c3 = conv_block(c2, n_filters * 2, 5, True)

    p3 = Dropout(dropout)(downsample(c3, n_filters * 4))
    c4 = conv_block(p3, n_filters * 4, 5, True)

    p4 = Dropout(dropout)(downsample(c4, n_filters * 8))
    c5 = conv_block(p4, n_filters * 8, 5, True)

    p6 = Dropout(dropout)(downsample(c5, n_filters * 16))

    # Bottom of the V.
    p7 = conv_block(p6, n_filters * 16, 5, True)

    # Decoder with skip connections back to the encoder stages.
    u6 = concatenate([upsample(p7, n_filters * 8), c5])
    c7 = Dropout(dropout)(conv_block(u6, n_filters * 16, 5, True))

    u8 = concatenate([upsample(c7, n_filters * 4), c4])
    c8 = Dropout(dropout)(conv_block(u8, n_filters * 8, 5, True))

    u9 = concatenate([upsample(c8, n_filters * 2), c3])
    c9 = Dropout(dropout)(conv_block(u9, n_filters * 4, 5, True))

    u10 = concatenate([upsample(c9, n_filters), c1])

    # Final residual stage: 5x5x5 conv plus identity shortcut from u10.
    c10 = Conv3D(n_filters * 2, kernel_size=(5, 5, 5), strides=(1, 1, 1),
                 padding='same')(u10)
    c10 = Dropout(dropout)(c10)
    c10 = add([c10, u10])

    outputs = Conv3D(4, (1, 1, 1), activation='softmax')(c10)

    return Model(inputs=input_img, outputs=outputs)
Beispiel #28
0
    def build_generator(self):
        """Build the four-input conditional volume generator.

        Inputs: one 96x96x1 noise plane and three 96x96x1 label planes
        (x1, x2, x3). Each plane goes through an identical Conv2D encoder
        tower ending in a 16-d dense code; the four codes are concatenated
        into a 64-d latent vector, reshaped to 4x4x4x1, and upsampled by a
        stack of transposed 3D convolutions into a 96x96x96x1 sigmoid
        volume.
        """
        noise = Input(shape=(96, 96, 1))
        x1 = Input(shape=(96, 96, 1))
        x2 = Input(shape=(96, 96, 1))
        x3 = Input(shape=(96, 96, 1))

        def encode_plane(plane):
            # Conv2D tower: alternating stride-2/stride-1 conv+BN stages,
            # then a single-channel conv, flatten, and 16-d projection.
            t = plane
            for filters, stride in ((16, 2), (16, 1), (32, 2), (32, 1),
                                    (64, 2), (64, 1)):
                t = Conv2D(filters, (2, 2), strides=stride, padding='same',
                           activation='relu')(t)
                t = BatchNormalization(momentum=0.9)(t)
            t = Conv2D(1, (2, 2), strides=2, padding='same',
                       activation='relu')(t)
            return Dense(16)(Flatten()(t))

        tower_1 = encode_plane(x1)
        tower_2 = encode_plane(x2)
        tower_3 = encode_plane(x3)
        n_tower = encode_plane(noise)

        # 64-d latent code (4 x 16).
        latent = concatenate([n_tower, tower_1, tower_2, tower_3], axis=-1)

        # Reconstruct the 3D volume: 4 -> 12 -> 24 -> 48 -> 96 with
        # stride-1 refinement stages interleaved.
        volume = Reshape((4, 4, 4, 1))(latent)
        for filters, kernel, stride in ((64, 7, 3), (64, 3, 1), (32, 4, 2),
                                        (32, 3, 1), (16, 4, 2), (16, 3, 1),
                                        (16, 4, 2)):
            volume = Conv3DTranspose(filters, (kernel,) * 3, strides=stride,
                                     padding='same')(volume)
            volume = BatchNormalization(momentum=0.9)(volume)
            volume = Activation('relu')(volume)

        volume = Conv3DTranspose(1, (1, 1, 1), strides=1,
                                 padding='same')(volume)
        volume = Activation('sigmoid')(volume)

        return Model([noise, x1, x2, x3], volume)
Beispiel #29
0
def Unet(img_shape, params, path='./'):
    """Build a 2-D or 3-D U-Net and write a PNG visualization of it.

    Dimensionality is inferred from the rank of ``img_shape``: 3 entries
    (H, W, C) give a 2-D network, 4 entries (D, H, W, C) a 3-D one.
    Depth is inferred from the spatial size: 3 encoder/decoder levels
    for sizes in [64, 128), 4 levels for sizes >= 128.

    Parameters
    ----------
    img_shape : tuple of int
        Shape of one input image, channels last.
    params : dict
        Hyper-parameters: 'coarse_dim' (bottom-level filter count),
        'kernel_size', 'dropout', 'activation'.
    path : str, optional
        Directory the 'model_visualization.png' file is written to.

    Returns
    -------
    Model
        The (uncompiled) Keras U-Net model.

    Raises
    ------
    ValueError
        If ``img_shape`` has an unsupported rank or a spatial size below
        64.  (Previously these cases only printed a message and crashed
        later with a confusing NameError on an undefined tensor.)
    """
    ndim = np.size(img_shape)

    # Fail fast on shapes the builder cannot handle instead of letting
    # the graph construction below die with a NameError.
    if ndim not in (3, 4):
        raise ValueError('img_shape must have 3 (2-D) or 4 (3-D) entries, '
                         'got %d' % ndim)
    if img_shape[0] < 64:
        raise ValueError('ERROR: input data have wrong dimension')

    # print message at runtime
    if (img_shape[0] == 64 and ndim == 3):
        print('Create 2D U-Net network with 3 levels...\n')
    elif (img_shape[0] == 128 and ndim == 3):
        print('Create 2D U-Net network with 4 levels...\n')
    elif (img_shape[0] == 64 and ndim == 4):
        print('Create 3D U-Net network with 3 levels...\n')
    elif (img_shape[0] == 128 and ndim == 4):
        print('Create 3D U-Net network with 4 levels...\n')
    else:
        print('???')

    # Hoisted kernel-size shorthands: Keras accepts a scalar for 2-D
    # convolutions; 3-D layers get an explicit triple.
    ks = params['kernel_size']
    ks3 = (ks, ks, ks)

    def Conv2D_Layers(prev_layer, kernel_size, nr_filts, layer_name):
        """Two (Conv2D -> BatchNorm -> Activation) stages with shared settings."""
        # first layer
        a = Conv2D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C1' % layer_name)(prev_layer)
        a = BatchNormalization(name='%s_BN1' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A1' % layer_name)(a)
        # second layer
        a = Conv2D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C2' % layer_name)(a)
        a = BatchNormalization(name='%s_BN2' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A2' % layer_name)(a)
        return a

    def Conv3D_Layers(prev_layer, kernel_size, nr_filts, layer_name):
        """Two (Conv3D -> BatchNorm -> Activation) stages with shared settings."""
        # first layer
        a = Conv3D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C1' % layer_name)(prev_layer)
        a = BatchNormalization(name='%s_BN1' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A1' % layer_name)(a)
        # second layer
        a = Conv3D(filters=nr_filts,
                   kernel_size=kernel_size,
                   padding='same',
                   kernel_initializer="he_normal",
                   name='%s_C2' % layer_name)(a)
        a = BatchNormalization(name='%s_BN2' % layer_name)(a)
        a = Activation(params['activation'], name='relu_%s_A2' % layer_name)(a)
        return a

    img_input = Input(shape=img_shape, name='Image')

    # U-Net Encoder - upper level
    if ndim == 3:
        # 2-D network
        e1c = Conv2D_Layers(prev_layer=img_input,
                            nr_filts=int(params['coarse_dim'] / 16),
                            kernel_size=ks,
                            layer_name='E1')
        e1 = MaxPooling2D(pool_size=(2, 2), name='E1_P')(e1c)
        e1 = Dropout(params['dropout'] * 0.5, name='E1_D2')(e1)
    else:
        # 3-D network
        e1c = Conv3D_Layers(prev_layer=img_input,
                            nr_filts=int(params['coarse_dim'] / 16),
                            kernel_size=ks3,
                            layer_name='E1')
        e1 = MaxPooling3D(pool_size=(2, 2, 2), name='E1_P')(e1c)
        e1 = Dropout(params['dropout'] * 0.5, name='E1_D2')(e1)

    # U-Net Encoder - second level
    if ndim == 3:
        # 2-D network
        e2c = Conv2D_Layers(prev_layer=e1,
                            nr_filts=int(params['coarse_dim'] / 8),
                            kernel_size=ks,
                            layer_name='E2')
        e2 = MaxPooling2D(pool_size=(2, 2), name='E2_P')(e2c)
        e2 = Dropout(params['dropout'], name='E2_D2')(e2)
    else:
        # 3-D network
        e2c = Conv3D_Layers(prev_layer=e1,
                            nr_filts=int(params['coarse_dim'] / 8),
                            kernel_size=ks3,
                            layer_name='E2')
        e2 = MaxPooling3D(pool_size=(2, 2, 2), name='E2_P')(e2c)
        e2 = Dropout(params['dropout'], name='E2_D2')(e2)

    # U-Net Encoder - third level
    if ndim == 3:
        # 2-D network
        e3c = Conv2D_Layers(prev_layer=e2,
                            nr_filts=int(params['coarse_dim'] / 4),
                            kernel_size=ks,
                            layer_name='E3')
        e3 = MaxPooling2D(pool_size=(2, 2), name='E3_P')(e3c)
        e3 = Dropout(params['dropout'], name='E3_D2')(e3)
    else:
        # 3-D network
        e3c = Conv3D_Layers(prev_layer=e2,
                            nr_filts=int(params['coarse_dim'] / 4),
                            kernel_size=ks3,
                            layer_name='E3')
        e3 = MaxPooling3D(pool_size=(2, 2, 2), name='E3_P')(e3c)
        e3 = Dropout(params['dropout'], name='E3_D2')(e3)

    if img_shape[0] < 128:
        # 3-level network: the bottom level sits directly below E3.
        if ndim == 3:
            # 2-D network
            b = Conv2D_Layers(prev_layer=e3,
                              nr_filts=int(params['coarse_dim'] / 2),
                              kernel_size=ks,
                              layer_name='B')

            d3 = Conv2DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=ks,
                                 strides=(2, 2),
                                 padding='same',
                                 name='D3_DC')(b)
        else:
            # 3-D network
            b = Conv3D_Layers(prev_layer=e3,
                              nr_filts=int(params['coarse_dim'] / 2),
                              kernel_size=ks3,
                              layer_name='B')

            d3 = Conv3DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=ks3,
                                 strides=(2, 2, 2),
                                 padding='same',
                                 name='D3_DC')(b)
    else:
        # 4-level network: one extra encoder level (E4) plus a deeper bottom.
        if ndim == 3:
            # 2-D network
            # U-Net Encoder - fourth level
            e4c = Conv2D_Layers(prev_layer=e3,
                                nr_filts=int(params['coarse_dim'] / 2),
                                kernel_size=ks,
                                layer_name='E4')
            e4 = MaxPooling2D(pool_size=(2, 2), name='E4_P')(e4c)
            e4 = Dropout(params['dropout'], name='E4_D2')(e4)

            # U-Net Encoder - bottom level
            b = Conv2D_Layers(prev_layer=e4,
                              nr_filts=params['coarse_dim'],
                              kernel_size=ks,
                              layer_name='B')

            # U-Net Decoder - fourth level
            d4 = Conv2DTranspose(filters=int(params['coarse_dim'] / 2),
                                 kernel_size=ks,
                                 strides=(2, 2),
                                 padding='same',
                                 name='D4_DC')(b)
            d4 = concatenate([d4, e4c], name='merge_layer_E4_A2')
            d4 = Dropout(params['dropout'], name='D4_D1')(d4)
            d4 = Conv2D_Layers(prev_layer=d4,
                               nr_filts=int(params['coarse_dim'] / 2),
                               kernel_size=ks,
                               layer_name='D4')

            # U-Net Decoder - third level
            d3 = Conv2DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=ks,
                                 strides=(2, 2),
                                 padding='same',
                                 name='D3_DC')(d4)
        else:
            # 3-D network
            # U-Net Encoder - fourth level
            e4c = Conv3D_Layers(prev_layer=e3,
                                nr_filts=int(params['coarse_dim'] / 2),
                                kernel_size=ks3,
                                layer_name='E4')
            e4 = MaxPooling3D(pool_size=(2, 2, 2), name='E4_P')(e4c)
            e4 = Dropout(params['dropout'], name='E4_D2')(e4)

            # U-Net Encoder - bottom level
            b = Conv3D_Layers(prev_layer=e4,
                              nr_filts=params['coarse_dim'],
                              kernel_size=ks3,
                              layer_name='B')

            # U-Net Decoder - fourth level
            d4 = Conv3DTranspose(filters=int(params['coarse_dim'] / 2),
                                 kernel_size=ks3,
                                 strides=(2, 2, 2),
                                 padding='same',
                                 name='D4_DC')(b)
            d4 = concatenate([d4, e4c], name='merge_layer_E4_A2')
            d4 = Dropout(params['dropout'], name='D4_D1')(d4)
            d4 = Conv3D_Layers(prev_layer=d4,
                               nr_filts=int(params['coarse_dim'] / 2),
                               kernel_size=ks3,
                               layer_name='D4')

            # U-Net Decoder - third level
            d3 = Conv3DTranspose(filters=int(params['coarse_dim'] / 4),
                                 kernel_size=ks3,
                                 strides=(2, 2, 2),
                                 padding='same',
                                 name='D3_DC')(d4)

    # U-Net Decoder - third level (continue): fuse with the E3 skip.
    if ndim == 3:
        # 2-D network
        d3 = concatenate([d3, e3c], name='merge_layer_E3_A2')
        d3 = Dropout(params['dropout'], name='D3_D1')(d3)
        d3 = Conv2D_Layers(prev_layer=d3,
                           nr_filts=int(params['coarse_dim'] / 2),
                           kernel_size=ks,
                           layer_name='D3')
    else:
        # 3-D network
        d3 = concatenate([d3, e3c], name='merge_layer_E3_A2')
        d3 = Dropout(params['dropout'], name='D3_D1')(d3)
        d3 = Conv3D_Layers(prev_layer=d3,
                           nr_filts=int(params['coarse_dim'] / 2),
                           kernel_size=ks3,
                           layer_name='D3')

    # U-Net Decoder - second level: upsample, fuse with the E2 skip.
    if ndim == 3:
        # 2-D network
        d2 = Conv2DTranspose(filters=int(params['coarse_dim'] / 8),
                             kernel_size=ks,
                             strides=(2, 2),
                             padding='same',
                             name='D2_DC')(d3)
        d2 = concatenate([d2, e2c], name='merge_layer_E2_A2')
        d2 = Dropout(params['dropout'], name='D2_D1')(d2)
        d2 = Conv2D_Layers(prev_layer=d2,
                           nr_filts=int(params['coarse_dim'] / 4),
                           kernel_size=ks,
                           layer_name='D2')
    else:
        # 3-D network
        d2 = Conv3DTranspose(filters=int(params['coarse_dim'] / 8),
                             kernel_size=ks3,
                             strides=(2, 2, 2),
                             padding='same',
                             name='D2_DC')(d3)
        d2 = concatenate([d2, e2c], name='merge_layer_E2_A2')
        d2 = Dropout(params['dropout'], name='D2_D1')(d2)
        d2 = Conv3D_Layers(prev_layer=d2,
                           nr_filts=int(params['coarse_dim'] / 4),
                           kernel_size=ks3,
                           layer_name='D2')

    # U-Net Decoder - upper level: upsample, fuse with the E1 skip.
    if ndim == 3:
        d1 = Conv2DTranspose(filters=int(params['coarse_dim'] / 16),
                             kernel_size=ks,
                             strides=(2, 2),
                             padding='same',
                             name='D1_DC')(d2)
        d1 = concatenate([d1, e1c], name='merge_layer_E1_A2')
        d1 = Dropout(params['dropout'], name='D1_D1')(d1)
        d1 = Conv2D_Layers(prev_layer=d1,
                           nr_filts=int(params['coarse_dim'] / 16),
                           kernel_size=ks,
                           layer_name='D1')
    else:
        d1 = Conv3DTranspose(filters=int(params['coarse_dim'] / 16),
                             kernel_size=ks3,
                             strides=(2, 2, 2),
                             padding='same',
                             name='D1_DC')(d2)
        d1 = concatenate([d1, e1c], name='merge_layer_E1_A2')
        d1 = Dropout(params['dropout'], name='D1_D1')(d1)
        d1 = Conv3D_Layers(prev_layer=d1,
                           nr_filts=int(params['coarse_dim'] / 16),
                           kernel_size=ks3,
                           layer_name='D1')

    # Outro layer: project back to the input's channel count.
    if ndim == 3:
        output_image = Conv2D(filters=int(img_shape[-1]),
                              kernel_size=ks,
                              strides=(1, 1),
                              padding='same',
                              name='out_C')(d1)
    else:
        output_image = Conv3D(filters=int(img_shape[-1]),
                              kernel_size=ks3,
                              strides=(1, 1, 1),
                              padding='same',
                              name='out_C')(d1)

    output_image = Activation("sigmoid", name='sigmoid')(output_image)

    model = Model(inputs=[img_input], outputs=[output_image], name='Unet')

    plot_model(model,
               to_file=path + 'model_visualization.png',
               show_shapes=True,
               show_layer_names=True)

    return model
Beispiel #30
0
def fCreateModel_FCN_MultiFM_MultiPath(patchSize,
                                       patchSize_down,
                                       dr_rate=0.0,
                                       iPReLU=0,
                                       l1_reg=0,
                                       l2_reg=1e-6):
    """Two-pathway fully-convolutional classifier with multi-level fusion.

    Total params: 2,841,098.  The dense layer is replaced by a 1x1x1
    convolution with two filters (one per class).  For each pathway, the
    feature maps from the deepest down-convolution are additionally
    upsampled by a transposed convolution and added to the feature maps
    from the second down-convolution; the fused maps go through the same
    two-class convolution.  The four resulting predictions (two per
    pathway) are averaged and passed through a softmax.

    Parameters
    ----------
    patchSize, patchSize_down : sequence
        Spatial sizes of the full-resolution and down-scaled input patches.
    dr_rate : float
        Dropout rate used throughout.
    iPReLU : int
        Activation selector forwarded to the V-Net building blocks.
    l1_reg, l2_reg : float
        Regularization weights for the classification convolutions
        (l2_reg also regularizes the V-Net blocks).

    Returns
    -------
    Model
        Keras model mapping [inp1, inp2] to class probabilities.
    """
    level_strides = fgetStrides()
    level_kernels = fgetKernelNumber()

    def encode(tensor):
        """Run the three conv + down-conv stages on one input tensor.

        Returns the feature maps after the second and the third
        down-convolution (the two fusion points used below).
        """
        mid = None
        for level in range(3):
            tensor = fCreateVNet_Block(tensor,
                                       level_kernels[level],
                                       type=fgetLayerNumConv(),
                                       l2_reg=l2_reg)
            tensor = fCreateVNet_DownConv_Block(tensor,
                                                tensor._keras_shape[1],
                                                level_strides[level],
                                                iPReLU=iPReLU,
                                                dr_rate=dr_rate,
                                                l2_reg=l2_reg)
            if level == 1:
                mid = tensor
        return mid, tensor

    def class_head(tensor):
        """Two-class 1x1x1 convolution followed by global average pooling."""
        logits = Conv3D(2,
                        kernel_size=(1, 1, 1),
                        kernel_initializer='he_normal',
                        weights=None,
                        padding='valid',
                        strides=(1, 1, 1),
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(tensor)
        return GlobalAveragePooling3D()(logits)

    def fused_head(deep, mid):
        """Upsample the deepest FM, add the level-2 FM, then classify."""
        upsampled = Conv3DTranspose(filters=97,
                                    kernel_size=(3, 3, 1),
                                    strides=(2, 2, 1),
                                    padding='same')(deep)
        return class_head(add([upsampled, mid]))

    # Pathway 1: full-resolution patches.
    inp1 = Input(shape=(1, int(patchSize[0]), int(patchSize[1]),
                        int(patchSize[2])))
    mid1, deep1 = encode(inp1)

    # Pathway 2: down-scaled patches (same block builders and settings).
    inp2 = Input(shape=(1, int(patchSize_down[0]), int(patchSize_down[1]),
                        int(patchSize_down[2])))
    mid2, deep2 = encode(inp2)

    # Per-pathway predictions: one from the deepest FM alone, one from
    # the deepest FM fused with the level-2 FM.
    pred_deep1 = class_head(Dropout(dr_rate)(deep1))
    pred_fused1 = fused_head(deep1, mid1)
    pred_deep2 = class_head(Dropout(dr_rate)(deep2))
    pred_fused2 = fused_head(deep2, mid2)

    # Combine the four predictions by averaging.
    merged = average([pred_deep1, pred_fused1, pred_deep2, pred_fused2])
    predict = Activation('softmax')(merged)
    return Model(inputs=[inp1, inp2], outputs=predict)