Example #1
def Conv3D_KDD():
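    # Stacked Conv3D layers encode the WRF feature sequence; each output frame is
    # then sliced out of the Conv3D feature map with Cropping3D, concatenated with
    # the (permuted) history frames, and refined by its own pair of Conv2D layers.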
    WRF_inputs = Input(shape=(num_frames, 159, 159,
                              fea_dim))  # (bs, 6, 159, 159, fea_dim)
    _history_inputs = Input(shape=(num_frames_truth, 159, 159,
                                   1))  # (bs,3,159,159,1)
    # history_inputs = Lambda(lambda x: K.squeeze(x, axis=-1))(_history_inputs)   # (bs, 3, 159, 159)
    history_inputs = Permute(
        (4, 2, 3, 1))(_history_inputs)  #  (bs, 1, 159, 159, 3)
    conv_1 = Conv3D(filters=128,
                    kernel_size=(2, 1, 1),
                    padding='same',
                    name='conv3d_1')(WRF_inputs)
    conv_1 = Activation('relu')(conv_1)
    conv_2 = Conv3D(filters=128,
                    kernel_size=(1, 3, 3),
                    padding='same',
                    name='conv3d_2')(conv_1)
    conv_2 = Activation('relu')(conv_2)
    conv_3 = Conv3D(filters=256,
                    kernel_size=(2, 3, 3),
                    padding='same',
                    name='conv3d_3')(conv_2)
    conv_3 = Activation('relu')(conv_3)
    conv_4 = Conv3D(filters=128,
                    kernel_size=(3, 1, 1),
                    padding='same',
                    name='conv3d_4')(conv_3)
    conv_4 = Activation('relu')(conv_4)
    conv_5 = Conv3D(filters=128,
                    kernel_size=(1, 3, 3),
                    padding='same',
                    name='conv3d_5')(conv_4)
    conv_5 = Activation('relu')(conv_5)
    conv_6 = Conv3D(filters=64,
                    kernel_size=(3, 3, 3),
                    padding='same',
                    name='conv3d_6')(conv_5)
    conv_6 = Activation('relu')(conv_6)
    steps = []
    for i in range(num_frames):
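        # Cropping3D with ((i, num_frames - i - 1), (0, 0), (0, 0)) drops every
        # frame except time step i, leaving a length-1 temporal axis.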
        conv_6_i = Cropping3D(cropping=((i, num_frames - i - 1), (0, 0),
                                        (0, 0)))(conv_6)  # (bs, 1, 159, 159, 64)
        conv2d_in = concatenate([history_inputs, conv_6_i],
                                axis=-1)  # (bs, 1, 159, 159, 64+3)
        conv2d_in = Lambda(lambda x: K.squeeze(x, axis=1))(
            conv2d_in)  # (bs, 159, 159, 67)
        conv2d_1_i = Conv2D(filters=64,
                            kernel_size=(7, 7),
                            padding='same',
                            name='conv2d_1_%d' % i)(conv2d_in)
        conv2d_1_i = Activation('relu')(conv2d_1_i)
        conv2d_2_i = Conv2D(filters=1,
                            kernel_size=(7, 7),
                            padding='same',
                            name='conv2d_2_%d' % i)(conv2d_1_i)
        steps.append(conv2d_2_i)
    conv_out = concatenate(steps, axis=1)  # (bs, 6*159, 159, 1)
    outputs = Reshape((-1, 159 * 159, 1),
                      input_shape=(-1, 159, 159, 1))(conv_out)
    return Model([WRF_inputs, _history_inputs], outputs, name='Conv3D-KDD')
Example #2
def getAltModel():
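    # Single-scale 3D encoder/decoder: conv -> max-pool -> conv -> upsample, with
    # the pre-pooling feature map (altconv) center-cropped by Cropping3D and
    # concatenated back as a skip connection.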
    inputdata = Input((1, 64, 64, 64))
    print "input data " + str(inputdata.shape)
    altconv = Convolution3D(
        128,
        kernel_size=11,
        data_format='channels_first',  #128
        activation='relu',
        padding='valid',
        use_bias=True,
        kernel_initializer='glorot_normal')(inputdata)
    altconv = PReLU()(altconv)
    print "altconv " + str(altconv.shape)
    altpool = MaxPooling3D(pool_size=(2, 2, 2),
                           data_format='channels_first')(altconv)
    print "altpool " + str(altpool.shape)
    altencode = Convolution3D(
        128,
        kernel_size=11,
        data_format='channels_first',  #128
        activation='relu',
        padding='valid',
        use_bias=True,
        kernel_initializer='glorot_normal')(altpool)
    altencode = PReLU()(altencode)
    print "altencode " + str(altencode.shape)
    upsampling1 = UpSampling3D(size=(2, 2, 2),
                               data_format='channels_first')(altencode)
    finalShape = upsampling1.shape
    print "upsampling " + str(finalShape)
    originalShape = altconv.shape  #ALTERNATIVE
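    # Center-crop altconv so its spatial dimensions match the upsampled tensor:
    # remove (original - final) // 2 voxels from each side of every spatial axis.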
    cropShape = (int(originalShape[2] / 2 - finalShape[2] / 2),
                 int(originalShape[3] / 2 - finalShape[3] / 2),
                 int(originalShape[4] / 2 - finalShape[4] / 2))
    crop1 = Cropping3D(cropping=cropShape,
                       data_format='channels_first')(altconv)
    print "cropped conv " + str(crop1.shape)
    concatenate1 = concatenate([upsampling1, crop1], axis=1)
    print "concatenate " + str(concatenate1.shape)
    dropout1 = Dropout(0.25)(concatenate1)
    expand1 = Convolution3D(
        256,
        kernel_size=3,
        data_format='channels_first',  #256
        activation='relu',
        padding='valid',
        use_bias=True,
        kernel_initializer='glorot_normal')(dropout1)
    expand1 = PReLU()(expand1)
    print "expand " + str(expand1.shape)
    altoutputdata = Convolution3D(
        1,
        kernel_size=1,
        data_format='channels_first',  #128
        activation='sigmoid',
        padding='valid',
        use_bias=True,
        kernel_initializer='glorot_normal')(expand1)
    print "output " + str(altoutputdata.shape)
    model = Model(inputs=inputdata, outputs=altoutputdata)
    model.compile(optimizer=Adam(lr=1e-5), loss=diceCoef)
    return model
Example #3
def LSTM_Conv2D_KDD_t2():
    # encoder: layers definition && data flow  --------------------------------------
    encoder_inputs = Input(shape=(None, 159, 159, 1),
                           name='encoder_inputs')  # (bs, 3, 159, 159, 1)
    encoder_conv2d = TimeDistributed(Conv2D(filters=4,
                                            kernel_size=(7, 7),
                                            strides=(2, 2),
                                            padding='same'),
                                     name='en_conv2d')(encoder_inputs)
    encoder_conv2d = TimeDistributed(Activation('relu'))(encoder_conv2d)
    _, h, c = ConvLSTM2D(filters=8,
                         kernel_size=(5, 5),
                         return_sequences=True,
                         return_state=True,
                         padding='same',
                         name='en_convlstm')(encoder_conv2d)
    # --------------------------------------------------------------------------------
    # encoder to decoder: layers definition && data flow  ----------------------------
    h = Conv2D(filters=64,
               kernel_size=(1, 1),
               padding="same",
               name='h_conv2d',
               activation='relu')(h)
    c = Conv2D(filters=64,
               kernel_size=(1, 1),
               padding="same",
               name='c_conv2d',
               activation='relu')(c)
    # --------------------------------------------------------------------------------

    # decoder: layers definition -------------------------------
    _decoder_inputs = Input(shape=(None, 159, 159, 1),
                            name='decoder_inputs')  # (bs, 1, 159,159,1)
    decoder_inputs = _decoder_inputs
    de_conv2d = TimeDistributed(Conv2D(filters=4,
                                       kernel_size=(7, 7),
                                       strides=(2, 2),
                                       padding='same'),
                                name='de_conv2d')
    de_convlstm = ConvLSTM2D(filters=64,
                             kernel_size=(5, 5),
                             name='de_convlstm',
                             return_state=True,
                             padding='same',
                             return_sequences=True)
    de_conv2dT = TimeDistributed(Conv2DTranspose(filters=64,
                                                 kernel_size=(7, 7),
                                                 strides=(2, 2),
                                                 padding='same'),
                                 name='de_conv2dT')
    de_out_conv2d = TimeDistributed(Conv2D(filters=1,
                                           kernel_size=(1, 1),
                                           padding="same"),
                                    name='de_out_conv2d')
    # ----------------------------------------------------------

    relu = Activation('relu')
    sigmoid = Activation('sigmoid')
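    # The stride-2 Conv2DTranspose yields 160x160 maps; this crop trims one row
    # and one column to recover the original 159x159 grid.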
    cropper = Cropping3D(cropping=((0, 0), (0, 1), (0, 1)))

    # decoder: data flow-----------------------------------------
    decoder_outputs = []
    for i in range(num_frames):
        decoder_conv2d = de_conv2d(decoder_inputs)
        decoder_conv2d = relu(decoder_conv2d)

        decoder_convlstm, h, c = de_convlstm([decoder_conv2d, h, c])

        decoder_conv2dT = de_conv2dT(decoder_convlstm)
        decoder_conv2dT = relu(decoder_conv2dT)

        decoder_out_conv2d = de_out_conv2d(
            decoder_conv2dT)  # (bs, 1, 160, 160, 1)
        decoder_output = cropper(decoder_out_conv2d)  # (bs, 1, 159, 159, 1)
        decoder_outputs.append(decoder_output)
        decoder_output = sigmoid(decoder_output)
        decoder_inputs = decoder_output

    decoder_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(
        decoder_outputs)  # (bs, 6, 159, 159, 1)
    decoder_outputs = Reshape((-1, 159 * 159, 1),
                              input_shape=(-1, 159, 159, 1))(decoder_outputs)
    # ----------------------------------------------------------
    return Model([encoder_inputs, _decoder_inputs],
                 decoder_outputs,
                 name='ConvLSTM-Conv2d-KDD-t2')
Example #4
def get_unet(img_rows=204, img_cols=204, img_deps=31, img_chans=1):
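    # 3D U-Net-style network built from the cLayer helper (defined elsewhere,
    # presumably wrapping grouped Conv3D / deconvolution). Because the
    # convolutions shrink the feature maps, each skip connection first trims the
    # encoder output with Cropping3D to the size of the upsampled decoder output
    # before concatenation.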
    # caffe data: batch*channel*depth*rows*cols
    inputs = Input((img_chans, img_deps, img_rows, img_cols))
    conv1 = cLayer(24, (3, 3, 3), 1, 1, inputs)
    relu1 = LeakyReLU(0.005)(conv1)
    conv2 = cLayer(24, (3, 3, 3), 2, 1, relu1)
    relu2 = LeakyReLU(0.005)(conv2)
    pool1 = MaxPooling3D(pool_size=(1, 2, 2),
                         data_format='channels_first',
                         strides=(1, 2, 2))(relu2)

    conv3 = cLayer(72, (3, 3, 3), 3, 1, pool1)
    relu3 = LeakyReLU(0.005)(conv3)
    conv4 = cLayer(72, (3, 3, 3), 4, 1, relu3)
    relu4 = LeakyReLU(0.005)(conv4)
    pool2 = MaxPooling3D(pool_size=(1, 2, 2),
                         data_format='channels_first',
                         strides=(1, 2, 2))(relu4)

    conv5 = cLayer(216, (3, 3, 3), 5, 1, pool2)
    relu5 = LeakyReLU(0.005)(conv5)
    conv6 = cLayer(216, (3, 3, 3), 6, 1, relu5)
    relu6 = LeakyReLU(0.005)(conv6)
    pool3 = MaxPooling3D(pool_size=(1, 2, 2),
                         data_format='channels_first',
                         strides=(1, 2, 2))(relu6)

    conv7 = cLayer(648, (3, 3, 3), 7, 1, pool3)
    relu7 = LeakyReLU(0.005)(conv7)
    conv8 = cLayer(648, (3, 3, 3), 8, 1, relu7)
    relu8 = LeakyReLU(0.005)(conv8)

    dconv1 = cLayer(1, (1, 2, 2),
                    1,
                    2,
                    relu8,
                    strides=(1, 2, 2),
                    num_groups=648)
    conv9 = cLayer(216, (1, 1, 1), 9, 1, dconv1)
    relu6_crop = Cropping3D(cropping=(2, 4, 4),
                            data_format='channels_first')(relu6)
    mc1 = concatenate([conv9, relu6_crop], axis=1)
    conv10 = cLayer(216, (3, 3, 3), 10, 1, mc1)
    relu9 = LeakyReLU(0.005)(conv10)
    conv11 = cLayer(216, (3, 3, 3), 11, 1, relu9)
    relu10 = LeakyReLU(0.005)(conv11)

    dconv2 = cLayer(1, (1, 2, 2),
                    2,
                    2,
                    relu10,
                    strides=(1, 2, 2),
                    num_groups=216)
    conv12 = cLayer(72, (1, 1, 1), 12, 1, dconv2)
    relu4_crop = Cropping3D(cropping=(6, 16, 16),
                            data_format='channels_first')(relu4)
    mc2 = concatenate([conv12, relu4_crop], axis=1)
    conv13 = cLayer(72, (3, 3, 3), 13, 1, mc2)
    relu11 = LeakyReLU(0.005)(conv13)
    conv14 = cLayer(72, (3, 3, 3), 14, 1, relu11)
    relu12 = LeakyReLU(0.005)(conv14)

    dconv3 = cLayer(1, (1, 2, 2),
                    3,
                    2,
                    relu12,
                    strides=(1, 2, 2),
                    num_groups=72)
    conv15 = cLayer(24, (1, 1, 1), 15, 1, dconv3)
    relu2_crop = Cropping3D(cropping=(10, 40, 40),
                            data_format='channels_first')(relu2)
    mc3 = concatenate([conv15, relu2_crop], axis=1)
    conv16 = cLayer(24, (3, 3, 3), 16, 1, mc3)
    relu13 = LeakyReLU(0.005)(conv16)
    conv17 = cLayer(24, (3, 3, 3), 17, 1, relu13)
    relu14 = LeakyReLU(0.005)(conv17)

    #conv18 = cLayer(3, (1, 1, 1), 18, 1, relu14)
    conv18 = cLayer(3, (1, 1, 1), 18, 1, relu14, activation='sigmoid')

    model = Model(inputs=[inputs], outputs=[conv18])
    return model
Example #5
def getModel():
    inputdata = Input((1, 64, 64, 64))
    conv1 = Convolution3D(128,
                          kernel_size=3,
                          data_format='channels_first',
                          activation='relu',
                          padding='valid',
                          use_bias=True,
                          kernel_initializer='glorot_normal')(inputdata)
    conv1 = PReLU()(conv1)
    conv2 = Convolution3D(128,
                          kernel_size=3,
                          data_format='channels_first',
                          activation='relu',
                          padding='valid',
                          use_bias=True,
                          kernel_initializer='glorot_normal')(conv1)
    conv2 = PReLU()(conv2)
    conv3 = Convolution3D(128,
                          kernel_size=3,
                          data_format='channels_first',
                          activation='relu',
                          padding='valid',
                          use_bias=True,
                          kernel_initializer='glorot_normal')(conv2)
    conv3 = PReLU()(conv3)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2),
                         data_format='channels_first')(conv3)
    encode1 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(pool1)
    encode1 = PReLU()(encode1)
    encode2 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(encode1)
    encode2 = PReLU()(encode2)
    encode3 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(encode2)
    encode3 = PReLU()(encode3)
    encode4 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(encode3)
    encode4 = PReLU()(encode4)
    upsampling1 = UpSampling3D(size=(2, 2, 2),
                               data_format='channels_first')(encode4)
    finalShape = upsampling1.shape
    originalShape = conv3.shape
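    # Center-crop conv3 to the spatial size of the upsampled tensor, as in
    # getAltModel above.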
    cropShape = (int(originalShape[2] / 2 - finalShape[2] / 2),
                 int(originalShape[3] / 2 - finalShape[3] / 2),
                 int(originalShape[4] / 2 - finalShape[4] / 2))
    crop1 = Cropping3D(cropping=cropShape, data_format='channels_first')(conv3)
    concatenate1 = concatenate([upsampling1, crop1], axis=1)
    dropout1 = Dropout(0.25)(concatenate1)
    expand1 = Convolution3D(256,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(dropout1)
    expand1 = PReLU()(expand1)
    expand2 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(expand1)
    expand2 = PReLU()(expand2)
    expand3 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(expand2)
    expand3 = PReLU()(expand3)
    expand4 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(expand3)
    expand4 = PReLU()(expand4)
    expand5 = Convolution3D(128,
                            kernel_size=3,
                            data_format='channels_first',
                            activation='relu',
                            padding='valid',
                            use_bias=True,
                            kernel_initializer='glorot_normal')(expand4)
    expand5 = PReLU()(expand5)
    outputdata = Convolution3D(1,
                               kernel_size=1,
                               data_format='channels_first',
                               activation='sigmoid',
                               padding='valid',
                               use_bias=True,
                               kernel_initializer='glorot_normal')(expand5)
    model = Model(inputs=inputdata, outputs=outputdata)
    model.compile(optimizer=Adam(lr=LR), loss=diceCoef)
    #model.compile(optimizer='adadelta', loss=diceCoef)
    print(model.summary())
    return model
Example #6
x3 = Conv3D(filters=50,
            kernel_size=(3, 3, 3),
            dilation_rate=(1, 16, 16),
            padding='valid',
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(L2))(x3)
x3 = BatchNormalization()(x3)
x3 = Activation('relu')(x3)

#############   High res pathway   ##################

x1 = Cropping3D(cropping=((0, 0), (22, 22), (22, 22)),
                input_shape=(dpatch[0], dpatch[1], dpatch[2], num_channels))(mod1)

for feature in conv_features[0:8]:
    x1 = Conv3D(filters=feature,
                kernel_size=(2, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(L2))(x1)
    x1 = BatchNormalization()(x1)
    x1 = Activation('relu')(x1)

#############   Fully connected layers   ##################

x = concatenate([x1, x3])
Example #7
def ADSNet():
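    # Encoder: Conv2D + ConvLSTM2D over the observation frames; its final states
    # (after 1x1 Conv2D) initialize the decoder ConvLSTM. Decoder: for every
    # forecast step, a channel-attention vector is computed from the current
    # states and the downsampled inputs, the normalized inputs are reweighted,
    # passed through CNN module 2, and fed to the decoder ConvLSTM; Cropping3D is
    # again used to pick out individual time steps.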
    # encoder: layers definition && data flow  --------------------------------------
    # CNN module 1 -------------------------------------
    encoder_inputs = Input(shape=(num_frames_truth, 159, 159, 1), name='encoder_inputs')  # (bs, 3, 159, 159, 1)
    encoder_conv2d_1 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same'),
                                       name='en_conv2d_1')(encoder_inputs)
    encoder_conv2d_1 = TimeDistributed(Activation('relu'))(encoder_conv2d_1)
    encoder_conv2d_1 = TimeDistributed(MaxPooling2D(padding='same'))(encoder_conv2d_1)
    encoder_conv2d_2 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same'),
                                       name='en_conv2d_2')(encoder_conv2d_1)
    encoder_conv2d_2 = TimeDistributed(Activation('relu'))(encoder_conv2d_2)
    encoder_conv2d_2 = TimeDistributed(MaxPooling2D(padding='same'))(encoder_conv2d_2)

    # ---------------------------------------------------
    _, en_h, en_c = ConvLSTM2D(filters=8, kernel_size=(5, 5), return_sequences=True, return_state=True, padding='same',
                               name='en_convlstm')(encoder_conv2d_2)
    # --------------------------------------------------------------------------------
    # # encoder to decoder: layers definition && data flow  --------------------
    en_h = Conv2D(filters=128, kernel_size=(1, 1), padding="same", name='en_de_h', activation='relu')(en_h)
    en_c = Conv2D(filters=128, kernel_size=(1, 1), padding="same", name='en_de_c', activation='relu')(en_c)
    # --------------------------------------------------------------------------------
    # decoder: layers definition && dataflow -----------------------------------------
    decoder_inputs = Input(shape=(num_frames, 159, 159, fea_dim), name='decoder_inputs')  # (bs, 12, 159, 159, fea_dim)
    norm_inputs = Reshape((num_frames, 159 * 159, fea_dim))(decoder_inputs)
    norm_inputs = Lambda(_min_max, arguments={'axis': 2})(norm_inputs)
    norm_inputs = Reshape((num_frames, 159, 159, fea_dim))(norm_inputs)
    # CNN module 2 -----------------------------------------------------------
    de_conv2d_1 = Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')
    de_conv2d_2 = Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')

    # -------------------------------------------------------------------------
    # attention module ----------------------------------------------------------------
    att_conv2d_1 = TimeDistributed(DepthwiseConv2D(kernel_size=(5, 5), padding='same', depth_multiplier=1),
                                   name='att_conv2d_1')(norm_inputs)
    att_conv2d_1 = TimeDistributed(Activation('relu'))(att_conv2d_1)
    att_conv2d_1 = TimeDistributed(MaxPooling2D(padding='same'))(att_conv2d_1)
    att_conv2d_2 = TimeDistributed(DepthwiseConv2D(kernel_size=(5, 5), padding='same', depth_multiplier=1),
                                   name='att_conv2d_2')(att_conv2d_1)
    att_conv2d_2 = TimeDistributed(Activation('relu'))(att_conv2d_2)
    att_conv2d_2 = TimeDistributed(MaxPooling2D(padding='same'))(att_conv2d_2)
    att_conv_hc_to_x = Conv2D(filters=1, name='att_conv_hc_to_x', kernel_size=(1, 1))
    att_conv_x = DepthwiseConv2D(name='att_conv_x', kernel_size=(1, 1))
    de_convlstm = ConvLSTM2D(filters=128, return_sequences=True, return_state=True,
                             kernel_size=(5, 5), name='de_convlstm_f', padding='same')
    alpha_list = []
    out_list = []
    att_h = en_h
    att_c = en_c
    for t in range(num_frames):
        _att_x_t = Cropping3D(data_format='channels_last', cropping=((t, num_frames - t - 1), (0, 0), (0, 0)))(
            att_conv2d_2)  # (bs,1,40,40,fea_dim)
        norm_x_t = Cropping3D(data_format='channels_last', cropping=((t, num_frames - t - 1), (0, 0), (0, 0)))(
            norm_inputs)  # (bs,1,159,159,fea_dim)
        att_x_t = Lambda(lambda x: K.squeeze(x, axis=1))(_att_x_t)  # (bs,40,40,fea_dim)
        att_x_t = att_conv_x(att_x_t)  # (bs,40,40,fea_dim)
        hc = Concatenate(axis=-1)([att_h, att_c])  # output: (bs,40,40,128*2)
        hc = att_conv_hc_to_x(hc)  # (bs,40,40,1)
        hc_x = multiply([hc, att_x_t])  # (bs,40,40,fea_dim)
        e = Lambda(lambda x: K.sum(x, axis=[1, 2], keepdims=False))(hc_x)  # (bs,fea_dim)
        e = Lambda(lambda x: x * 0.025)(e)  # (bs,fea_dim)
        alpha = Activation('softmax')(e)  # output: (bs,fea_dim)
        norm_x_t = multiply([alpha, norm_x_t])  # output: (bs,1,40,40,fea_dim)
        norm_x_t = TimeDistributed(de_conv2d_1)(norm_x_t)
        norm_x_t = TimeDistributed(MaxPooling2D(padding='same'))(norm_x_t)
        norm_x_t = TimeDistributed(de_conv2d_2)(norm_x_t)
        norm_x_t = TimeDistributed(MaxPooling2D(padding='same'))(norm_x_t)  # (bs, 1, 40, 40, 32)
        att_o, att_h, att_c = de_convlstm([norm_x_t, att_h, att_c])
        alpha_list.append(alpha)
        out_list.append(att_o)
    decoder_convlstm = Concatenate(axis=1)(out_list)  # output: (bs,12,40,40,128)
    # ---------------------------------------------------------------------------------
    # DCNN module ------------------------------------------------------------
    decoder_conv2dT_1 = TimeDistributed(Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same'),
                                        name='de_conv2dT_1')(decoder_convlstm)
    decoder_conv2dT_1 = TimeDistributed(Activation('relu'))(decoder_conv2dT_1)
    decoder_conv2dT_2 = TimeDistributed(Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same'),
                                        name='de_conv2dT_2')(decoder_conv2dT_1)
    decoder_conv2dT_2 = TimeDistributed(Activation('relu'))(decoder_conv2dT_2)
    decoder_outputs = TimeDistributed(Conv2D(filters=1, kernel_size=(1, 1), padding="same"), name='de_out_conv2d')(
        decoder_conv2dT_2)
    # ---------------------------------------------------------------------------------
    decoder_outputs = Cropping3D(cropping=((0, 0), (0, 1), (0, 1)))(decoder_outputs)
    decoder_outputs = Reshape((-1, 159 * 159, 1), input_shape=(-1, 159, 159, 1))(decoder_outputs)

    return Model([decoder_inputs, encoder_inputs], decoder_outputs, name='ADSNet')
Example #8
    def createModel(self):
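        # Each modality (T1post, T1pre, T2) is processed by two pathways: a
        # high-resolution 'Detail' pathway that crops the center of the patch
        # with Cropping3D before a stack of unpadded Conv3D layers, and a
        # 'Context' pathway built on AveragePooling3D, Conv3D and UpSampling3D.
        # The three pathway outputs are merged with the TPM input and mixed by
        # 1x1x1 convolutions ending in a two-channel sigmoid.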

        T1post = Input((None, None, None, 1), name='T1post_input')
        T1pre = Input((None, None, None, 1), name='T1pre_input')
        T2 = Input((None, None, None, 1), name='T2_input')

        ########################  T1 post pathway #########################
        #############   High res pathway   ##################
        x11 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                         input_shape=(None, None, None, self.num_channels),
                         name='T1post_Detail')(T1post)

        # reduced original input by -40    : 13,35,35
        for feature in (self.conv_features[0:7]):  # reduce in -36
            x11 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x11)
            x11 = LeakyReLU()(x11)
            x11 = BatchNormalization()(x11)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x11 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x11)
            x11 = LeakyReLU()(x11)
            x11 = BatchNormalization()(x11)
        # 1, 23, 23

        # output of pathway should be 1,9,9
        #############   Context pathway   ##################

#        x12        = Conv3D(filters = 20,
#                               kernel_size = (3,3,3),
#                               dilation_rate=(3,3,3),
#                               padding='same',
#                               kernel_initializer=Orthogonal(),
#                               kernel_regularizer=regularizers.l2(self.L2))(T1post)
#        x12        = LeakyReLU()(x12)
#        x12        = BatchNormalization()(x12)

        x12 = AveragePooling3D(pool_size=(1, 3, 3),
                               strides=(1, 1, 1),
                               name='T1post_Context')(T1post)
        # (13, 25, 25)

        for _ in range(2):
            x12 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x12)
            x12 = LeakyReLU()(x12)
            x12 = BatchNormalization()(x12)

        # (13, 21, 21)

        for _ in range(6):
            x12 = Conv3D(
                filters=30,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x12)
            x12 = LeakyReLU()(x12)
            x12 = BatchNormalization()(x12)
        # Result: (1,9,9)
        for _ in range(4):
            x12 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x12)
            x12 = LeakyReLU()(x12)
            x12 = BatchNormalization()(x12)

        x12 = UpSampling3D(size=(1, 9, 9))(x12)

        x1 = concatenate([x11, x12])

        #        x1        = Conv3D(filters = 50,
        #                               kernel_size = (1,1,1),
        #                               #padding='same',
        #                               kernel_initializer=Orthogonal(),
        #                               kernel_regularizer=regularizers.l2(self.L2))(x1)
        #        x1        = LeakyReLU()(x1)
        #        x1        = BatchNormalization()(x1)

        ########################  T1 pre pathway #########################
        #############   High res pathway   ##################
        x21 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                         input_shape=(None, None, None, self.num_channels),
                         name='T1pre_Detail')(T1pre)

        # reduced original input by -40    : 13,35,35

        for feature in (self.conv_features[0:7]):  # reduce in -14
            x21 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x21)
            x21 = LeakyReLU()(x21)
            x21 = BatchNormalization()(x21)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x21 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x21)
            x21 = LeakyReLU()(x21)
            x21 = BatchNormalization()(x21)
        # 1, 23, 23

        # output of pathway should be 1,9,9

        #############   Context pathway   ##################

        x22 = AveragePooling3D(pool_size=(1, 3, 3),
                               name='T1pre_Context')(T1pre)
        # (13, 25, 25)

        for _ in range(2):
            x22 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x22)
            x22 = LeakyReLU()(x22)
            x22 = BatchNormalization()(x22)

        for _ in range(6):
            x22 = Conv3D(
                filters=30,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x22)
            x22 = LeakyReLU()(x22)
            x22 = BatchNormalization()(x22)
        for _ in range(4):
            x22 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x22)
            x22 = LeakyReLU()(x22)
            x22 = BatchNormalization()(x22)

        x22 = UpSampling3D(size=(1, 9, 9))(x22)

        x2 = concatenate([x21, x22])

        ########################  T2 pathway #########################
        #############   High res pathway   ##################
        x31 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                         input_shape=(None, None, None, self.num_channels),
                         name='T2_Detail')(T2)

        # reduced original input by -40    : 13,35,35

        for feature in (self.conv_features[0:7]):  # reduce in -36
            x31 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x31)
            x31 = LeakyReLU()(x31)
            x31 = BatchNormalization()(x31)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x31 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x31)
            x31 = LeakyReLU()(x31)
            x31 = BatchNormalization()(x31)
        # 1, 23, 23

        # output of pathway should be 1,9,9
        #############   Context pathway   ##################
        # starting from (min) 13,75,75

        x32 = AveragePooling3D(pool_size=(1, 3, 3), name='T2_Context')(T2)
        # (13, 25, 25)

        for _ in range(2):
            x32 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x32)
            x32 = LeakyReLU()(x32)
            x32 = BatchNormalization()(x32)

        for _ in range(6):
            x32 = Conv3D(
                filters=30,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x32)
            x32 = LeakyReLU()(x32)
            x32 = BatchNormalization()(x32)

        for _ in range(4):
            x32 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x32)
            x32 = LeakyReLU()(x32)
            x32 = BatchNormalization()(x32)

        x32 = UpSampling3D(size=(1, 9, 9))(x32)

        x3 = concatenate([x31, x32])

        ########################  Merge Modalities #########################

        TPM = Input((None, None, None, 1), name='TPM')

        x = concatenate([x1, x2, x3, TPM])

        for feature in (self.fc_features[0:2]):
            x = Conv3D(
                filters=100,
                kernel_size=(1, 1, 1),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x)
            x = LeakyReLU()(x)
            x = BatchNormalization()(x)

#        x        = Conv3D(filters = self.output_classes,
#                           kernel_size = (1,1,1),
#                           #kernel_initializer=he_normal(seed=seed),
#                           kernel_initializer=Orthogonal(),
#                           kernel_regularizer=regularizers.l2(self.L2))(x)

        x = concatenate([x, TPM])  #  MIXING ONLY CHANNELS + CHANNELS.

        x = Conv3D(
            filters=100,
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

        #x        = concatenate([x,TPM])

        #        x        = Conv3D(filters = 4,
        #                   kernel_size = (1,1,1),
        #                   #kernel_initializer=he_normal(seed=seed),
        #                   kernel_initializer=Orthogonal(),
        #                   name = 'Feature_extraction_layer',
        #                   kernel_regularizer=regularizers.l2(self.L2))(x)
        #        x        = LeakyReLU()(x)
        #        x        = BatchNormalization()(x)

        x = Conv3D(filters=2,
                   kernel_size=(1, 1, 1),
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x = Activation('sigmoid')(x)

        model = Model(inputs=[T1post, T1pre, T2, TPM], outputs=x)
        if self.loss_function == 'Dice':
            model.compile(
                loss=Generalised_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Multinomial':
            model.compile(
                loss='binary_crossentropy',
                optimizer=Adam(lr=self.learning_rate),
                metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        return model
Example #9
    def createModel(self):
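        # Variant of the two-pathway model above: a 'breast mask' sub-network
        # (context pathway on T1post plus the spatial-coordinate input) is
        # computed first, and its two-channel output is concatenated with the
        # cropped T1post / T1pre detail pathways before the 1x1x1 mixing
        # convolutions.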

        T1post = Input((None, None, None, 1), name='T1post_input')
        T1pre = Input((None, None, None, 1), name='T1pre_input')
        Coords = Input((None, None, None, 3), name='Spatial_coordinates')

        #######################################################################
        ######################### Breast Mask Model ###########################
        #######################################################################

        x_mask = AveragePooling3D(pool_size=(1, 3, 3), name='Context')(T1post)
        # (13, 25, 25)
        for iii in range(6):
            x_mask = Conv3D(filters=30,
                            kernel_size=(3, 3, 3),
                            kernel_initializer=Orthogonal(),
                            name='T1post_Context_{}'.format(iii),
                            kernel_regularizer=regularizers.l2(
                                self.L2))(x_mask)
            x_mask = LeakyReLU()(x_mask)
            x_mask = BatchNormalization()(x_mask)

        for iii in range(5):
            x_mask = Conv3D(filters=30,
                            kernel_size=(1, 3, 3),
                            kernel_initializer=Orthogonal(),
                            name='T1post_Context_{}'.format(iii + 6),
                            kernel_regularizer=regularizers.l2(
                                self.L2))(x_mask)
            x_mask = LeakyReLU()(x_mask)
            x_mask = BatchNormalization()(x_mask)

        x_mask = UpSampling3D(size=(1, 3, 3))(x_mask)

        ######################## FC Parts #############################

        x_mask = concatenate([x_mask, Coords])

        for iii in range(2):
            x_mask = Conv3D(filters=60,
                            kernel_size=(1, 1, 1),
                            kernel_initializer=Orthogonal(),
                            name='T1post_Context_{}'.format(iii + 11),
                            kernel_regularizer=regularizers.l2(
                                self.L2))(x_mask)
            x_mask = LeakyReLU()(x_mask)
            x_mask = BatchNormalization()(x_mask)

        x_mask = Conv3D(filters=100,
                        kernel_size=(1, 1, 1),
                        kernel_initializer=Orthogonal(),
                        name='T1post_Context_14',
                        kernel_regularizer=regularizers.l2(self.L2))(x_mask)
        x_mask = LeakyReLU()(x_mask)
        x_mask = BatchNormalization()(x_mask)

        x_mask = Conv3D(filters=2,
                        kernel_size=(1, 1, 1),
                        name='T1post_Context_15',
                        kernel_initializer=Orthogonal(),
                        kernel_regularizer=regularizers.l2(self.L2))(x_mask)

        #######################################################################
        #######################################################################
        #######################################################################

        ########################  T1 post pathway #########################
        #############   High res pathway   ##################
        x1 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                        input_shape=(None, None, None, self.num_channels),
                        name='T1post_Detail')(T1post)

        # reduced original input by -40    : 13,35,35
        for feature in (self.conv_features[0:7]):  # reduce in -36
            x1 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1 = LeakyReLU()(x1)
            x1 = BatchNormalization()(x1)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x1 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1 = LeakyReLU()(x1)
            x1 = BatchNormalization()(x1)

        ########################  T1 pre pathway #########################
        #############   High res pathway   ##################
        x2 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                        input_shape=(None, None, None, self.num_channels),
                        name='T1pre_Detail')(T1pre)

        # reduced original input by -40    : 13,35,35

        for feature in (self.conv_features[0:7]):  # reduce in -14
            x2 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x2)
            x2 = LeakyReLU()(x2)
            x2 = BatchNormalization()(x2)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x2 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x2)
            x2 = LeakyReLU()(x2)
            x2 = BatchNormalization()(x2)
        # 1, 23, 23

########################  Merge Modalities #########################

        x = concatenate([x1, x2, x_mask])

        for feature in (self.fc_features[0:2]):
            x = Conv3D(
                filters=60,
                kernel_size=(1, 1, 1),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x)
            x = LeakyReLU()(x)
            x = BatchNormalization()(x)

        x = Conv3D(
            filters=100,
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

        x = Conv3D(filters=2,
                   kernel_size=(1, 1, 1),
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x = Activation('sigmoid')(x)

        model = Model(inputs=[T1post, T1pre, Coords], outputs=x)

        #model = multi_gpu_model(model, gpus=4)

        if self.loss_function == 'Dice':
            model.compile(
                loss=Generalised_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Multinomial':
            model.compile(
                loss='binary_crossentropy',
                optimizer=Adam(lr=self.learning_rate),
                metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        return model
Example #10
    def createModel(self):
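        # Same multi-modal design as Example #8, except only T1post keeps a
        # context pathway (average-pool -> Conv3D stack -> upsample); T1pre and
        # T2 use just the cropped high-resolution detail pathway.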

        T1post = Input((None, None, None, 1), name='T1post_input')
        T1pre = Input((None, None, None, 1), name='T1pre_input')
        T2 = Input((None, None, None, 1), name='T2_input')

        ########################  T1 post pathway #########################
        #############   High res pathway   ##################
        x11 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                         input_shape=(None, None, None, self.num_channels),
                         name='T1post_Detail')(T1post)

        # reduced original input by -40    : 13,35,35
        for feature in (self.conv_features[0:7]):  # reduce in -36
            x11 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x11)
            x11 = LeakyReLU()(x11)
            x11 = BatchNormalization()(x11)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x11 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x11)
            x11 = LeakyReLU()(x11)
            x11 = BatchNormalization()(x11)

        x12 = AveragePooling3D(pool_size=(1, 3, 3),
                               name='T1post_Context')(T1post)
        # (13, 25, 25)
        for iii in range(6):
            x12 = Conv3D(
                filters=30,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                name='T1post_Context_{}'.format(iii),
                kernel_regularizer=regularizers.l2(self.L2))(x12)
            x12 = LeakyReLU()(x12)
            x12 = BatchNormalization()(x12)

        for jjj in range(5):
            x12 = Conv3D(
                filters=30,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                name='T1post_Context_{}'.format(6 + jjj),
                kernel_regularizer=regularizers.l2(self.L2))(x12)
            x12 = LeakyReLU()(x12)
            x12 = BatchNormalization()(x12)

        x12 = UpSampling3D(size=(1, 3, 3))(x12)
        # Result: (1,13,13)

        x1 = concatenate([x11, x12])

        ########################  T1 pre pathway #########################
        #############   High res pathway   ##################
        x21 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                         input_shape=(None, None, None, self.num_channels),
                         name='T1pre_Detail')(T1pre)

        # reduced original input by -40    : 13,35,35

        for feature in (self.conv_features[0:7]):  # reduce in -14
            x21 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x21)
            x21 = LeakyReLU()(x21)
            x21 = BatchNormalization()(x21)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x21 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x21)
            x21 = LeakyReLU()(x21)
            x21 = BatchNormalization()(x21)
        # 1, 23, 23

        x2 = x21

        ########################  T2 pathway #########################
        #############   High res pathway   ##################
        x31 = Cropping3D(cropping=((0, 0), (20, 20), (20, 20)),
                         input_shape=(None, None, None, self.num_channels),
                         name='T2_Detail')(T2)

        # reduced original input by -40    : 13,35,35

        for feature in (self.conv_features[0:7]):  # reduce in -36
            x31 = Conv3D(
                filters=feature,
                kernel_size=(1, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x31)
            x31 = LeakyReLU()(x31)
            x31 = BatchNormalization()(x31)

        for feature in self.conv_features[0:6]:  # reduce all dimensions in -12
            x31 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x31)
            x31 = LeakyReLU()(x31)
            x31 = BatchNormalization()(x31)

        x3 = x31

        ########################  Merge Modalities #########################

        TPM = Input((None, None, None, 1), name='TPM')

        x = concatenate([x1, x2, x3, TPM])

        for feature in (self.fc_features[0:2]):
            x = Conv3D(
                filters=60,
                kernel_size=(1, 1, 1),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x)
            x = LeakyReLU()(x)
            x = BatchNormalization()(x)

        x = concatenate([x, TPM])  #  MIXING ONLY CHANNELS + CHANNELS.

        x = Conv3D(
            filters=100,
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

        x = Conv3D(filters=2,
                   kernel_size=(1, 1, 1),
                   kernel_initializer=Orthogonal(),
                   kernel_regularizer=regularizers.l2(self.L2))(x)
        x = Activation('sigmoid')(x)

        model = Model(inputs=[T1post, T1pre, T2, TPM], outputs=x)

        #model = multi_gpu_model(model, gpus=4)

        if self.loss_function == 'Dice':
            model.compile(
                loss=Generalised_dice_coef_multilabel2,
                optimizer=Adam(lr=self.learning_rate),
                metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        elif self.loss_function == 'Multinomial':
            model.compile(
                loss='binary_crossentropy',
                optimizer=Adam(lr=self.learning_rate),
                metrics=['acc', dice_coef_multilabel0, dice_coef_multilabel1])
        return model
Example #11
    def createModel(self):
        '''Creates the model architecture.
        Input: data input dimensions, and possibly architecture specifications parsed from a config file (activations, cost function, hyperparameters such as the number of layers, dropout, ...).
        Output: Keras Model.'''

        #seed = 1337
        #mod1 = Input(input_shape=(None,None,None, self.num_channels))
        mod1 = Input(
            (self.dpatch, self.dpatch, self.dpatch, self.num_channels))

        #############   High res pathway   ##################
        x1 = Cropping3D(cropping=((8, 8), (8, 8), (8, 8)),
                        input_shape=(None, None, None,
                                     self.num_channels))(mod1)
        #x1        = Cropping3D(cropping = ((8,8),(8,8),(8,8)), input_shape=(self.dpatch,self.dpatch,self.dpatch, self.num_channels))(mod1)

        for feature in self.conv_features[0:8]:
            x1 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x1)
            x1 = BatchNormalization()(x1)
            x1 = Activation('relu')(x1)
            #x1        = LeakyReLU()(x1)
            #x1        = BatchNormalization()(x1)

        #############   Downsampled pathway   ##################
        x2 = MaxPooling3D(pool_size=(self.d_factor, self.d_factor,
                                     self.d_factor),
                          padding="same")(mod1)

        x3 = Conv3D(
            filters=feature,
            kernel_size=(3, 3, 3),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(mod1)
        x3 = BatchNormalization()(x3)
        x3 = Activation('relu')(x3)
        #x3        = LeakyReLU()(x3)

        for feature in (self.conv_features_downsample[0:9]):
            x3 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x3)
            x3 = BatchNormalization()(x3)
            x3 = Activation('relu')(x3)
            #x3        = LeakyReLU()(x3)

        #x2        = AveragePooling3D(pool_size=(self.d_factor,self.d_factor,self.d_factor), padding="same")(mod1)

        x2 = concatenate([x2, x3])

        for feature in (self.conv_features[0:6]):
            x2 = Conv3D(
                filters=feature,
                kernel_size=(3, 3, 3),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x2)
            x2 = BatchNormalization()(x2)
            x2 = Activation('relu')(x2)
            #x2        = LeakyReLU()(x2)
            #x2        = BatchNormalization()(x2)

        #x2        = UpSampling3D(size=(9,9,9))(x2)

        #############   Fully connected layers   ##################

        x = concatenate([x1, x2])

        #   Fully convolutional variant

        for feature in (self.conv_features[8:10]):
            x = Conv3D(
                filters=feature,
                kernel_size=(5, 1, 1),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x)
            #x        = BatchNormalization()(x)
            #x        = Activation('relu')(x)
            x = LeakyReLU()(x)
        x = BatchNormalization()(x)
        x = Dropout(rate=self.dropout[0])(x)

        coords = Input((1, 9, 9, 1))

        x = concatenate([x, coords])

        for fc_filters in self.fc_features:
            x = Conv3D(
                filters=fc_filters,
                kernel_size=(1, 1, 1),
                #kernel_initializer=he_normal(seed=seed),
                kernel_initializer=Orthogonal(),
                kernel_regularizer=regularizers.l2(self.L2))(x)
            x = BatchNormalization()(x)
            x = LeakyReLU()(x)

# Final Softmax Layer
        x = Conv3D(
            filters=self.output_classes,
            kernel_size=(1, 1, 1),
            #kernel_initializer=he_normal(seed=seed),
            kernel_initializer=Orthogonal(),
            kernel_regularizer=regularizers.l2(self.L2))(x)
        #x        = BatchNormalization()(x)
        x = Activation(softmax)(x)
        #x        = Dense(units = fc_features[2], activation = 'softmax', name = 'softmax')(x)

        model = Model(inputs=[mod1, coords], outputs=x)
        model.compile(loss=Generalised_dice_coef_multilabel2,
                      optimizer=Adam(lr=self.learning_rate),
                      metrics=[dice_coef_multilabel0, dice_coef_multilabel1])

        return model
Example #12
def generate_model(num_classes):
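    # Three Conv3D blocks of increasing width (25 -> 50 -> 75 filters) applied to
    # a two-channel 27^3 patch (assuming a channels-first data format); the
    # outputs of the first two blocks are center-cropped to the 9^3 spatial size
    # of the third and concatenated, then mixed by 1x1x1 convolutions and passed
    # through Reshape/Permute into ClusteringLayer (defined elsewhere).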
    init_input = Input((2, 27, 27, 27))

    x = Conv3D(25,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(init_input)
    x = BatchNormalization()(x)
    x = PReLU()(x)
    x = Conv3D(25,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)
    x = Conv3D(25,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(x)
    x = BatchNormalization()(x)
    x = PReLU()(x)

    y = Conv3D(50,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(x)
    y = BatchNormalization()(y)
    y = PReLU()(y)
    y = Conv3D(50,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(y)
    y = BatchNormalization()(y)
    y = PReLU()(y)
    y = Conv3D(50,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(y)
    y = BatchNormalization()(y)
    y = PReLU()(y)

    z = Conv3D(75,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(y)
    z = BatchNormalization()(z)
    z = PReLU()(z)
    z = Conv3D(75,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(z)
    z = BatchNormalization()(z)
    z = PReLU()(z)
    z = Conv3D(75,
               kernel_size=(3, 3, 3),
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(z)
    z = BatchNormalization()(z)
    z = PReLU()(z)

    x_crop = Cropping3D(cropping=((6, 6), (6, 6), (6, 6)))(x)
    y_crop = Cropping3D(cropping=((3, 3), (3, 3), (3, 3)))(y)

    concat = concatenate([x_crop, y_crop, z], axis=1)

    fc = Conv3D(400,
                kernel_size=(1, 1, 1),
                kernel_initializer='he_normal',
                kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(concat)
    fc = BatchNormalization()(fc)
    fc = PReLU()(fc)
    fc = Dropout(0.25)(fc)
    fc = Conv3D(200,
                kernel_size=(1, 1, 1),
                kernel_initializer='he_normal',
                kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(fc)
    fc = BatchNormalization()(fc)
    fc = PReLU()(fc)
    fc = Dropout(0.25)(fc)
    fc = Conv3D(150,
                kernel_size=(1, 1, 1),
                kernel_initializer='he_normal',
                kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(fc)
    fc = BatchNormalization()(fc)
    fc = PReLU()(fc)
    fc = Dropout(0.25)(fc)

    pred = Conv3D(num_classes,
                  kernel_size=(1, 1, 1),
                  kernel_initializer='he_normal',
                  kernel_regularizer=l1_l2(l1=1e-6, l2=1e-4))(fc)
    pred = BatchNormalization()(pred)
    pred = PReLU()(pred)
    pred = Reshape((num_classes, 9 * 9 * 9))(pred)
    pred = Permute((2, 1))(pred)
    pred = ClusteringLayer(num_classes)(pred)

    model = Model(inputs=init_input, outputs=pred)
    model.compile(loss='kld',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])
    return model