Example #1
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2,up1],axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1,up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(2, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv5)
    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    # if optimizer='sgd', the default values for its hyperparameters will be used.
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
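The Reshape/Permute/softmax tail used throughout these examples flattens per-pixel class scores to shape (batch, patch_height*patch_width, 2), so the training targets must be flattened one-hot masks of the same shape. A minimal sketch of that target preparation, assuming binary masks of shape (N, 1, patch_height, patch_width) (the helper name is illustrative):

import numpy as np

def masks_to_targets(masks, patch_height, patch_width):
    # hypothetical helper: binary masks (N, 1, H, W) -> one-hot targets
    # (N, H*W, 2), matching the softmax output shape of get_unet above
    flat = masks.reshape(masks.shape[0], patch_height * patch_width)
    targets = np.empty((masks.shape[0], patch_height * patch_width, 2))
    targets[:, :, 0] = 1 - flat  # class 0: background
    targets[:, :, 1] = flat      # class 1: foreground
    return targets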
Example #2
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2,up1],axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1,up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(3, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv5)
    conv6 = core.Reshape((3,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(optimizer=adam,loss='categorical_crossentropy',metrics=['categorical_accuracy'],sample_weight_mode="temporal")

    return model
Example #3
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch,patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)#'valid'
    conv1 = Dropout(0.3)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), padding='same')(pool1) #,activation='relu', padding='same')(pool1)
    conv2 = normalization.BatchNormalization(epsilon=2e-05, axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv2)
    conv2 = Activation('relu')(conv2)
    #conv2 = Dropout(0.3)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), padding='same')(pool2)   #, activation='relu', padding='same')(pool2)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv3)
    conv3 = Activation('relu')(conv3)
    #conv3 = Dropout(0.3)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.3)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)
    #
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv5 = Dropout(0.3)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv5)

    conv6 = Conv2D(2, (1, 1), activation='relu',padding='same')(conv5)
    #conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    act = Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=act)
    return model
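Note: unlike Example #1, this variant never passes data_format, so it relies on image_data_format being set to 'channels_first' in ~/.keras/keras.json; that is what makes axis=1 the channel axis for BatchNormalization and the concatenations.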
Example #4
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = merge([conv2,up1], mode='concat', concat_axis=1)
    conv4 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = merge([conv1,up2], mode='concat', concat_axis=1)
    conv5 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    conv6 = Conv2D(2, 1, 1, activation='relu', border_mode='same')(conv5)
    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    #model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'], context=['gpu(0)','gpu(1)','gpu(2)'])
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'], context=['gpu(0)'])

    return model
Example #5
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2,up1],axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv4)

    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1,up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv5)
    
    conv6 = Conv2D(2, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv5)
    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
Example #6
def Unet(nClasses, optimizer=None, input_width=360, input_height=480, nChannels=1):
    
    inputs = Input((nChannels, input_height, input_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    
    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)
    
    conv6 = Convolution2D(nClasses, 1, 1, activation='relu',border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses,input_height*input_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])
    return model
Example #7
def Unet(nClasses, optimizer=None, input_width=64, input_height=96, nChannels=1):
    inputs = Input((input_height, input_width, nChannels))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=-1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=-1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv5)

    conv6 = Conv2D(nClasses, (1, 1), activation='relu', padding='same')(conv5)
    conv6 = core.Reshape((nClasses, input_height * input_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Example #8
def set_cnn_model_attention(input_dim=4, input_length=2701):
    attention_reg_x = 0.25
    attention_reg_xr = 1
    attentionhidden_x = 16
    attentionhidden_xr = 8
    nbfilter = 16
    input = Input(shape=(input_length, input_dim))
    x = conv.Convolution1D(nbfilter, 10, border_mode="valid")(input)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = conv.MaxPooling1D(pool_length=3)(x)
    x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)

    x = Dropout(0.5)(x)
    x_reshape = Dropout(0.5)(x_reshape)

    decoder_x = Attention(hidden=attentionhidden_x,
                          activation='linear')  # success
    decoded_x = decoder_x(x)
    output_x = myFlatten(x._keras_shape[2])(decoded_x)

    decoder_xr = Attention(hidden=attentionhidden_xr, activation='linear')
    decoded_xr = decoder_xr(x_reshape)
    output_xr = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

    output = merge([output_x, output_xr, Flatten()(x)], mode='concat')
    #output = BatchNormalization()(output)
    output = Dropout(0.5)(output)
    print(output.shape)
    output = Dense(nbfilter * 10, activation="relu")(output)
    output = Dropout(0.5)(output)
    out = Dense(2, activation='softmax')(output)
    #output = BatchNormalization()(output)
    model = Model(input, out)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
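Note: Attention and myFlatten are custom layers defined elsewhere in the source project; the snippet otherwise uses the legacy Keras 1 API (border_mode, pool_length, _keras_shape, merge).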
Example #9
def create_nn_architecture(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    #
    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    conv6 = Convolution2D(2, 1, 1, activation='relu',border_mode='same')(conv5)
    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
Example #10
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input((patch_height, patch_width, n_ch))
    conv1 = Conv2DReluBatchNorm(32, 3, 3, inputs)
    conv1 = Conv2DReluBatchNorm(32, 3, 3, conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2DReluBatchNorm(64, 3, 3, pool1)
    conv2 = Conv2DReluBatchNorm(64, 3, 3, conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2DReluBatchNorm(128, 3, 3, pool2)
    conv3 = Conv2DReluBatchNorm(128, 3, 3, conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2DReluBatchNorm(256, 3, 3, pool3)
    conv4 = Conv2DReluBatchNorm(256, 3, 3, conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2DReluBatchNorm(512, 3, 3, pool4)
    conv5 = Conv2DReluBatchNorm(512, 3, 3, conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=3)
    conv6 = Conv2DReluBatchNorm(256, 3, 3, up6)
    conv6 = Conv2DReluBatchNorm(256, 3, 3, conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=3)
    conv7 = Conv2DReluBatchNorm(128, 3, 3, up7)
    conv7 = Conv2DReluBatchNorm(128, 3, 3, conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=3)
    conv8 = Conv2DReluBatchNorm(64, 3, 3, up8)
    conv8 = Conv2DReluBatchNorm(64, 3, 3, conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=3)
    conv9 = Conv2DReluBatchNorm(32, 3, 3, up9)
    conv9 = Conv2DReluBatchNorm(32, 3, 3, conv9)

    conv10 = Convolution2D(1,
                           1,
                           1,
                           activation='sigmoid',
                           W_regularizer=l2(0.01))(conv9)

    reshaped = core.Reshape((patch_height, patch_width))(conv10)
    # conv10 = core.Permute((2, 1))(conv10)

    model = Model(input=inputs, output=reshaped)

    # model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=['accuracy', jaccard_coef])
    # model.compile(optimizer=Adam(lr=1e-5), loss='binary_crossentropy',
    #               metrics=['accuracy', jaccard_coef])
    # model.compile(optimizer=Adam(lr=1e-5), loss=jaccard_loss, metrics=['accuracy', jaccard_coef])
    model.compile(optimizer=Adam(lr=1e-5),
                  loss=binary_crossentropy_weight,
                  metrics=['accuracy', jaccard_coef])

    return model
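Conv2DReluBatchNorm, binary_crossentropy_weight, and jaccard_coef are not defined in this snippet. A minimal sketch of the convolution helper, assuming the legacy Keras 1 API used above and the channels_last ordering implied by concat_axis=3 (the layer order is a guess):

from keras.layers import Activation, BatchNormalization, Convolution2D

def Conv2DReluBatchNorm(n_filter, w_filter, h_filter, inputs):
    # hypothetical helper matching the call sites above:
    # convolution -> ReLU -> batch norm over the channel axis
    x = Convolution2D(n_filter, w_filter, h_filter, border_mode='same')(inputs)
    x = Activation('relu')(x)
    return BatchNormalization(axis=3)(x)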
Example #11
def test_reshape(self):
    layer = core.Reshape(dims=(10, 10))
    self._runner(layer)
Example #12
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    # data_format: a string, either "channels_first" or "channels_last", giving the position of the channel axis.
    # For a 128x128 RGB image, "channels_first" lays the data out as (3, 128, 128) and "channels_last" as (128, 128, 3).
    # The default is the value set in ~/.keras/keras.json; if it was never set, it is "channels_last".
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    #
    # What a 1x1 convolution does, roughly two things:
    # 1. cross-channel interaction and information integration; 2. reducing or increasing the number of channels.
    conv6 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    # output shape is now (batch_size, 2, patch_height*patch_width)
    conv6 = core.Permute((2, 1))(conv6)
    # output shape is now (N_patches, patch_height*patch_width, 2); with 48x48 patches that is (N_patches, 2304, 2)
    ############
    conv7 = core.Activation('softmax')(conv6)
    model = Model(inputs=inputs, outputs=conv7)
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer=Adam(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #13
def DeepCleave_CNN(output_model_name,
                   trainX,
                   trainY,
                   valX=None,
                   valY=None,
                   batch_size=1024,
                   epochs=500,
                   n_earlystop=None,
                   n_transfer_layer=1,
                   background_weights=None,
                   for_transfer=False,
                   compiletimes=0,
                   compilemodels=None,
                   predict=False):
    input_row = trainX.shape[1]
    input_col = trainX.shape[2]

    trainX_t = trainX
    valX_t = valX
    checkpoint = ModelCheckpoint(filepath=output_model_name,
                                 monitor='val_acc',
                                 verbose=1,
                                 mode='max',
                                 save_best_only=True)

    if (n_earlystop is not None):
        early_stopping = EarlyStopping(monitor='val_acc', patience=n_earlystop)
        epochs = 10000
        #set to a very big value since earlystop used

        callback_lists = [early_stopping, checkpoint]
    else:
        callback_lists = [checkpoint]

    trainX_t.shape = (trainX_t.shape[0], input_row, input_col)
    if (valX is not None):
        valX_t.shape = (valX_t.shape[0], input_row, input_col)

    if compiletimes == 0:
        filtersize1 = 1
        filtersize2 = 9
        filtersize3 = 10
        filter1 = 200
        filter2 = 150
        filter3 = 200
        dropout1 = 0.75
        dropout2 = 0.75
        dropout4 = 0.75
        dropout5 = 0.75
        dropout6 = 0.25
        L1CNN = 0
        l2value = 0.001
        nb_classes = 2
        actfun = "relu"
        optimization = 'adam'
        attentionhidden_x = 10
        attentionhidden_xr = 8
        attention_reg_x = 0.151948
        attention_reg_xr = 2
        dense_size1 = 149
        dense_size2 = 8
        dropout_dense1 = 0.298224
        dropout_dense2 = 0

        input2 = Input(shape=(input_row, input_col))

        x = conv.Convolution1D(filter1,
                               filtersize1,
                               init='he_normal',
                               W_regularizer=l2(l2value),
                               border_mode="same")(input2)
        x = Dropout(dropout1)(x)
        x1 = Activation(actfun)(x)

        y1 = conv.Convolution1D(filter2,
                                filtersize2,
                                init='he_normal',
                                W_regularizer=l2(l2value),
                                border_mode="same")(x1)
        y1 = Dropout(dropout2)(y1)
        y1 = Activation(actfun)(y1)

        y2 = conv.Convolution1D(filter2,
                                6,
                                init='he_normal',
                                W_regularizer=l2(l2value),
                                border_mode="same")(x1)
        y2 = Dropout(dropout2)(y2)
        y2 = Activation(actfun)(y2)

        y3 = conv.Convolution1D(filter2,
                                3,
                                init='he_normal',
                                W_regularizer=l2(l2value),
                                border_mode="same")(x1)
        y3 = Dropout(dropout2)(y3)
        y3 = Activation(actfun)(y3)

        mergeY = merge([y1, y2, y3], mode='concat', concat_axis=-1)
        mergeY = Dropout(0.75)(mergeY)

        z1 = conv.Convolution1D(filter3,
                                filtersize3,
                                init='he_normal',
                                W_regularizer=l2(l2value),
                                border_mode="same")(mergeY)
        z1 = Activation(actfun)(z1)

        z2 = conv.Convolution1D(filter3,
                                5,
                                init='he_normal',
                                W_regularizer=l2(l2value),
                                border_mode="same")(mergeY)
        z2 = Activation(actfun)(z2)

        z3 = conv.Convolution1D(filter3,
                                15,
                                init='he_normal',
                                W_regularizer=l2(l2value),
                                border_mode="same")(mergeY)
        z3 = Activation(actfun)(z3)

        x_reshape = core.Reshape((z1._keras_shape[2], z1._keras_shape[1]))(z1)

        x = Dropout(dropout4)(z1)
        x_reshape = Dropout(dropout5)(x_reshape)

        decoder_x = Attention(hidden=attentionhidden_x,
                              activation='linear',
                              init='he_normal',
                              W_regularizer=l1(attention_reg_x))  # success
        decoded_x = decoder_x(x)
        output_x = myFlatten(x._keras_shape[2])(decoded_x)

        decoder_xr = Attention(hidden=attentionhidden_xr,
                               activation='linear',
                               init='he_normal',
                               W_regularizer=l1(attention_reg_xr))
        decoded_xr = decoder_xr(x_reshape)

        output_xr = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

        x_reshape = core.Reshape((z2._keras_shape[2], z2._keras_shape[1]))(z2)

        x = Dropout(dropout4)(z2)
        x_reshape = Dropout(dropout5)(x_reshape)

        decoder_x = Attention(hidden=attentionhidden_x,
                              activation='linear',
                              init='he_normal',
                              W_regularizer=l1(attention_reg_x))  # success
        decoded_x = decoder_x(x)
        output_x2 = myFlatten(x._keras_shape[2])(decoded_x)

        decoder_xr = Attention(hidden=attentionhidden_xr,
                               activation='linear',
                               init='he_normal',
                               W_regularizer=l1(attention_reg_xr))
        decoded_xr = decoder_xr(x_reshape)

        output_xr2 = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

        x_reshape = core.Reshape((z3._keras_shape[2], z3._keras_shape[1]))(z3)

        x = Dropout(dropout4)(z3)
        x_reshape = Dropout(dropout5)(x_reshape)

        decoder_x = Attention(hidden=attentionhidden_x,
                              activation='linear',
                              init='he_normal',
                              W_regularizer=l1(attention_reg_x))  # success
        decoded_x = decoder_x(x)
        output_x3 = myFlatten(x._keras_shape[2])(decoded_x)

        decoder_xr = Attention(hidden=attentionhidden_xr,
                               activation='linear',
                               init='he_normal',
                               W_regularizer=l1(attention_reg_xr))
        decoded_xr = decoder_xr(x_reshape)

        output_xr3 = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

        output = merge([
            output_x, output_xr, output_x2, output_xr2, output_x3, output_xr3
        ],
                       mode='concat')

        output = Dropout(dropout6)(output)
        output = Dense(dense_size1, init='he_normal',
                       activation='relu')(output)
        output = Dropout(dropout_dense1)(output)
        output = Dense(dense_size2, activation="relu",
                       init='he_normal')(output)
        output = Dropout(dropout_dense2)(output)
        out = Dense(nb_classes, init='he_normal', activation='softmax')(output)
        cnn = Model(input2, out)
        cnn.compile(loss='binary_crossentropy',
                    optimizer=optimization,
                    metrics=['accuracy'])

    else:
        cnn = compilemodels

    if (predict is False):
        if (background_weights is not None
                and compiletimes == 0):  #for the first time
            if not for_transfer:
                cnn.load_weights(background_weights)
            else:
                cnn2 = Model(input2, out)
                cnn2.compile(loss='binary_crossentropy',
                             optimizer=optimization,
                             metrics=['accuracy'])

                cnn2.load_weights(background_weights)
                for l in range(
                    (len(cnn2.layers) -
                     n_transfer_layer)):  #the last cnn is not included
                    cnn.layers[l].set_weights(cnn2.layers[l].get_weights())
                    cnn.layers[l].trainable = False  # for frozen layer
                cnn.compile(loss='binary_crossentropy',
                            optimizer=optimization,
                            metrics=['accuracy'])

        if (valX is not None):
            if (n_earlystop is None):
                fitHistory = cnn.fit(trainX_t,
                                     trainY,
                                     batch_size=batch_size,
                                     epochs=epochs,
                                     validation_data=(valX_t, valY))

            else:
                fitHistory = cnn.fit(trainX_t,
                                     trainY,
                                     batch_size=batch_size,
                                     epochs=epochs,
                                     validation_data=(valX_t, valY),
                                     callbacks=callback_lists)
        else:
            fitHistory = cnn.fit(trainX_t,
                                 trainY,
                                 batch_size=batch_size,
                                 epochs=epochs)

    return cnn
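A hypothetical invocation, with an illustrative checkpoint name and early-stopping patience; trainX is expected to have shape (n_samples, rows, cols):

# assumed call; the model is built and compiled on the first call (compiletimes=0)
model = DeepCleave_CNN('deepcleave_best.h5', trainX, trainY,
                       valX=valX, valY=valY,
                       batch_size=1024, n_earlystop=20)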
Example #14
    def build_model(self):
        inputs = Input((self.patch_height, self.patch_width, 1))
        conv1 = Conv2D(32, (3, 3), padding='same')(inputs)  # 'valid'
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        conv1 = Dropout(0.2)(conv1)
        conv1 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv1)
        conv1 = Conv2D(32, (3, 3), dilation_rate=2, padding='same')(conv1)
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        conv1 = Conv2D(32, (3, 3), dilation_rate=4, padding='same')(conv1)
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        # pool1 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(pool1)
        conv2 = Conv2D(64, (3, 3), padding='same')(
            pool1)  # ,activation='relu', padding='same')(pool1)
        conv2 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        conv2 = Dropout(0.2)(conv2)
        conv2 = Conv2D(64, (3, 3), dilation_rate=2, padding='same')(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        conv2 = Conv2D(64, (3, 3), dilation_rate=4, padding='same')(
            conv2)  # ,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        # crop = Cropping2D(cropping=((int(3 * patch_height / 8), int(3 * patch_height / 8)), (int(3 * patch_width / 8), int(3 * patch_width / 8))))(conv1)
        # conv3 = concatenate([crop,pool2], axis=1)
        conv3 = Conv2D(128, (3, 3), padding='same')(
            pool2)  # , activation='relu', padding='same')(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)
        conv3 = Dropout(0.2)(conv3)
        conv3 = Conv2D(128, (3, 3), dilation_rate=2, padding='same')(
            conv3)  # ,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)

        conv3 = Conv2D(128, (3, 3), dilation_rate=4, padding='same')(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=1,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)

        # up1 = UpSampling2D(size=(2, 2))(conv3)
        up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=3)
        conv4 = Conv2D(64, (3, 3), padding='same')(up1)
        conv4 = LeakyReLU(alpha=0.3)(conv4)
        conv4 = Dropout(0.2)(conv4)
        conv4 = Conv2D(64, (3, 3), padding='same')(conv4)
        conv4 = LeakyReLU(alpha=0.3)(conv4)
        # conv4 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv4)
        #
        # up2 = UpSampling2D(size=(2, 2))(conv4)
        up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=3)
        conv5 = Conv2D(32, (3, 3), padding='same')(up2)
        conv5 = LeakyReLU(alpha=0.3)(conv5)
        conv5 = Dropout(0.2)(conv5)
        conv5 = Conv2D(32, (3, 3), padding='same')(conv5)
        conv5 = LeakyReLU(alpha=0.3)(conv5)

        conv6 = Conv2D(self.num_seg_class + 1, (1, 1), padding='same')(conv5)
        conv6 = LeakyReLU(alpha=0.3)(conv6)
        # conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

        # for tensorflow
        # conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
        # for theano
        conv6 = core.Reshape((self.patch_height * self.patch_width,
                              self.num_seg_class + 1))(conv6)
        #conv6 = core.Permute((2, 1))(conv6)
        ############
        act = Activation('softmax')(conv6)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        # self.config.checkpoint = "C:\\Users\\kk\\Desktop\\Optic-Disc-Unet-master\\Optic-Disc-Unet-master\\experiments\\OpticDisc\\checkpoint"
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint, "model.png"),
                   show_shapes=True)
        self.model = model
# base_url = "" # linux
ratings = pd.read_csv(base_url + "ml-1m/ratings.dat",
                      sep='::',
                      names=['user_id', 'movie_id', 'rating', 'timestamp'])
n_users = np.max(ratings['user_id'])
n_movies = np.max(ratings['movie_id'])
print([n_users, n_movies, len(ratings)])  # ['userId', 'movieId', 100005]

plt.hist(ratings['rating'])
plt.show()
print(np.mean(ratings['rating']))

# first sub-network: the user embedding
model1 = Sequential()
model1.add(Embedding(n_users + 1, k, input_length=1))
model1.add(core.Reshape((k, )))  # keras.layers.core.Reshape

# second sub-network: the movie embedding
model2 = Sequential()
model2.add(Embedding(n_movies + 1, k, input_length=1))
model2.add(core.Reshape((k, )))

# third network: a dot product stacked on top of the two embeddings
model = Sequential()
model.add(Merge([model1, model2], mode='dot', dot_axes=1))  # Keras 1 Merge layer (not merge())

# the output is compared against the actual ratings; backprop updates the parameters
model.compile(loss='mse', optimizer='adam')
# alternatively try optimizer = 'rmsprop' or 'adagrad'

# obtain the user-index and movie-index arrays; the feature input X_train must be built from both index arrays together
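Continuing the sketch under the same assumptions, training feeds one index array per sub-network:

X_users = ratings['user_id'].values
X_movies = ratings['movie_id'].values
y = ratings['rating'].values
model.fit([X_users, X_movies], y, batch_size=128, nb_epoch=10)  # nb_epoch: Keras 1 spelling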
Example #16
def get_unet2(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Dropout(0.3)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    pool1 = MaxPooling2D((2, 2), data_format='channels_first')(conv1)

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Dropout(0.3)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)
    conv2 = BatchNormalization(axis=1)(conv2)
    pool2 = MaxPooling2D((2, 2), data_format='channels_first')(conv2)

    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Dropout(0.3)(conv3)
    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv3)
    conv3 = BatchNormalization(axis=1)(conv3)
    pool3 = MaxPooling2D((2, 2), data_format='channels_first')(conv3)

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool3)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Dropout(0.3)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    conv4 = BatchNormalization(axis=1)(conv4)
    pool4 = MaxPooling2D((2, 2), data_format='channels_first')(conv4)

    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool4)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Dropout(0.3)(conv5)
    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    conv5 = BatchNormalization(axis=1)(conv5)

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)
    up1 = concatenate([conv4, up1], axis=1)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)
    up2 = concatenate([conv3, up2], axis=1)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)

    up3 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)
    up3 = concatenate([conv2, up3], axis=1)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up3)
    conv8 = BatchNormalization(axis=1)(conv8)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv8)
    conv8 = BatchNormalization(axis=1)(conv8)

    up4 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)
    up4 = concatenate([conv1, up4], axis=1)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up4)
    conv9 = BatchNormalization(axis=1)(conv9)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv9)
    conv9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(2, (1, 1),
                    activation='relu',
                    padding='same',
                    data_format='channels_first')(conv9)
    conv10 = BatchNormalization(axis=1)(conv10)
    conv10 = core.Reshape((2, patch_height * patch_width))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=conv10)

    adaGrad = Adagrad(lr=1e-7, epsilon=1e-7, decay=1e-6)
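    # note: adaGrad above is created but never used; compile() below receives
    # the 'sgd' string, so the Adagrad settings have no effect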
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #17
up3 = concatenate([conv0, up3], axis=1)
conv6 = Conv2D(16, (3, 3),
               activation='relu',
               padding='same',
               data_format='channels_first')(up3)
conv6 = Dropout(0.2)(conv6)
conv6 = Conv2D(16, (3, 3),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv6)

conv7 = Conv2D(3, (1, 1),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv6)
conv7 = core.Reshape((3, 128 * 128))(conv7)
#conv6 = core.Permute((3,1))(conv6)
conv7 = core.Flatten()(conv7)
#conv7 = core.Dense(64)(conv7)
#conv7 = core.Activation('relu')(conv7)
#conv7 = Dropout(0.2)(conv7)
conv7 = core.Dense(2)(conv7)

############
conv8 = core.Activation('softmax')(conv7)

model = Model(inputs=inputs, outputs=conv8)

# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
model.compile(optimizer='adam',
              loss='categorical_crossentropy')
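Note: this example is a fragment; inputs, conv0, and the earlier layers of the up-sampling path are defined in code that the excerpt omits.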
Example #18
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), padding='same')(inputs)  #'valid'
    conv1 = LeakyReLU(alpha=0.3)(conv1)
    conv1 = Dropout(0.2)(conv1)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv1)
    conv1 = Conv2D(32, (3, 3), dilation_rate=2, padding='same')(conv1)
    conv1 = LeakyReLU(alpha=0.3)(conv1)
    conv1 = Conv2D(32, (3, 3), dilation_rate=4, padding='same')(conv1)
    conv1 = LeakyReLU(alpha=0.3)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    #pool1 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(pool1)
    conv2 = Conv2D(64, (3, 3), padding='same')(
        pool1)  #,activation='relu', padding='same')(pool1)
    conv2 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv2)
    conv2 = LeakyReLU(alpha=0.3)(conv2)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), dilation_rate=2, padding='same')(conv2)
    conv2 = LeakyReLU(alpha=0.3)(conv2)
    conv2 = Conv2D(64, (3, 3), dilation_rate=4, padding='same')(
        conv2)  #,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
    conv2 = LeakyReLU(alpha=0.3)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    #crop = Cropping2D(cropping=((int(3 * patch_height / 8), int(3 * patch_height / 8)), (int(3 * patch_width / 8), int(3 * patch_width / 8))))(conv1)
    #conv3 = concatenate([crop,pool2], axis=1)
    conv3 = Conv2D(128, (3, 3), padding='same')(
        pool2)  #, activation='relu', padding='same')(conv3)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv3)
    conv3 = LeakyReLU(alpha=0.3)(conv3)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), dilation_rate=2, padding='same')(
        conv3)  #,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv3)
    conv3 = LeakyReLU(alpha=0.3)(conv3)

    conv3 = Conv2D(128, (3, 3), dilation_rate=4, padding='same')(conv3)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv3)
    conv3 = LeakyReLU(alpha=0.3)(conv3)

    #up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=1)
    conv4 = Conv2D(64, (3, 3), padding='same')(up1)
    conv4 = LeakyReLU(alpha=0.3)(conv4)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), padding='same')(conv4)
    conv4 = LeakyReLU(alpha=0.3)(conv4)
    #conv4 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv4)
    #
    #up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=1)
    conv5 = Conv2D(32, (3, 3), padding='same')(up2)
    conv5 = LeakyReLU(alpha=0.3)(conv5)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), padding='same')(conv5)
    conv5 = LeakyReLU(alpha=0.3)(conv5)

    conv6 = Conv2D(num_lesion_class + 1, (1, 1), padding='same')(conv5)
    conv6 = LeakyReLU(alpha=0.3)(conv6)
    #conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

    #for tensorflow
    #conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
    #for theano
    conv6 = core.Reshape(
        ((num_lesion_class + 1), patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    act = Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=act)
    return model
Example #19
def test_reshape():
    layer = core.Reshape(dims=(10, 10))
    _runner(layer)
Example #20
def R_Unet(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (1, 1), activation=None, padding='same')(inputs)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv1)
    conv1 = Activation('relu')(conv1)

    conv1 = DenseBlock(conv1, 32)  #48
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = DenseBlock(pool1, 64)  #24
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = DenseBlock(pool2, 64)  #12
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = DenseBlock(pool3, 64)  # 12

    up1 = Conv2DTranspose(64, (3, 3),
                          strides=2,
                          activation='relu',
                          padding='same')(conv4)
    up1 = concatenate([up1, conv3], axis=1)

    conv5 = DenseBlock(up1, 64)

    up2 = Conv2DTranspose(64, (3, 3),
                          strides=2,
                          activation='relu',
                          padding='same')(conv5)
    up2 = concatenate([up2, conv2], axis=1)

    conv6 = DenseBlock(up2, 64)

    up3 = Conv2DTranspose(64, (3, 3),
                          strides=2,
                          activation='relu',
                          padding='same')(conv6)
    up3 = concatenate([up3, conv1], axis=1)

    conv7 = DenseBlock(up3, 32)

    conv8 = Conv2D(num_lesion_class + 1, (1, 1),
                   activation='relu',
                   padding='same')(conv7)
    # conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

    # for tensorflow
    # conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
    # for theano
    conv8 = core.Reshape(
        ((num_lesion_class + 1), patch_height * patch_width))(conv8)
    conv8 = core.Permute((2, 1))(conv8)
    ############
    act = Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=act)
    return model
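DenseBlock and num_lesion_class are not defined in this excerpt. A minimal two-layer DenseBlock sketch, assuming the channels_first layout implied by the axis=1 concatenations above (block depth and layer order are guesses):

from keras.layers import Activation, BatchNormalization, Conv2D, concatenate

def DenseBlock(x, n_filters):
    # hypothetical dense block: each conv sees the concatenation of all
    # previous feature maps (channels_first, hence axis=1)
    for _ in range(2):
        y = BatchNormalization(axis=1)(x)
        y = Activation('relu')(y)
        y = Conv2D(n_filters, (3, 3), padding='same',
                   data_format='channels_first')(y)
        x = concatenate([x, y], axis=1)
    return x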
Example #21
def get_unet_seg(n_ch, img_rows=480, img_cols=480):
    inputs = Input((n_ch, img_rows, img_cols))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_1")(inputs)
    conv1 = BatchNormalization(axis=1, name="conv1_2")(conv1)
    conv1 = Dropout(0.5, name="conv1_3")(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_4")(conv1)
    conv1 = BatchNormalization(axis=1, name="conv1_5")(conv1)
    conv1.trainable = False
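    # note: setting .trainable on an output tensor (as done here and below) has
    # no effect in Keras; to freeze these layers, set trainable=False on the
    # Layer objects (e.g. via model.layers) before compiling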
    pool1 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv1_6")(conv1)
    pool1.trainable = False

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_1")(pool1)
    conv2 = BatchNormalization(axis=1, name="conv2_2")(conv2)
    conv2 = Dropout(0.5, name="conv2_3")(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_4")(conv2)
    conv2 = BatchNormalization(axis=1, name="conv2_5")(conv2)
    conv2.trainable = False
    pool2 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv2_6")(conv2)
    pool2.trainable = False

    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_1")(pool2)
    conv3 = BatchNormalization(axis=1, name="conv3_2")(conv3)
    conv3 = Dropout(0.5, name="conv3_3")(conv3)
    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_4")(conv3)
    conv3 = BatchNormalization(axis=1, name="conv3_5")(conv3)
    conv3.trainable = False
    pool3 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv3_6")(conv3)
    pool3.trainable = False

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_1")(pool3)
    conv4 = BatchNormalization(axis=1, name="conv4_2")(conv4)
    conv4 = Dropout(0.5, name="conv4_3")(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_4")(conv4)
    conv4 = BatchNormalization(axis=1, name="conv4_5")(conv4)
    conv4.trainable = False
    pool4 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv4_6")(conv4)
    pool4.trainable = False

    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_1")(pool4)
    conv5 = BatchNormalization(axis=1, name="conv5_2")(conv5)
    conv5 = Dropout(0.5, name="conv5_3")(conv5)
    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_4")(conv5)
    conv5 = BatchNormalization(axis=1, name="conv5_5")(conv5)
    conv5.trainable = False

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)
    up1 = concatenate([conv4, up1], axis=1)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)
    up2 = concatenate([conv3, up2], axis=1)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)

    up3 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)
    up3 = concatenate([conv2, up3], axis=1)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up3)
    conv8 = BatchNormalization(axis=1)(conv8)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv8)
    conv8 = BatchNormalization(axis=1)(conv8)

    up4 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)
    up4 = concatenate([conv1, up4], axis=1)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up4)
    conv9 = BatchNormalization(axis=1)(conv9)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv9)
    conv9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(2, (1, 1),
                    activation='relu',
                    padding='same',
                    data_format='channels_first')(conv9)
    conv10 = BatchNormalization(axis=1)(conv10)
    conv10 = core.Reshape((2, img_rows * img_cols))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=conv10)
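
    # Freeze the pretrained encoder (the layers named conv1_* through conv5_*,
    # including their BatchNorm and pooling stages) so only the decoder trains.
    # Freezing must be done on the layer objects; setting `.trainable` on a
    # layer's output tensor has no effect.
    for layer in model.layers:
        if layer.name.startswith(('conv1_', 'conv2_', 'conv3_', 'conv4_', 'conv5_')):
            layer.trainable = False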

    # adaGrad = Adagrad(lr=1e-7, epsilon=1e-7, decay=1e-6)  # unused; plain SGD is used below
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #22
class LayerCorrectnessTest(keras_parameterized.TestCase):
    def setUp(self):
        super(LayerCorrectnessTest, self).setUp()
        # Set two virtual CPUs to test MirroredStrategy with multiple devices
        cpus = tf.config.list_physical_devices('CPU')
        tf.config.set_logical_device_configuration(cpus[0], [
            tf.config.LogicalDeviceConfiguration(),
            tf.config.LogicalDeviceConfiguration(),
        ])

    def _create_model_from_layer(self, layer, input_shapes):
        inputs = [layers.Input(batch_input_shape=s) for s in input_shapes]
        if len(inputs) == 1:
            inputs = inputs[0]
        y = layer(inputs)
        model = models.Model(inputs, y)
        model.compile('sgd', 'mse')
        return model

    @parameterized.named_parameters(
        ('LeakyReLU', advanced_activations.LeakyReLU, (2, 2)),
        ('PReLU', advanced_activations.PReLU, (2, 2)),
        ('ELU', advanced_activations.ELU, (2, 2)),
        ('ThresholdedReLU', advanced_activations.ThresholdedReLU, (2, 2)),
        ('Softmax', advanced_activations.Softmax, (2, 2)),
        ('ReLU', advanced_activations.ReLU, (2, 2)),
        ('Conv1D', lambda: convolutional.Conv1D(2, 2), (2, 2, 1)),
        ('Conv2D', lambda: convolutional.Conv2D(2, 2), (2, 2, 2, 1)),
        ('Conv3D', lambda: convolutional.Conv3D(2, 2), (2, 2, 2, 2, 1)),
        ('Conv2DTranspose', lambda: convolutional.Conv2DTranspose(2, 2),
         (2, 2, 2, 2)),
        ('SeparableConv2D', lambda: convolutional.SeparableConv2D(2, 2),
         (2, 2, 2, 1)),
        ('DepthwiseConv2D', lambda: convolutional.DepthwiseConv2D(2, 2),
         (2, 2, 2, 1)),
        ('UpSampling2D', convolutional.UpSampling2D, (2, 2, 2, 1)),
        ('ZeroPadding2D', convolutional.ZeroPadding2D, (2, 2, 2, 1)),
        ('Cropping2D', convolutional.Cropping2D, (2, 3, 3, 1)),
        ('ConvLSTM2D',
         lambda: convolutional_recurrent.ConvLSTM2D(4, kernel_size=(2, 2)),
         (4, 4, 4, 4, 4)),
        ('Dense', lambda: core.Dense(2), (2, 2)),
        ('Dropout', lambda: core.Dropout(0.5), (2, 2)),
        ('SpatialDropout2D', lambda: core.SpatialDropout2D(0.5), (2, 2, 2, 2)),
        ('Activation', lambda: core.Activation('sigmoid'), (2, 2)),
        ('Reshape', lambda: core.Reshape((1, 4, 1)), (2, 2, 2)),
        ('Permute', lambda: core.Permute((2, 1)), (2, 2, 2)),
        ('Attention', dense_attention.Attention, [(2, 2, 3), (2, 3, 3),
                                                  (2, 3, 3)]),
        ('AdditiveAttention', dense_attention.AdditiveAttention, [(2, 2, 3),
                                                                  (2, 3, 3),
                                                                  (2, 3, 3)]),
        ('Embedding', lambda: embeddings.Embedding(4, 4),
         (2, 4), 2e-3, 2e-3, np.random.randint(4, size=(2, 4))),
        ('LocallyConnected1D', lambda: local.LocallyConnected1D(2, 2),
         (2, 2, 1)),
        ('LocallyConnected2D', lambda: local.LocallyConnected2D(2, 2),
         (2, 2, 2, 1)),
        ('Add', merge.Add, [(2, 2), (2, 2)]),
        ('Subtract', merge.Subtract, [(2, 2), (2, 2)]),
        ('Multiply', merge.Multiply, [(2, 2), (2, 2)]),
        ('Average', merge.Average, [(2, 2), (2, 2)]),
        ('Maximum', merge.Maximum, [(2, 2), (2, 2)]),
        ('Minimum', merge.Minimum, [(2, 2), (2, 2)]),
        ('Concatenate', merge.Concatenate, [(2, 2), (2, 2)]),
        ('Dot', lambda: merge.Dot(1), [(2, 2), (2, 2)]),
        ('GaussianNoise', lambda: noise.GaussianNoise(0.5), (2, 2)),
        ('GaussianDropout', lambda: noise.GaussianDropout(0.5), (2, 2)),
        ('AlphaDropout', lambda: noise.AlphaDropout(0.5), (2, 2)),
        ('BatchNormalization', normalization_v2.BatchNormalization,
         (2, 2), 1e-2, 1e-2),
        ('LayerNormalization', normalization.LayerNormalization, (2, 2)),
        ('LayerNormalizationUnfused',
         lambda: normalization.LayerNormalization(axis=1), (2, 2, 2)),
        ('MaxPooling2D', pooling.MaxPooling2D, (2, 2, 2, 1)),
        ('AveragePooling2D', pooling.AveragePooling2D, (2, 2, 2, 1)),
        ('GlobalMaxPooling2D', pooling.GlobalMaxPooling2D, (2, 2, 2, 1)),
        ('GlobalAveragePooling2D', pooling.GlobalAveragePooling2D,
         (2, 2, 2, 1)),
        ('SimpleRNN', lambda: recurrent.SimpleRNN(units=4),
         (4, 4, 4), 1e-2, 1e-2),
        ('GRU', lambda: recurrent.GRU(units=4), (4, 4, 4)),
        ('LSTM', lambda: recurrent.LSTM(units=4), (4, 4, 4)),
        ('GRUV2', lambda: recurrent_v2.GRU(units=4), (4, 4, 4)),
        ('LSTMV2', lambda: recurrent_v2.LSTM(units=4), (4, 4, 4)),
        ('TimeDistributed', lambda: wrappers.TimeDistributed(core.Dense(2)),
         (2, 2, 2)),
        ('Bidirectional',
         lambda: wrappers.Bidirectional(recurrent.SimpleRNN(units=4)),
         (2, 2, 2)),
        ('AttentionLayerCausal',
         lambda: dense_attention.Attention(causal=True), [(2, 2, 3), (2, 3, 3),
                                                          (2, 3, 3)]),
        ('AdditiveAttentionLayerCausal',
         lambda: dense_attention.AdditiveAttention(causal=True), [(2, 3, 4),
                                                                  (2, 3, 4),
                                                                  (2, 3, 4)]),
    )
    def test_layer(self,
                   f32_layer_fn,
                   input_shape,
                   rtol=2e-3,
                   atol=2e-3,
                   input_data=None):
        """Tests a layer by comparing the float32 and mixed precision weights.

    A float32 layer, a mixed precision layer, and a distributed mixed precision
    layer are run. The three layers are identical other than their dtypes and
    distribution strategies. The outputs after predict() and weights after fit()
    are asserted to be close.

    Args:
      f32_layer_fn: A function returning a float32 layer. The other two layers
        will automatically be created from this
      input_shape: The shape of the input to the layer, including the batch
        dimension. Or a list of shapes if the layer takes multiple inputs.
      rtol: The relative tolerance to be asserted.
      atol: The absolute tolerance to be asserted.
      input_data: A Numpy array with the data of the input. If None, input data
        will be randomly generated
    """

        if f32_layer_fn == convolutional.ZeroPadding2D and \
           tf.test.is_built_with_rocm():
            return
        if isinstance(input_shape[0], int):
            input_shapes = [input_shape]
        else:
            input_shapes = input_shape
        strategy = create_mirrored_strategy()
        f32_layer = f32_layer_fn()

        # Create the layers
        assert f32_layer.dtype == f32_layer._compute_dtype == 'float32'
        config = f32_layer.get_config()
        config['dtype'] = policy.Policy('mixed_float16')
        mp_layer = f32_layer.__class__.from_config(config)
        distributed_mp_layer = f32_layer.__class__.from_config(config)

        # Compute per_replica_input_shapes for the distributed model
        global_batch_size = input_shapes[0][0]
        assert global_batch_size % strategy.num_replicas_in_sync == 0, (
            'The number of replicas, %d, does not divide the global batch size of '
            '%d' % (strategy.num_replicas_in_sync, global_batch_size))
        per_replica_batch_size = (global_batch_size //
                                  strategy.num_replicas_in_sync)
        per_replica_input_shapes = [(per_replica_batch_size, ) + s[1:]
                                    for s in input_shapes]

        # Create the models
        f32_model = self._create_model_from_layer(f32_layer, input_shapes)
        mp_model = self._create_model_from_layer(mp_layer, input_shapes)
        with strategy.scope():
            distributed_mp_model = self._create_model_from_layer(
                distributed_mp_layer, per_replica_input_shapes)

        # Set all model weights to the same values
        f32_weights = f32_model.get_weights()
        mp_model.set_weights(f32_weights)
        distributed_mp_model.set_weights(f32_weights)

        # Generate input data
        if input_data is None:
            # Cast inputs to float16 so the float32 and mixed precision models see
            # the same input values (a mixed precision layer casts its inputs to
            # float16 anyway).
            input_data = [
                np.random.normal(size=s).astype('float16')
                for s in input_shapes
            ]
            if len(input_data) == 1:
                input_data = input_data[0]

        # Assert all models have close outputs.
        f32_output = f32_model.predict(input_data)
        mp_output = mp_model.predict(input_data)
        self.assertAllClose(mp_output, f32_output, rtol=rtol, atol=atol)
        self.assertAllClose(distributed_mp_model.predict(input_data),
                            f32_output,
                            rtol=rtol,
                            atol=atol)

        # Run fit() on models
        output = np.random.normal(
            size=f32_model.outputs[0].shape).astype('float16')
        for model in f32_model, mp_model, distributed_mp_model:
            model.fit(input_data, output, batch_size=global_batch_size)

        # Assert all models have close weights
        f32_weights = f32_model.get_weights()
        self.assertAllClose(mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
        self.assertAllClose(distributed_mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
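
A minimal sketch of the config-based dtype swap this test relies on (assumes TF 2.x, where tf.keras.mixed_precision.Policy is the public counterpart of the internal policy.Policy used above):

    from tensorflow.keras import layers, mixed_precision

    f32_layer = layers.Dense(2)                                 # float32 by default
    config = f32_layer.get_config()
    config['dtype'] = mixed_precision.Policy('mixed_float16')
    mp_layer = layers.Dense.from_config(config)                 # same config, mixed precision
    assert mp_layer.compute_dtype == 'float16'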
Example #23
def baseline_unet(patch_height, patch_width, n_ch):
    inputs = Input(shape=(patch_height, patch_width, n_ch))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(conv1)
    pool1 = MaxPooling2D((2, 2), data_format='channels_last')(conv1)

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(conv2)
    pool2 = MaxPooling2D((2, 2), data_format='channels_last')(conv2)

    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(conv3)

    up1 = UpSampling2D(size=(2, 2), data_format='channels_last')(conv3)
    up1 = Concatenate(axis=3)([conv2, up1])

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(conv4)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_last')(conv4)
    up2 = Concatenate(axis=3)([conv1, up2])

    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(conv5)

    conv6 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_last')(conv5)
    conv6 = core.Reshape((patch_height * patch_width, 2))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)
    model.summary()

    opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=0.0)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #24
def MultiCNN(trainX,
             trainY,
             valX=None,
             valY=None,
             batch_size=1200,
             nb_epoch=500,
             earlystop=None,
             transferlayer=1,
             weights=None,
             forkinas=False,
             compiletimes=0,
             compilemodels=None,
             predict=False):
    input_row = trainX.shape[2]
    input_col = trainX.shape[3]

    trainX_t = trainX
    valX_t = valX
    if (earlystop is not None):
        early_stopping = EarlyStopping(monitor='val_loss', patience=earlystop)
        nb_epoch = 10000  # set very large; EarlyStopping will halt training early

    trainX_t.shape = (trainX_t.shape[0], input_row, input_col)
    if (valX is not None):
        valX_t.shape = (valX_t.shape[0], input_row, input_col)

    if compiletimes == 0:
        filtersize1 = 1
        filtersize2 = 9
        filtersize3 = 10
        filter1 = 200
        filter2 = 150
        filter3 = 200
        dropout1 = 0.75
        dropout2 = 0.75
        dropout4 = 0.75
        dropout5 = 0.75
        dropout6 = 0
        L1CNN = 0
        nb_classes = 2
        batch_size = 1200
        actfun = "relu"
        optimization = 'adam'
        attentionhidden_x = 10
        attentionhidden_xr = 8
        attention_reg_x = 0.151948
        attention_reg_xr = 2
        dense_size1 = 149
        dense_size2 = 8
        dropout_dense1 = 0.298224
        dropout_dense2 = 0

        input = Input(shape=(input_row, input_col))
        x = conv.Convolution1D(filter1,
                               filtersize1,
                               init='he_normal',
                               W_regularizer=l1(L1CNN),
                               border_mode="same")(input)
        x = Dropout(dropout1)(x)
        x = Activation(actfun)(x)
        x = conv.Convolution1D(filter2,
                               filtersize2,
                               init='he_normal',
                               W_regularizer=l1(L1CNN),
                               border_mode="same")(x)
        x = Dropout(dropout2)(x)
        x = Activation(actfun)(x)
        x = conv.Convolution1D(filter3,
                               filtersize3,
                               init='he_normal',
                               W_regularizer=l1(L1CNN),
                               border_mode="same")(x)
        x = Activation(actfun)(x)
        x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)

        x = Dropout(dropout4)(x)
        x_reshape = Dropout(dropout5)(x_reshape)
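        # `Attention` and `myFlatten` (used below) are custom layers assumed to
        # be defined elsewhere in this project; they are not part of the Keras API.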

        decoder_x = Attention(hidden=attentionhidden_x,
                              activation='linear',
                              init='he_normal',
                              W_regularizer=l1(attention_reg_x))
        decoded_x = decoder_x(x)
        output_x = myFlatten(x._keras_shape[2])(decoded_x)

        decoder_xr = Attention(hidden=attentionhidden_xr,
                               activation='linear',
                               init='he_normal',
                               W_regularizer=l1(attention_reg_xr))
        decoded_xr = decoder_xr(x_reshape)
        output_xr = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

        output = merge([output_x, output_xr], mode='concat')
        output = Dropout(dropout6)(output)
        output = Dense(dense_size1, init='he_normal',
                       activation='relu')(output)
        output = Dropout(dropout_dense1)(output)
        output = Dense(dense_size2, activation="relu",
                       init='he_normal')(output)
        output = Dropout(dropout_dense2)(output)
        out = Dense(nb_classes, init='he_normal', activation='softmax')(output)
        cnn = Model(input, out)
        cnn.compile(loss='binary_crossentropy',
                    optimizer=optimization,
                    metrics=['accuracy'])

    else:
        cnn = compilemodels

    if (predict is False):
        if (weights is not None and compiletimes == 0):  #for the first time
            print "load weights:" + weights
            if not forkinas:
                cnn.load_weights(weights)
            else:
                cnn2 = copy.deepcopy(cnn)
                cnn2.load_weights(weights)
                for l in range((len(cnn2.layers) -
                                transferlayer)):  # copy all but the last `transferlayer` layers
                    cnn.layers[l].set_weights(cnn2.layers[l].get_weights())
                    # cnn.layers[l].trainable = False  # uncomment to freeze transferred layers

        if (valX is not None):
            if (earlystop is None):
                fitHistory = cnn.fit(trainX_t,
                                     trainY,
                                     batch_size=batch_size,
                                     nb_epoch=nb_epoch,
                                     validation_data=(valX_t, valY))
            else:
                fitHistory = cnn.fit(trainX_t,
                                     trainY,
                                     batch_size=batch_size,
                                     nb_epoch=nb_epoch,
                                     validation_data=(valX_t, valY),
                                     callbacks=[early_stopping])
        else:
            fitHistory = cnn.fit(trainX_t,
                                 trainY,
                                 batch_size=batch_size,
                                 nb_epoch=nb_epoch)

    return cnn
Example #25
def unet_model_MultiScale():
    inputs = Input(config["input_shape"])
    conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(32, (3, 3, 3), padding='same')(conv1)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             beta_initializer='zeros',
                                             gamma_initializer='ones')(conv1)
    conv1 = core.Activation('relu')(conv1)
    pool1 = MaxPooling3D(pool_size=config["pool_size"])(conv1)

    conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv3D(64, (3, 3, 3), padding='same')(conv2)
    conv2 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             beta_initializer='zeros',
                                             gamma_initializer='ones')(conv2)
    conv2 = core.Activation('relu')(conv2)

    pool2_1 = MaxPooling3D(pool_size=config["pool_size"])(conv2)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_1)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_1)

    pool2_2 = MaxPooling3D(pool_size=(4, 4, 4))(conv2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_2)

    fuse = concatenate(
        [UpSampling3D(size=config["pool_size"])(conv3_2), conv3_1], axis=1)
    conv3_f = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(fuse)

    up4 = concatenate([UpSampling3D(size=config["pool_size"])(conv3_f), conv2],
                      axis=1)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up4)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv4)

    up5 = concatenate([UpSampling3D(size=config["pool_size"])(conv4), conv1],
                      axis=1)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(up5)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(conv5)

    conv6 = Conv3D(config["n_labels"], (1, 1, 1))(conv5)
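    # `out_w` is assumed to be defined at module level; the two valid-padding
    # 3x3x3 convs above shrink each spatial dim by 4, so out_w = input_size - 4.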
    conv6 = core.Reshape((1, out_w * out_w * out_w))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    act = Activation('sigmoid')(conv6)
    model = Model(inputs=inputs, outputs=act)

    #model.compile(optimizer=Adam(lr=config["initial_learning_rate"]), loss='categorical_crossentropy',metrics=['fbeta_score'])
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])
    return model
Example #26
    def build_model(self):
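        # `_ResBlock` and `_aspp` are helper methods assumed to be defined
        # elsewhere in this class.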
        inputs = Input((self.patch_height, self.patch_width, 3))

        block1 = self._ResBlock(inputs, 32)  #128 128 32
        pool1 = MaxPooling2D(strides=(2, 2))(block1)

        block2 = self._ResBlock(pool1, 32)  # 64 64 32
        pool2 = MaxPooling2D(strides=(2, 2))(block2)

        block3 = self._ResBlock(pool2, 64)  # 32 32 64
        pool3 = MaxPooling2D(strides=(2, 2))(block3)

        block4_1 = self._ResBlock(pool3, 128, 1)  #16 16 128
        block4_2 = self._ResBlock(block4_1, 128, 2)
        block4 = self._ResBlock(block4_2, 128, 4)

        aspp = self._aspp(block4)

        decode_16 = Conv2DTranspose(64, (3, 3),
                                    strides=(4, 4),
                                    activation="relu",
                                    padding="same")(block4)
        decode_32 = Conv2DTranspose(64, (3, 3),
                                    strides=(2, 2),
                                    activation="relu",
                                    padding="same")(block3)
        up1 = concatenate([block2, decode_16, decode_32], axis=3)

        aspp = Conv2DTranspose(32, (3, 3),
                               strides=(4, 4),
                               activation="relu",
                               padding="same")(aspp)
        up2 = concatenate([aspp, up1], axis=3)

        decode = Conv2D(32, (3, 3), strides=(1, 1), padding="same")(up2)
        decode = BatchNormalization()(decode)
        decode = Activation('relu')(decode)

        decode2 = Conv2DTranspose(32, (3, 3),
                                  strides=(2, 2),
                                  activation="relu",
                                  padding="same")(decode)

        decode3 = concatenate([decode2, block1], axis=3)

        conv8 = Conv2D(self.num_seg_class + 1, (1, 1),
                       activation='relu',
                       padding='same')(decode3)
        conv8 = core.Reshape((self.patch_height * self.patch_width,
                              (self.num_seg_class + 1)))(conv8)
        ############
        act = Activation('softmax')(conv8)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint,
                                        "deeplabv3+.png"),
                   show_shapes=True)
        self.model = model
Example #27
def get_gnet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=1)  # channel axis; this snippet assumes channels-first data
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv6)
    #
    up3 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=1)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv7)
    #
    up4 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=1)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    #
    conv10 = Convolution2D(2, 1, 1, activation='relu', border_mode='same')(conv9)
    conv10 = core.Reshape((2,patch_height*patch_width))(conv10)
    conv10 = core.Permute((2,1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=conv10)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])
    # imageSegmentationGenerator is assumed to yield (images, labels) batches,
    # so it is passed to fit_generator rather than unpacked into arrays.
    train_gen = imageSegmentationGenerator(cfg.train_images, cfg.train_annotations, cfg.train_batch_size,
                                           cfg.n_classes, cfg.input_height, cfg.input_width, cfg.output_height,
                                           cfg.output_width)
    model.fit_generator(
        train_gen,
        steps_per_epoch=int(cfg.train_data_number / cfg.train_batch_size),
        max_queue_size=8, workers=4, epochs=cfg.epochs
        # pass a validation generator via validation_data/validation_steps if available
    )
    return model
Example #28
def get_dilated_unet(n_ch, patch_height, patch_width, dilaterate=3):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(2, (1, 1), activation='relu', padding='same', data_format='channels_first')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)
    # scheduler = LearningRateScheduler(mlr.lr_scheduler)
    sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
    # adam=optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
    # model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # 1. Objective functions:
    # (1) mean_squared_error / mse: mean squared error, a common objective; formula: ((y_pred - y_true) ** 2).mean()
    # (2) mean_absolute_error / mae: mean absolute error; formula: (|y_pred - y_true|).mean()
    # (3) mean_absolute_percentage_error / mape: formula: (|(y_true - y_pred) / clip(|y_true|, epsilon, infinite)|).mean(axis=-1) * 100;
    #     like mae, except each difference is divided by the true value (clipped to [epsilon, infinite]) before averaging.
    # (4) mean_squared_logarithmic_error / msle: formula: ((log(clip(y_pred, epsilon, infinite) + 1) - log(clip(y_true, epsilon, infinite) + 1)) ** 2).mean(axis=-1);
    #     predictions and targets are clipped, shifted by 1, log-transformed, differenced, squared, then averaged.
    # (5) squared_hinge: formula: (max(1 - y_true * y_pred, 0) ** 2).mean(axis=-1); the squared hinge term, averaged.
    # (6) hinge: formula: (max(1 - y_true * y_pred, 0)).mean(axis=-1); the hinge term, averaged.
    # (7) binary_crossentropy: the usual cross-entropy for binary (logistic-regression-style) problems
    # (8) categorical_crossentropy: cross-entropy for multi-class problems
    #
    # 2. Evaluation metrics:
    # (1) binary_accuracy: mean accuracy over all predictions, for binary problems
    # (2) categorical_accuracy: mean accuracy over all predictions, for multi-class problems
    # (3) sparse_categorical_accuracy: same as categorical_accuracy, useful with sparse (integer) targets
    # (4) top_k_categorical_accuracy: top-k accuracy; a prediction counts as correct if the target class is among the k highest-scoring classes
    # (5) sparse_top_k_categorical_accuracy: same as top_k_categorical_accuracy, but for sparse targets
    return model
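
A quick numeric check of the mse and mae formulas in the comments above (NumPy only; the values are made up for illustration):

    import numpy as np

    y_true = np.array([[0., 1.], [1., 0.]])
    y_pred = np.array([[0.2, 0.8], [0.6, 0.4]])
    mse = ((y_pred - y_true) ** 2).mean()  # (0.04 + 0.04 + 0.16 + 0.16) / 4 = 0.1
    mae = np.abs(y_pred - y_true).mean()   # (0.2 + 0.2 + 0.4 + 0.4) / 4 = 0.3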
Example #29
def get_gnet(n_ch, patch_height, patch_width):
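    # Note: this snippet uses the legacy Keras 1 API throughout --
    # Convolution2D(filters, rows, cols), border_mode=, merge(mode='concat',
    # concat_axis=...), and Model(input=..., output=...). Under Keras 2 these
    # become Conv2D(filters, (rows, cols)), padding=, concatenate(axis=...),
    # and Model(inputs=..., outputs=...).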
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv5)
    #
    up2 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv6)
    #
    up3 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv7)
    #
    up4 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)
    #
    conv10 = Convolution2D(2, 1, 1, activation='relu',
                           border_mode='same')(conv9)
    conv10 = core.Reshape((2, patch_height * patch_width))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(input=inputs, output=conv10)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #30
    def build_model(self):
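        # `UnetConv2D`, `UnetGatingSignal`, and `AttnGatingBlock` are helper
        # methods assumed to be defined elsewhere in this class.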
        inputs = Input((self.patch_height, self.patch_width, 3))
        conv = Conv2D(16, (3, 3), padding='same')(inputs)  # 'valid'
        conv = LeakyReLU(alpha=0.3)(conv)

        conv1 = self.UnetConv2D(conv, 32, is_batchnorm=True)  # 32 128
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = self.UnetConv2D(pool1, 32, is_batchnorm=True)  # 32 64
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = self.UnetConv2D(pool2, 64, is_batchnorm=True)  # 64 32
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = self.UnetConv2D(pool3, 64, is_batchnorm=True)  # 64 16
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        center = self.UnetConv2D(pool4, 128, is_batchnorm=True)  # 128 8
        gating = self.UnetGatingSignal(center, is_batchnorm=True)

        attn_1 = self.AttnGatingBlock(conv4, gating, 128)
        up1 = concatenate([
            Conv2DTranspose(64, (3, 3), strides=(2, 2),
                            padding='same')(center), attn_1
        ],
                          axis=3)

        attn_2 = self.AttnGatingBlock(conv3, gating, 64)
        up2 = concatenate([
            Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same')(up1),
            attn_2
        ],
                          axis=3)

        attn_3 = self.AttnGatingBlock(conv2, gating, 32)
        up3 = concatenate([
            Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same')(up2),
            attn_3
        ],
                          axis=3)

        up4 = concatenate([
            Conv2DTranspose(32,
                            (3, 3), strides=(2, 2), padding='same')(up3), conv1
        ],
                          axis=3)

        conv8 = Conv2D(self.num_seg_class + 1, (1, 1),
                       activation='relu',
                       padding='same')(up4)
        conv8 = core.Reshape((self.patch_height * self.patch_width,
                              (self.num_seg_class + 1)))(conv8)
        ############
        act = Activation('softmax')(conv8)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint, "model.png"),
                   show_shapes=True)
        self.model = model