Example #1
# Assumed imports for the module aliases used below (old Keras API):
import keras.models as MODELS
import keras.layers.core as CORE
import keras.layers.convolutional as CONV

conv_model = MODELS.Sequential()
loc_model = MODELS.Sequential()
model = MODELS.Sequential()

if conv1:
    conv_model.add(
        CONV.Convolution2D(conv1_filters,
                           conv1_filter_size,
                           conv1_filter_size,
                           subsample=(conv1_stride, conv1_stride),
                           border_mode='valid',
                           input_shape=(prev_frames, image_size, image_size)))
    if pool1:
        conv_model.add(CONV.MaxPooling2D(pool_size=(pool1_size, pool1_size)))
    conv_model.add(CORE.Activation(conv1_act))
    conv_model.add(CORE.Flatten())
    conv_model.add(CORE.Dense(fc1_size))
    conv_model.add(CORE.Activation(fc1_act))
loc_model.add(CORE.Dense(fc1_size, input_shape=(prev_frames * 4, )))
loc_model.add(CORE.Activation(fc1_act))
#model.add(CONV.Convolution2D(conv2_filters, conv2_filter_size, conv2_filter_size, border_mode='valid'))
#model.add(CONV.MaxPooling2D(pool_size=(pool2_size, pool2_size)))
#model.add(CORE.Activation(conv2_act))
model.add(CORE.Merge([conv_model, loc_model], mode='concat'))
model.add(CORE.Dense(4, init='zero'))
model.add(CORE.Activation(fc2_act))

print('Building bouncing MNIST generator')

from data_handler import *
Example #2
def test_activation(self):
    layer = core.Activation('linear')
    self._runner(layer)
Example #3
def Unet(nClasses,
         optimizer=None,
         input_width=360,
         input_height=480,
         nChannels=1):

    inputs = Input((nChannels, input_height, input_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2],
                mode='concat',
                concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv4)

    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1],
                mode='concat',
                concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    conv6 = Convolution2D(nClasses,
                          1,
                          1,
                          activation='relu',
                          border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses, input_height * input_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
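The Reshape -> Permute -> softmax tail that closes Unet above (and recurs in most examples on this page) converts the (nClasses, H, W) score map into one probability distribution per pixel, which is what categorical_crossentropy expects. A minimal standalone sketch with toy shapes of my own, not part of the original example:

import numpy as np
from keras.layers import Input, Reshape, Permute, Activation
from keras.models import Model

nClasses, h, w = 3, 4, 5
x = Input(shape=(nClasses, h, w))   # per-pixel class scores, channels first
y = Reshape((nClasses, h * w))(x)   # -> (None, 3, 20)
y = Permute((2, 1))(y)              # -> (None, 20, 3): one row per pixel
y = Activation('softmax')(y)        # softmax over the last (class) axis
m = Model(x, y)

p = m.predict(np.zeros((1, nClasses, h, w)))
print(p.shape)          # (1, 20, 3)
print(p.sum(axis=-1))   # every pixel's class probabilities sum to 1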
Example #4
def test_activation():
    layer = core.Activation('linear')
    _runner(layer)
Example #5
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    # data_format: either "channels_first" or "channels_last"; it sets where the
    # channel axis of an image sits. For a 128x128 RGB image, "channels_first"
    # expects the data as (3, 128, 128) while "channels_last" expects (128, 128, 3).
    # The default is read from ~/.keras/keras.json and is "channels_last" if unset.
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    #
    # What a 1x1 convolution is for, roughly two things:
    # 1. cross-channel interaction and information integration;
    # 2. reducing or expanding the number of channels.
    conv6 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(
        conv6)  # output shape is now (batch_size, 2, patch_height*patch_width)
    conv6 = core.Permute((2, 1))(
        conv6
    )  # output shape is now (n_patches, patch_height*patch_width, 2), i.e. (n_patches, 2304, 2)
    ############
    conv7 = core.Activation('softmax')(conv6)
    model = Model(inputs=inputs, outputs=conv7)
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer=Adam(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
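To make the data_format comment above concrete, here is a minimal standalone sketch (the filter count of 8 is arbitrary) showing how the channel axis moves:

from keras.layers import Input, Conv2D
from keras.models import Model

# channels_first: tensors are (batch, channels, height, width)
x_cf = Input(shape=(3, 128, 128))
m_cf = Model(x_cf, Conv2D(8, (3, 3), padding='same',
                          data_format='channels_first')(x_cf))
print(m_cf.output_shape)  # (None, 8, 128, 128)

# channels_last (the Keras default): (batch, height, width, channels)
x_cl = Input(shape=(128, 128, 3))
m_cl = Model(x_cl, Conv2D(8, (3, 3), padding='same',
                          data_format='channels_last')(x_cl))
print(m_cl.output_shape)  # (None, 128, 128, 8)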
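The 1x1-convolution note can be checked the same way: a 1x1 kernel computes, at each spatial location, a learned linear mix of all input channels, so it integrates cross-channel information and raises or lowers the channel count without touching spatial size. A small sketch with illustrative shapes:

from keras.layers import Input, Conv2D
from keras.models import Model

x = Input(shape=(64, 24, 24))  # 64 channels, channels_first layout
y = Conv2D(16, (1, 1), data_format='channels_first')(x)  # 64 -> 16 channels
print(Model(x, y).output_shape)  # (None, 16, 24, 24): spatial dims unchanged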
Example #6
File: fuck_net.py  Project: qq191513/mySeg
def get_gnet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv6)
    #
    up3 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv7)
    #
    up4 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    #
    conv10 = Convolution2D(2, 1, 1, activation='relu', border_mode='same')(conv9)
    conv10 = core.Reshape((2,patch_height*patch_width))(conv10)
    conv10 = core.Permute((2,1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=conv10)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])
    x, y = imageSegmentationGenerator(cfg.train_images, cfg.train_annotations, cfg.train_batch_size,
                                      cfg.n_classes, cfg.input_height, cfg.input_width, cfg.output_height,
                                      cfg.output_width)
    model.fit(
        x, y,
        steps_per_epoch=int(cfg.train_data_number / cfg.train_batch_size),
        max_queue_size=8, workers=4, epochs=cfg.epochs
    )
    # return model
Example #7
trainX = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
trainX = trainX.astype(float)
trainX /= 255.0

trainY = kutils.to_categorical(train[:, 0])
nb_classes = trainY.shape[1]

cnn = models.Sequential()

cnn.add(conv.ZeroPadding2D(
    (1, 1),
    input_shape=(1, img_rows, img_cols),
))

cnn.add(conv.Convolution2D(filters[0], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

cnn.add(conv.ZeroPadding2D((1, 1)))

cnn.add(conv.Convolution2D(filters[1], kernel, kernel))
cnn.add(core.Activation('relu'))
cnn.add(conv.MaxPooling2D(strides=(pool, pool)))

cnn.add(conv.ZeroPadding2D((1, 1)))

cnn.add(core.Flatten())
cnn.add(core.Dropout(0.5))
cnn.add(core.Dense(128))
cnn.add(core.Activation('relu'))
cnn.add(core.Dense(nb_classes))
Example #8
    train_num = int(len(data) * p)
    train_data = data.iloc[:train_num, :]
    test_data = data.iloc[train_num:, :]
    #
    X_train = train_data.iloc[:, :-1]
    y_train = train_data.iloc[:, -1]
    X_test = test_data.iloc[:, :-1]
    y_test = test_data.iloc[:, -1]
    # print(test_data)

    # Build the model
    model = models.Sequential()
    # Connect the input layer (3 units) to the hidden layer (10 units)
    model.add(core.Dense(input_dim=3, units=10))
    # ReLU activation on the hidden layer
    model.add(core.Activation('relu'))
    # Connect the hidden layer (10 units) to the output layer (1 unit)
    model.add(core.Dense(input_dim=10, units=1))
    # Sigmoid activation on the output layer
    model.add(core.Activation('sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    # Train the model
    model.fit(X_train, y_train, epochs=100, batch_size=1)
    model.save_weights('./net.model')

    predict_result = model.predict_classes(X_train).reshape(len(X_train))

    plt = cm_plot(y_train, predict_result)
    plt.savefig('{0}.png'.format('./img/Figure_02_1'))
Example #9
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    # encoding path
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)  # 32*48*48
    #
    pool1 = MaxPooling2D((2, 2), data_format='channels_first')(conv1)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)  # 64*24*24
    #
    pool2 = MaxPooling2D((2, 2), data_format='channels_first')(conv2)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name='conv3')(conv3)  # 128*12*12

    vgg = Model(inputs, conv3)
    vgg.load_weights(VGG_PATH, by_name=True)

    # decoding + concat path
    up1 = UpSampling2D(size=(2, 2),
                       data_format='channels_first')(conv3)  # 128*24*24
    up1 = Conv2D(64, (3, 3), padding='same',
                 data_format='channels_first')(up1)  # 64*24*24
    up1 = Dropout(0.2)(up1)
    up1 = Activation('relu')(up1)
    x2 = Attention_block(up1, conv2, 32)
    up1 = Concatenate(axis=1)([x2, up1])
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv4)
    up2 = Conv2D(32, (3, 3), padding='same', data_format='channels_first')(up2)
    up2 = Dropout(0.2)(up2)
    up2 = Activation('relu')(up2)
    x1 = Attention_block(up2, conv1, 16)
    up2 = Concatenate(axis=1)([x1, up2])
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)  # 32*48*48
    #
    conv6 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)  # 2*48*48
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    #sgd = SGD(lr=0.001, decay=1e-6, momentum=0.3, nesterov=False)
    #optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    #model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.compile(optimizer=Adam(lr=1e-3),
                  loss=losses.binary_crossentropy,
                  metrics=['accuracy'])

    return model
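This example calls an Attention_block helper (and a VGG_PATH weights file) defined elsewhere in the project. Below is a plausible minimal attention gate in the usual Attention U-Net style; the signature matches the calls above, but the internals are my assumption, not the original code:

from keras.layers import Conv2D, Activation, add, multiply

def Attention_block(g, x, inter_ch):
    # Assumed gate: project the gate signal g and the skip feature x down to
    # inter_ch channels, add them, ReLU, squash to a one-channel sigmoid mask,
    # then reweight the skip feature x with that mask (channels_first layout).
    g1 = Conv2D(inter_ch, (1, 1), data_format='channels_first')(g)
    x1 = Conv2D(inter_ch, (1, 1), data_format='channels_first')(x)
    psi = Activation('relu')(add([g1, x1]))
    psi = Conv2D(1, (1, 1), activation='sigmoid',
                 data_format='channels_first')(psi)
    return multiply([x, psi])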
Example #10
    data_train = data.loc[1994:2013].copy()
    # Standardize the data
    data_mean = data_train.mean()
    data_std = data_train.std()
    data_train = (data_train - data_mean) / data_std
    # Feature columns used for training
    X_train = data_train[feature]
    # Target values
    y_train = data_train['y']

    # Build the model
    model = models.Sequential()
    # Connect the input layer (6 units) to the hidden layer (12 units)
    model.add(core.Dense(input_dim=6, units=12))
    # ReLU activation on the hidden layer
    model.add(core.Activation('relu'))
    # Connect the hidden layer (12 units) to the output layer (1 unit)
    model.add(core.Dense(input_dim=12, units=1))
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    # Train the model
    model.fit(X_train, y_train, epochs=10000, batch_size=32, verbose=1)
    # Save the model weights
    model.save_weights('./temp/4-net.model')

    # Predict, then undo the standardization
    x = (data[feature] - data_mean[feature]) / data_std[feature]
    data['y_pred'] = model.predict(x) * data_std['y'] + data_mean['y']
    p = data[['y', 'y_pred']].plot(subplots=True, style=['b-o', 'r-*'])
    plt.savefig('{0}.png'.format('./img/Figure_04'))
Example #11
               padding='same',
               data_format='channels_first')(up2)
conv5 = Dropout(0.2)(conv5)
conv5 = Conv2D(32, (3, 3),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv5)

conv6 = Conv2D(2, (1, 1),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv5)
conv6 = core.Reshape((2, patch_size**2))(conv6)
conv6 = core.Permute((2, 1))(conv6)

conv7 = core.Activation('softmax')(conv6)

model = Model(inputs=inputs, outputs=conv7)
model.compile(optimizer='sgd', loss=loss_name, metrics=['accuracy'])

# Custom callback --------------------------------------------------------------
output_dir = 'output_' + str(patch_size) + '-' + str(stride) + '-' + str(n_epochs) + '-' \
             + str(learning_rate) + '-' + loss_name
output_dir_1000 = 'output_' + str(patch_size) + '-' + str(stride) + '-' + str(n_epochs) + '-' \
                  + str(learning_rate) + '-' + loss_name + '-times1000'
if not os.path.isdir(output_dir): os.mkdir(output_dir)
if not os.path.isdir(output_dir_1000): os.mkdir(output_dir_1000)


class SaveReconstructions(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
Example #12
def DUnet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))

    conv1 = CONV(inputs, 8)
    dens1_48 = Conv2D(4, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      data_format='channels_first')(conv1)
    dens1_24 = Conv2D(4, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      data_format='channels_first')(conv1)
    dens1_12 = Conv2D(4, (1, 1),
                      strides=(4, 4),
                      padding='same',
                      data_format='channels_first')(conv1)
    dens1_6 = Conv2D(4, (1, 1),
                     strides=(8, 8),
                     padding='same',
                     data_format='channels_first')(conv1)

    pool1 = MaxPooling2D((2, 2))(conv1)
    conv2 = CONV(pool1, 16)
    dens2_48 = Conv2DTranspose(4, (1, 1),
                               strides=(2, 2),
                               padding='same',
                               data_format='channels_first')(conv2)
    dens2_24 = Conv2D(4, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      data_format='channels_first')(conv2)
    dens2_12 = Conv2D(4, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      data_format='channels_first')(conv2)
    dens2_6 = Conv2D(4, (1, 1),
                     strides=(4, 4),
                     padding='same',
                     data_format='channels_first')(conv2)

    pool2 = MaxPooling2D((2, 2))(conv2)
    conc1 = concatenate([pool2, dens1_12], axis=1)
    conv3 = CONV(conc1, 32)
    dens3_48 = Conv2DTranspose(4, (1, 1),
                               strides=(4, 4),
                               padding='same',
                               data_format='channels_first')(conv3)
    dens3_24 = Conv2DTranspose(4, (1, 1),
                               strides=(2, 2),
                               padding='same',
                               data_format='channels_first')(conv3)
    dens3_12 = Conv2D(4, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      data_format='channels_first')(conv3)

    pool3 = MaxPooling2D((2, 2))(conv3)
    conc2 = concatenate([pool3, dens1_6, dens2_6], axis=1)
    conv4 = CONV(conc2, 64)
    dens4_48 = Conv2DTranspose(4, (1, 1),
                               strides=(8, 8),
                               padding='same',
                               data_format='channels_first')(conv4)
    dens4_24 = Conv2DTranspose(4, (1, 1),
                               strides=(4, 4),
                               padding='same',
                               data_format='channels_first')(conv4)

    up1 = UpSampling2D(size=(2, 2))(conv4)
    conc3 = concatenate([up1, dens1_12, dens2_12, dens3_12], axis=1)
    conv5 = CONV(conc3, 32)
    dens5_48 = Conv2DTranspose(4, (1, 1),
                               strides=(4, 4),
                               padding='same',
                               data_format='channels_first')(conv5)

    up2 = UpSampling2D(size=(2, 2))(conv5)
    conc4 = concatenate([up2, dens1_24, dens2_24, dens3_24, dens4_24], axis=1)
    conv6 = CONV(conc4, 16)

    up3 = UpSampling2D(size=(2, 2))(conv6)
    conc5 = concatenate(
        [up3, dens1_48, dens2_48, dens3_48, dens4_48, dens5_48], axis=1)
    conv7 = CONV(conc5, 8)

    conv8 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv8 = core.Reshape((2, patch_height * patch_width))(conv8)
    conv8 = core.Permute((2, 1))(conv8)
    conv8 = core.Activation('softmax')(conv8)
    model = Model(inputs=inputs, outputs=conv8)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
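DUnet builds on a CONV helper that is not included in the snippet. A plausible minimal double-convolution block in the same channels_first style (an assumption, not the original helper):

from keras.layers import Conv2D, Dropout

def CONV(x, filters):
    # Assumed U-Net style block: two 3x3 ReLU convs with light dropout.
    x = Conv2D(filters, (3, 3), activation='relu', padding='same',
               data_format='channels_first')(x)
    x = Dropout(0.2)(x)
    x = Conv2D(filters, (3, 3), activation='relu', padding='same',
               data_format='channels_first')(x)
    return x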
Example #13
def Unet(nClasses, input_length, optimizer=None, nChannels=1):
    inputs = Input((input_length, nChannels))
    # (None,1800,1)--->(None,1800,16)
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    # (None,1800,16)--->(None,1800,16)
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    # (None,1800,16)--->(None,900,16)
    pool1 = MaxPooling1D(pool_size=2)(conv1)

    # (None,900,16)--->(None,900,32)
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    # (None,900,32)--->(None,900,32)
    conv2 = Dropout(0.2)(conv2)
    # (None,900,32)--->(None,900,32)
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    # (None,900,32)--->(None,450,32)
    pool2 = MaxPooling1D(pool_size=2)(conv2)

    #(None, 450, 32)--->(None,450,64)
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    # (None, 450, 64)--->(None,450,64)
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    # (None, 450, 64)--->(None,225,64)
    pool3 = MaxPooling1D(pool_size=2)(conv3)

    #(None,225,64)--->(None,225,128)
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    # (None,225,128)--->(None,225,128)
    conv4 = Dropout(0.5)(conv4)
    # (None,225,128)--->(None,225,128)
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)

    #(None,225,128)--->(None,450,64)
    up1 = Conv1D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv4))
    #(None, 450, 64)--->(None,450,128)
    merge1 = concatenate([up1, conv3], axis=-1)
    # (None, 450, 128)--->(None,450,64)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
    # (None, 450, 64)--->(None,450,64)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)

    # (None, 450, 64)--->(None,900,32)
    up2 = Conv1D(32, 2, activation='relu', padding='same', kernel_initializer = 'he_normal')(UpSampling1D(size=2)(conv5))
    # (None, 900, 32)--->(None,900,64)
    merge2 = concatenate([up2, conv2], axis=-1)
    # (None, 900, 64)--->(None,900,32)
    conv6 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer = 'he_normal')(merge2)
    conv6 = Dropout(0.2)(conv6)
    # (None, 900, 32)--->(None,900,32)
    conv6 = Conv1D(32, 32, activation='relu', padding='same')(conv6)

    # (None, 900, 32)--->(None,1800,16)
    up3 = Conv1D(16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv6))
    #(None,1800,16)--->(None,1800,32)
    merge3 = concatenate([up3, conv1], axis=-1)
    #(None, 1800, 32)--->(None, 1800, 16)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
    #(None, 1800, 16)--->(None, 1800, 16)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

    # (None, 1800, 16)--->(None, 1800, nClasses)
    conv8 = Conv1D(nClasses, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    # (None, 1800, nClasses)--->(None, nClasses, 1800)
    conv8 = core.Reshape((nClasses, input_length))(conv8)
    # (None, nClasses, 1800)--->(None, 1800, nClasses)
    conv8 = core.Permute((2, 1))(conv8)

    conv9 = core.Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=conv9)
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Example #14
deconv_nir_3 = Conv2DTranspose(num_class*C,(4,4), strides=(2, 2), padding='same', data_format="channels_last", activation='relu',kernel_initializer='glorot_normal')(conv_nir_2)
conv_nir_3 = Conv2D(num_class*C, (3,3), strides=(1,1), padding = 'same', activation='relu', data_format='channels_last')(deconv_nir_3)
deconv_nir_4 = Conv2DTranspose(num_class*C,(4,4), strides=(2, 2), padding='same', data_format="channels_last", activation='relu',kernel_initializer='glorot_normal')(conv_nir_3)
conv_nir_4 = Conv2D(num_class*C, (3,3), strides=(1,1), padding = 'same', activation='relu', data_format='channels_last')(deconv_nir_4)
deconv_nir_5 = Conv2DTranspose(num_class*C,(4,4), strides=(2, 2), padding='same', data_format="channels_last", activation='relu',kernel_initializer='glorot_normal')(conv_nir_4)


# CONCATENATE the ends of RGB & NIR
merge_rgb_nir = keras.layers.concatenate([deconv_rgb_5, deconv_nir_5], axis=-1)

# DECONVOLUTION Layers
deconv_last = Conv2DTranspose(num_class, (1,1), strides=(1, 1), padding='same', data_format="channels_last", activation='relu',kernel_initializer='glorot_normal') (merge_rgb_nir)

#VECTORIZING OUTPUT
out_reshape = core.Reshape((input_dim[0]*input_dim[1],num_class))(deconv_last)
out = core.Activation('softmax')(out_reshape)

# MODEL [inputs, outputs]
model = Model(inputs=[inputs_rgb,inputs_nir], outputs=[out])
print('compiling')
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()


# Save the model according to the conditions  
progbar = ProgbarLogger(count_mode='steps')
checkpoint = ModelCheckpoint("nir_rgb_segmentation_2.{epoch:02d}.hdf5", monitor='val_acc', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
#early = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=1, mode='auto')
#haven't specified validation data directory yet
Example #15
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              epochs=1,
              batch_size=10)

    # test config
    model.get_config()

    # test when specifying a batch_input_shape
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(core.Dense(2), batch_input_shape=(1, 3, 4)))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Embedding
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(embeddings.Embedding(5, 6),
                                 batch_input_shape=(10, 3, 4),
                                 dtype='int32'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
              np.random.random((10, 3, 4, 6)),
              epochs=1,
              batch_size=10)

    # compare to not using batch_input_shape
    test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(embeddings.Embedding(5, 6),
                                 input_shape=(3, 4),
                                 dtype='int32'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Conv2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(convolutional.Conv2D(5, (2, 2),
                                                      padding='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              epochs=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(core.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with BatchNormalization
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(normalization.BatchNormalization(center=True,
                                                                  scale=True),
                                 name='bn',
                                 input_shape=(10, 2)))
    model.compile(optimizer='rmsprop', loss='mse')
    # Assert that mean and variance are 0 and 1.
    td = model.layers[0]
    assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Train
    model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                         np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
    # Assert that mean and variance changed.
    assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Verify input_map has one mapping from inputs to reshaped inputs.
    uid = _object_list_uid(model.inputs)
    assert len(td._input_map.keys()) == 1
    assert uid in td._input_map
    assert K.int_shape(td._input_map[uid]) == (None, 2)
Example #16
def get_patches_unet5(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)

    conv2 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)

    conv3 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(conv3)
    pool3 = MaxPooling2D((2,2))(conv3)

    conv4 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(pool3)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(conv4)
    pool4 = MaxPooling2D((2,2))(conv4)

    conv5 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(pool4)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same',data_format='channels_first')(conv5)

    up1 = UpSampling2D(size=(2, 2))(conv5)
    up1 = concatenate([conv4,up1],axis=1)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same',data_format='channels_first')(up1)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv6)

    up2 = UpSampling2D(size=(2, 2))(conv6)
    up2 = concatenate([conv3,up2], axis=1)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(up2)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv7)

    up3 = UpSampling2D(size=(2,2))(conv7)
    up3 = concatenate([conv2,up3], axis=1)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(up3)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv8)

    up4 = UpSampling2D(size=(2,2))(conv8)
    up4 = concatenate([conv1,up4], axis=1)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(up4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv9)

    conv10 = Conv2D(2, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv9)
    conv10 = core.Reshape((2,patch_height*patch_width))(conv10)
    conv10 = core.Permute((2,1))(conv10)
    conv10 = core.Activation('softmax')(conv10)
    #conv10 = ThresholdedReLU(theta=0.25)(conv10)

    model = Model(inputs=inputs, outputs=conv10)
    # multi_model = multi_gpu_model(model, gpus=2)
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
Example #17
def get_dilated_unet(n_ch, patch_height, patch_width, dilaterate=3):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', dilation_rate=dilaterate,
                   data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(2, (1, 1), activation='relu', padding='same', data_format='channels_first')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)
    # scheduler = LearningRateScheduler(mlr.lr_scheduler)
    sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
    # adam=optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
    # model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # 1. Objective functions:
    # (1) mean_squared_error / mse: mean squared error, a common objective; ((y_pred - y_true) ** 2).mean()
    # (2) mean_absolute_error / mae: mean absolute error; (|y_pred - y_true|).mean()
    # (3) mean_absolute_percentage_error / mape: (|(y_true - y_pred) / clip(|y_true|, epsilon, infinite)|).mean(axis=-1) * 100;
    #     like mae, except each difference is divided by the true value (clipped to [epsilon, infinite]) before averaging.
    # (4) mean_squared_logarithmic_error / msle: ((log(clip(y_pred, epsilon, infinite) + 1) - log(clip(y_true, epsilon, infinite) + 1)) ** 2).mean(axis=-1);
    #     clip predictions and targets, add 1 and take logs, then square the difference and average.
    # (5) squared_hinge: (max(1 - y_true * y_pred, 0) ** 2).mean(axis=-1)
    # (6) hinge: (max(1 - y_true * y_pred, 0)).mean(axis=-1)
    # (7) binary_crossentropy: the usual logistic cross-entropy loss
    # (8) categorical_crossentropy: multi-class cross-entropy
    #
    # 2. Metrics:
    # (1) binary_accuracy: mean accuracy over all predictions, for binary problems
    # (2) categorical_accuracy: mean accuracy over all predictions, for multi-class problems
    # (3) sparse_categorical_accuracy: same as categorical_accuracy, useful with sparse targets
    # (4) top_k_categorical_accuracy: top-k accuracy; a prediction counts as correct if the target class is among the k highest-scoring classes
    # (5) sparse_top_k_categorical_accuracy: same as top_k_categorical_accuracy, for sparse targets
    return model
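The loss formulas listed in the comment above are easy to sanity-check with plain NumPy; a small sketch (the values are made up):

import numpy as np

y_true = np.array([0., 1., 1., 0.])
y_pred = np.array([0.1, 0.9, 0.8, 0.3])

mse = ((y_pred - y_true) ** 2).mean()   # mean_squared_error -> 0.0375
mae = np.abs(y_pred - y_true).mean()    # mean_absolute_error -> 0.175
eps = 1e-7
bce = -(y_true * np.log(np.clip(y_pred, eps, 1.0)) +
        (1 - y_true) * np.log(np.clip(1 - y_pred, eps, 1.0))).mean()
print(mse, mae, bce)                    # binary_crossentropy -> ~0.198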
Example #18
def get_patches_unet4(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(filters=32, kernel_size=(3, 3), padding='same', data_format='channels_first')(inputs)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Activation('relu')(conv1)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(filters=32, kernel_size=(3, 3), padding='same', data_format='channels_first')(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Activation('relu')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)

    conv2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same', data_format='channels_first')(pool1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Activation('relu')(conv2)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same', data_format='channels_first')(conv2)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Activation('relu')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)

    conv3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same', data_format='channels_first')(pool2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Activation('relu')(conv3)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same', data_format='channels_first')(conv3)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Activation('relu')(conv3)
    pool3 = MaxPooling2D((2,2))(conv3)

    conv4 = Conv2D(filters=256, kernel_size=(3, 3), padding='same', data_format='channels_first')(pool3)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Activation('relu')(conv4)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(filters=256, kernel_size=(3, 3), padding='same', data_format='channels_first')(conv4)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Activation('relu')(conv4)

    up1 = UpSampling2D(size=(2, 2))(conv4)
    up1 = concatenate([conv3,up1], axis=1)
    conv5 = Conv2D(128, (3, 3), padding='same', data_format='channels_first')(up1)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Activation('relu')(conv5)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(128, (3, 3), padding='same', data_format='channels_first')(conv5)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Activation('relu')(conv5)

    up2 = UpSampling2D(size=(2,2))(conv5)
    up2 = concatenate([conv2,up2], axis=1)
    conv6 = Conv2D(64, (3, 3), padding='same', data_format='channels_first')(up2)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Activation('relu')(conv6)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv2D(64, (3, 3), padding='same', data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Activation('relu')(conv6)

    up3 = UpSampling2D(size=(2,2))(conv6)
    up3 = concatenate([conv1,up3], axis=1)
    conv7 = Conv2D(32, (3, 3), padding='same', data_format='channels_first')(up3)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Activation('relu')(conv7)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Conv2D(32, (3, 3), padding='same', data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Activation('relu')(conv7)

    # # Output shape as entire image
    # conv8 = Conv2D(2, (1, 1), padding='same', data_format='channels_first')(conv7)
    # conv8 = BatchNormalization(axis=1)(conv8)
    # conv8 = Activation('relu')(conv8)
    # conv8 = Conv2D(1, (1, 1), padding='same', data_format='channels_first')(conv8)
    # conv8 = BatchNormalization(axis=1)(conv8)
    # conv8 = Dense(1, data_format='channels_first')(conv8)
    # conv8 = core.Activation('softmax')(conv8)

    # Output shape as batches, height*width, 2
    conv8 = Conv2D(2, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv7)
    conv8 = core.Reshape((2,patch_height*patch_width))(conv8)
    conv8 = core.Permute((2,1))(conv8)
    conv8 = core.Activation('softmax')(conv8)
    #
    model = Model(inputs=inputs, outputs=conv8)
    # multi_model = multi_gpu_model(model, gpus=2)
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    # model.compile(optimizer='sgd', loss=dice_coef_loss , metrics=['accuracy', dice_coef, jaccard_coef])

    return model
Example #19
               data_format='channels_first')(conv6)

conv7 = Conv2D(3, (1, 1),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv6)
conv7 = core.Reshape((3, 128 * 128))(conv7)
#conv6 = core.Permute((3,1))(conv6)
conv7 = core.Flatten()(conv7)
#conv7 = core.Dense(64)(conv7)
#conv7 = core.Activation('relu')(conv7)
#conv7 = Dropout(0.2)(conv7)
conv7 = core.Dense(2)(conv7)

############
conv8 = core.Activation('softmax')(conv7)

model = Model(inputs=inputs, outputs=conv8)

# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

########################################
# Viewing the model configuration

model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
Example #20
def get_patches_unet3(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(filters=32, kernel_size=(3, 3), padding='same', data_format='channels_first')(inputs)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Activation('relu')(conv1)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(filters=32, kernel_size=(3, 3), padding='same', data_format='channels_first')(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Activation('relu')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same',data_format='channels_first')(pool1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Activation('relu')(conv2)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same',data_format='channels_first')(conv2)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Activation('relu')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same',data_format='channels_first')(pool2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Activation('relu')(conv3)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same',data_format='channels_first')(conv3)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Activation('relu')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2,up1],axis=1)
    conv4 = Conv2D(filters=64, kernel_size=(3, 3), padding='same',data_format='channels_first')(up1)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Activation('relu')(conv4)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(filters=64, kernel_size=(3, 3), padding='same',data_format='channels_first')(conv4)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Activation('relu')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1,up2], axis=1)
    conv5 = Conv2D(filters=32, kernel_size=(3, 3),padding='same',data_format='channels_first')(up2)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Activation('relu')(conv5)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(filters=32, kernel_size=(3, 3), padding='same',data_format='channels_first')(conv5)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Activation('relu')(conv5)
    #
    conv6 = Conv2D(filters=2, kernel_size=(1, 1), activation='relu',padding='same',data_format='channels_first')(conv5)
    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)
    # multi_model = multi_gpu_model(model, gpus=2)
    # model = make_parallel(model, 2)
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    #model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])
    model.compile(optimizer='sgd', loss='binary_crossentropy',metrics=['accuracy'])

    #multi_model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
Example #21
def build_model(start_neurons,
                block="resnet",
                DropoutRatio=0.5,
                filter_size=32,
                nClasses=2):
    # 101 -> 50
    input_length = 2000
    input_layer = Input((input_length, 1))
    #     conv1 = headneck(input_layer)
    #     conv1 = Conv1D(start_neurons * 1, filter_size, activation=None, padding="same")(conv1)

    conv1 = Conv1D(start_neurons * 1,
                   filter_size,
                   activation=None,
                   padding="same")(input_layer)

    conv1 = bottleneck(conv1, start_neurons * 1, block)

    conv1 = Activation(ACTIVATION)(conv1)
    pool1 = MaxPooling1D((2))(conv1)
    pool1 = Dropout(DropoutRatio / 2)(pool1)

    # 50 -> 25
    conv2 = Conv1D(start_neurons * 2,
                   filter_size,
                   activation=None,
                   padding="same")(pool1)

    conv2 = bottleneck(conv2, start_neurons * 2, block)

    conv2 = Activation(ACTIVATION)(conv2)
    pool2 = MaxPooling1D((2))(conv2)
    pool2 = Dropout(DropoutRatio)(pool2)

    # 25 -> 12
    conv3 = Conv1D(start_neurons * 4,
                   filter_size,
                   activation=None,
                   padding="same")(pool2)

    conv3 = bottleneck(conv3, start_neurons * 4, block)

    conv3 = Activation(ACTIVATION)(conv3)
    pool3 = MaxPooling1D((2))(conv3)
    pool3 = Dropout(DropoutRatio)(pool3)

    # 12 -> 6
    conv4 = Conv1D(start_neurons * 8,
                   filter_size,
                   activation=None,
                   padding="same")(pool3)
    #     conv4 = residual_block(conv4,start_neurons * 8)
    #     conv4 = residual_block(conv4,start_neurons * 8)
    conv4 = bottleneck(conv4, start_neurons * 8, block)

    conv4 = Activation(ACTIVATION)(conv4)
    pool4 = MaxPooling1D((2))(conv4)
    pool4 = Dropout(DropoutRatio)(pool4)

    # Middle
    convm = Conv1D(start_neurons * 16,
                   filter_size,
                   activation=None,
                   padding="same")(pool4)
    #     convm = residual_block(convm,start_neurons * 16)
    #     convm = residual_block(convm,start_neurons * 16)
    convm = bottleneck(convm, start_neurons * 16, block)

    convm = Activation(ACTIVATION)(convm)

    # 6 -> 12
    #deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
    deconv4 = Conv1D(start_neurons * 8,
                     filter_size,
                     activation='relu',
                     padding='same')(UpSampling1D(
                         size=2)(convm))  #kernel_initializer='he_normal'

    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(DropoutRatio)(uconv4)

    uconv4 = Conv1D(start_neurons * 8,
                    filter_size,
                    activation=None,
                    padding="same")(uconv4)
    #     uconv4 = residual_block(uconv4,start_neurons * 8)
    #     uconv4 = residual_block(uconv4,start_neurons * 8)
    uconv4 = bottleneck(uconv4, start_neurons * 8, block)

    uconv4 = Activation(ACTIVATION)(uconv4)

    # 12 -> 25
    #deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
    #deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
    deconv3 = Conv1D(
        start_neurons * 4,
        filter_size,
        activation='relu',
        padding='same',
    )(UpSampling1D(size=2)(uconv4))  #kernel_initializer='he_normal'
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(DropoutRatio)(uconv3)

    uconv3 = Conv1D(start_neurons * 4,
                    filter_size,
                    activation=None,
                    padding="same")(uconv3)
    #     uconv3 = residual_block(uconv3,start_neurons * 4)
    #     uconv3 = residual_block(uconv3,start_neurons * 4)
    uconv3 = bottleneck(uconv3, start_neurons * 4, block)

    uconv3 = Activation(ACTIVATION)(uconv3)

    # 25 -> 50
    #deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
    deconv2 = Conv1D(
        start_neurons * 2,
        filter_size,
        activation='relu',
        padding='same',
    )(UpSampling1D(size=2)(uconv3))  #kernel_initializer='he_normal'
    uconv2 = concatenate([deconv2, conv2])

    uconv2 = Dropout(DropoutRatio)(uconv2)
    uconv2 = Conv1D(start_neurons * 2,
                    filter_size,
                    activation=None,
                    padding="same")(uconv2)
    #     uconv2 = residual_block(uconv2,start_neurons * 2)
    #     uconv2 = residual_block(uconv2,start_neurons * 2)
    uconv2 = bottleneck(uconv2, start_neurons * 2, block)

    uconv2 = Activation(ACTIVATION)(uconv2)

    # 50 -> 101
    #deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
    #deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
    deconv1 = Conv1D(
        start_neurons * 1,
        filter_size,
        activation='relu',
        padding='same',
    )(UpSampling1D(size=2)(uconv2))  #kernel_initializer='he_normal'
    uconv1 = concatenate([deconv1, conv1])

    uconv1 = Dropout(DropoutRatio)(uconv1)
    uconv1 = Conv1D(start_neurons * 1,
                    filter_size,
                    activation=None,
                    padding="same")(uconv1)
    #     uconv1 = residual_block(uconv1,start_neurons * 1)
    #     uconv1 = residual_block(uconv1,start_neurons * 1)
    uconv1 = bottleneck(uconv1, start_neurons * 1, block)

    uconv1 = Activation(ACTIVATION)(uconv1)

    uconv1 = Dropout(DropoutRatio / 2)(uconv1)
    #output_layer = Conv1D(1, 1, padding="same", activation="sigmoid")(uconv1)
    output_layer = Conv1D(nClasses, 1, activation='relu', padding='same')(
        uconv1)  #kernel_initializer='he_normal'
    #output_layer = core.Reshape((nClasses, input_length))(output_layer)
    #output_layer = core.Permute((2, 1))(output_layer)
    output_layer = core.Activation('softmax')(output_layer)
    #model = Model(inputs=inputs, outputs=conv9)

    model = Model(input_layer, output_layer)

    return model
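build_model depends on a module-level ACTIVATION constant and a bottleneck helper that the snippet does not show. A plausible minimal residual bottleneck consistent with the calls above (names and internals are assumptions):

from keras.layers import Conv1D, BatchNormalization, Activation, add

ACTIVATION = 'relu'  # assumed

def bottleneck(x, filters, block='resnet'):
    # Assumed residual block: two 3-tap convs with batch norm, skip-added to
    # the input (which already has `filters` channels at every call site).
    shortcut = x
    y = Conv1D(filters, 3, padding='same')(x)
    y = BatchNormalization()(y)
    y = Activation(ACTIVATION)(y)
    y = Conv1D(filters, 3, padding='same')(y)
    y = BatchNormalization()(y)
    return add([y, shortcut])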
Example #22
# Assumed imports for the aliases used below (`dataset` comes from the
# project's gmllib package, per the path string):
import numpy as np
import keras.models as kmodel
import keras.layers.convolutional as kconv
import keras.layers.core as klcore

ds = dataset.DataSet.load_from_path('usps', '../gmllib/datasets/usps')

# convert to 2D images
x_train = np.reshape(ds.train.x, (ds.train.N, 1, 16, 16))
x_test = np.reshape(ds.test.x, (ds.test.N, 1, 16, 16))

model = kmodel.Sequential()

model.add(
    kconv.Convolution2D(nb_filter=4,
                        nb_row=5,
                        nb_col=5,
                        input_shape=(1, 16, 16),
                        border_mode='valid'))
model.add(klcore.Activation('tanh'))
# instead of average pooling, we use max pooling
model.add(kconv.MaxPooling2D(pool_size=(2, 2)))

# the 12 feature maps in this layer were connected to the previous layer in a
# specific sparse pattern in the original LeNet; that is not easy to reproduce
# in Keras, whose Conv2D connects every output feature map to all input maps.
model.add(kconv.Convolution2D(nb_filter=12, nb_row=5, nb_col=5))
model.add(klcore.Activation('tanh'))
model.add(kconv.MaxPooling2D(pool_size=(2, 2)))

model.add(klcore.Flatten())
model.add(klcore.Dense(output_dim=10))
model.add(klcore.Activation('softmax'))

model.compile(optimizer='sgd', loss='categorical_crossentropy')
Example #23
class LayerCorrectnessTest(test_combinations.TestCase):
    def setUp(self):
        super(LayerCorrectnessTest, self).setUp()
        # Set two virtual CPUs to test MirroredStrategy with multiple devices
        cpus = tf.config.list_physical_devices('CPU')
        tf.config.set_logical_device_configuration(cpus[0], [
            tf.config.LogicalDeviceConfiguration(),
            tf.config.LogicalDeviceConfiguration(),
        ])

    def _create_model_from_layer(self, layer, input_shapes):
        inputs = [layers.Input(batch_input_shape=s) for s in input_shapes]
        if len(inputs) == 1:
            inputs = inputs[0]
        y = layer(inputs)
        model = models.Model(inputs, y)
        model.compile('sgd', 'mse')
        return model

    @parameterized.named_parameters(
        ('LeakyReLU', activation.LeakyReLU, (2, 2)),
        ('PReLU', activation.PReLU, (2, 2)),
        ('ELU', activation.ELU, (2, 2)),
        ('ThresholdedReLU', activation.ThresholdedReLU, (2, 2)),
        ('Softmax', activation.Softmax, (2, 2)),
        ('ReLU', activation.ReLU, (2, 2)),
        ('Conv1D', lambda: convolutional.Conv1D(2, 2), (2, 2, 1)),
        ('Conv2D', lambda: convolutional.Conv2D(2, 2), (2, 2, 2, 1)),
        ('Conv3D', lambda: convolutional.Conv3D(2, 2), (2, 2, 2, 2, 1)),
        ('Conv2DTranspose', lambda: convolutional.Conv2DTranspose(2, 2),
         (2, 2, 2, 2)),
        ('SeparableConv2D', lambda: convolutional.SeparableConv2D(2, 2),
         (2, 2, 2, 1)),
        ('DepthwiseConv2D', lambda: convolutional.DepthwiseConv2D(2, 2),
         (2, 2, 2, 1)),
        ('UpSampling2D', reshaping.UpSampling2D, (2, 2, 2, 1)),
        ('ZeroPadding2D', reshaping.ZeroPadding2D, (2, 2, 2, 1)),
        ('Cropping2D', reshaping.Cropping2D, (2, 3, 3, 1)),
        ('ConvLSTM2D', lambda: conv_lstm2d.ConvLSTM2D(4, kernel_size=(2, 2)),
         (4, 4, 4, 4, 4)),
        ('Dense', lambda: core.Dense(2), (2, 2)),
        ('Dropout', lambda: regularization.Dropout(0.5), (2, 2)),
        ('SpatialDropout2D', lambda: regularization.SpatialDropout2D(0.5),
         (2, 2, 2, 2)),
        ('Activation', lambda: core.Activation('sigmoid'), (2, 2)),
        ('Reshape', lambda: reshaping.Reshape((1, 4, 1)), (2, 2, 2)),
        ('Permute', lambda: reshaping.Permute((2, 1)), (2, 2, 2)),
        ('Attention', attention.Attention,
         [(2, 2, 3), (2, 3, 3), (2, 3, 3)]),
        ('AdditiveAttention', attention.AdditiveAttention,
         [(2, 2, 3), (2, 3, 3), (2, 3, 3)]),
        ('Embedding', lambda: embeddings.Embedding(4, 4), (2, 4), 2e-3, 2e-3,
         np.random.randint(4, size=(2, 4))),
        ('LocallyConnected1D',
         lambda: locally_connected.LocallyConnected1D(2, 2), (2, 2, 1)),
        ('LocallyConnected2D',
         lambda: locally_connected.LocallyConnected2D(2, 2), (2, 2, 2, 1)),
        ('Add', merging.Add, [(2, 2), (2, 2)]),
        ('Subtract', merging.Subtract, [(2, 2), (2, 2)]),
        ('Multiply', merging.Multiply, [(2, 2), (2, 2)]),
        ('Average', merging.Average, [(2, 2), (2, 2)]),
        ('Maximum', merging.Maximum, [(2, 2), (2, 2)]),
        ('Minimum', merging.Minimum, [(2, 2), (2, 2)]),
        ('Concatenate', merging.Concatenate, [(2, 2), (2, 2)]),
        ('Dot', lambda: merging.Dot(1), [(2, 2), (2, 2)]),
        ('GaussianNoise', lambda: regularization.GaussianNoise(0.5), (2, 2)),
        ('GaussianDropout', lambda: regularization.GaussianDropout(0.5),
         (2, 2)),
        ('AlphaDropout', lambda: regularization.AlphaDropout(0.5), (2, 2)),
        ('BatchNormalization', batch_normalization.BatchNormalization,
         (2, 2), 1e-2, 1e-2),
        ('LayerNormalization', layer_normalization.LayerNormalization,
         (2, 2)),
        ('LayerNormalizationUnfused',
         lambda: layer_normalization.LayerNormalization(axis=1), (2, 2, 2)),
        ('MaxPooling2D', pooling.MaxPooling2D, (2, 2, 2, 1)),
        ('AveragePooling2D', pooling.AveragePooling2D, (2, 2, 2, 1)),
        ('GlobalMaxPooling2D', pooling.GlobalMaxPooling2D, (2, 2, 2, 1)),
        ('GlobalAveragePooling2D', pooling.GlobalAveragePooling2D,
         (2, 2, 2, 1)),
        ('SimpleRNN', lambda: simple_rnn.SimpleRNN(units=4),
         (4, 4, 4), 1e-2, 1e-2),
        ('SimpleRNN_stateful',
         lambda: simple_rnn.SimpleRNN(units=4, stateful=True),
         (4, 4, 4), 1e-2, 1e-2),
        ('GRU', lambda: gru_v1.GRU(units=4), (4, 4, 4)),
        ('LSTM', lambda: lstm_v1.LSTM(units=4), (4, 4, 4)),
        ('GRUV2', lambda: gru.GRU(units=4), (4, 4, 4)),
        ('GRUV2_stateful', lambda: gru.GRU(units=4, stateful=True),
         (4, 4, 4)),
        ('LSTMV2', lambda: lstm.LSTM(units=4), (4, 4, 4)),
        ('LSTMV2_stateful', lambda: lstm.LSTM(units=4, stateful=True),
         (4, 4, 4)),
        ('TimeDistributed',
         lambda: time_distributed.TimeDistributed(core.Dense(2)), (2, 2, 2)),
        ('Bidirectional',
         lambda: bidirectional.Bidirectional(simple_rnn.SimpleRNN(units=4)),
         (2, 2, 2)),
        ('AttentionLayerCausal', lambda: attention.Attention(causal=True),
         [(2, 2, 3), (2, 3, 3), (2, 3, 3)]),
        ('AdditiveAttentionLayerCausal',
         lambda: attention.AdditiveAttention(causal=True),
         [(2, 3, 4), (2, 3, 4), (2, 3, 4)]),
        ('NormalizationAdapt', _create_normalization_layer_with_adapt,
         (4, 4)),
        ('NormalizationNoAdapt', _create_normalization_layer_without_adapt,
         (4, 4)),
        ('Resizing', lambda: image_preprocessing.Resizing(3, 3),
         (2, 5, 5, 1)),
        ('Rescaling', lambda: image_preprocessing.Rescaling(2., 1.), (6, 6)),
        ('CenterCrop', lambda: image_preprocessing.CenterCrop(3, 3),
         (2, 5, 5, 1)))
    def test_layer(self,
                   f32_layer_fn,
                   input_shape,
                   rtol=2e-3,
                   atol=2e-3,
                   input_data=None):
        """Tests a layer by comparing the float32 and mixed precision weights.

    A float32 layer, a mixed precision layer, and a distributed mixed precision
    layer are run. The three layers are identical other than their dtypes and
    distribution strategies. The outputs after predict() and weights after fit()
    are asserted to be close.

    Args:
      f32_layer_fn: A function returning a float32 layer. The other two layers
        will automatically be created from this
      input_shape: The shape of the input to the layer, including the batch
        dimension. Or a list of shapes if the layer takes multiple inputs.
      rtol: The relative tolerance to be asserted.
      atol: The absolute tolerance to be asserted.
      input_data: A Numpy array with the data of the input. If None, input data
        will be randomly generated
    """

        if (f32_layer_fn == reshaping.ZeroPadding2D
                and tf.test.is_built_with_rocm()):
            return
        if isinstance(input_shape[0], int):
            input_shapes = [input_shape]
        else:
            input_shapes = input_shape
        strategy = create_mirrored_strategy()
        f32_layer = f32_layer_fn()

        # Create the layers
        assert f32_layer.dtype == f32_layer._compute_dtype == 'float32'
        config = f32_layer.get_config()
        config['dtype'] = policy.Policy('mixed_float16')
        mp_layer = f32_layer.__class__.from_config(config)
        distributed_mp_layer = f32_layer.__class__.from_config(config)

        # Compute per_replica_input_shapes for the distributed model
        global_batch_size = input_shapes[0][0]
        assert global_batch_size % strategy.num_replicas_in_sync == 0, (
            'The number of replicas, %d, does not divide the global batch size of '
            '%d' % (strategy.num_replicas_in_sync, global_batch_size))
        per_replica_batch_size = (global_batch_size //
                                  strategy.num_replicas_in_sync)
        per_replica_input_shapes = [(per_replica_batch_size, ) + s[1:]
                                    for s in input_shapes]

        # Create the models
        f32_model = self._create_model_from_layer(f32_layer, input_shapes)
        mp_model = self._create_model_from_layer(mp_layer, input_shapes)
        with strategy.scope():
            distributed_mp_model = self._create_model_from_layer(
                distributed_mp_layer, per_replica_input_shapes)

        # Set all model weights to the same values
        f32_weights = f32_model.get_weights()
        mp_model.set_weights(f32_weights)
        distributed_mp_model.set_weights(f32_weights)

        # Generate input data
        if input_data is None:
            # Cast inputs to float16 so we do not measure the error introduced
            # when the f16 layers cast float32 inputs down to float16.
            input_data = [
                np.random.normal(size=s).astype('float16')
                for s in input_shapes
            ]
            if len(input_data) == 1:
                input_data = input_data[0]

        # Assert all models have close outputs.
        f32_output = f32_model.predict(input_data)
        mp_output = mp_model.predict(input_data)
        self.assertAllClose(mp_output, f32_output, rtol=rtol, atol=atol)
        self.assertAllClose(distributed_mp_model.predict(input_data),
                            f32_output,
                            rtol=rtol,
                            atol=atol)

        # Run fit() on models
        output = np.random.normal(
            size=f32_model.outputs[0].shape).astype('float16')
        for model in f32_model, mp_model, distributed_mp_model:
            model.fit(input_data, output, batch_size=global_batch_size)

        # Assert all models have close weights
        f32_weights = f32_model.get_weights()
        self.assertAllClose(mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
        self.assertAllClose(distributed_mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
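
Stripped of the test harness, the cloning pattern the test exercises is small. A minimal standalone sketch using tf.keras; the Dense layer is just an arbitrary stand-in:

import numpy as np
import tensorflow as tf

f32_layer = tf.keras.layers.Dense(2)
config = f32_layer.get_config()
config['dtype'] = tf.keras.mixed_precision.Policy('mixed_float16')
mp_layer = tf.keras.layers.Dense.from_config(config)

x = np.random.normal(size=(2, 2)).astype('float16')
print(f32_layer(x).dtype)  # float32: inputs are cast to the compute dtype
print(mp_layer(x).dtype)   # float16 compute dtype, float32 variables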
Example #24
def get_unet2(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Dropout(0.3)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    pool1 = MaxPooling2D((2, 2), data_format='channels_first')(conv1)

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Dropout(0.3)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)
    conv2 = BatchNormalization(axis=1)(conv2)
    pool2 = MaxPooling2D((2, 2), data_format='channels_first')(conv2)

    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Dropout(0.3)(conv3)
    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv3)
    conv3 = BatchNormalization(axis=1)(conv3)
    pool3 = MaxPooling2D((2, 2), data_format='channels_first')(conv3)

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool3)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Dropout(0.3)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    conv4 = BatchNormalization(axis=1)(conv4)
    pool4 = MaxPooling2D((2, 2), data_format='channels_first')(conv4)

    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool4)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Dropout(0.3)(conv5)
    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    conv5 = BatchNormalization(axis=1)(conv5)

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)
    up1 = concatenate([conv4, up1], axis=1)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)
    up2 = concatenate([conv3, up2], axis=1)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)

    up3 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)
    up3 = concatenate([conv2, up3], axis=1)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up3)
    conv8 = BatchNormalization(axis=1)(conv8)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv8)
    conv8 = BatchNormalization(axis=1)(conv8)

    up4 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)
    up4 = concatenate([conv1, up4], axis=1)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up4)
    conv9 = BatchNormalization(axis=1)(conv9)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv9)
    conv9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(2, (1, 1),
                    activation='relu',
                    padding='same',
                    data_format='channels_first')(conv9)
    conv10 = BatchNormalization(axis=1)(conv10)
    conv10 = core.Reshape((2, patch_height * patch_width))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(input=inputs, output=conv10)

    # note: this Adagrad instance is created but never used; the model is
    # compiled with the plain 'sgd' optimizer string below
    adaGrad = Adagrad(lr=1e-7, epsilon=1e-7, decay=1e-6)
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
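
The Reshape -> Permute -> softmax tail used here (and in several of the U-Nets below) turns (nClasses, H, W) logits into a per-pixel class distribution. A small self-contained sketch of just that head, with illustrative shapes:

import numpy as np
from tensorflow.keras import layers, models

h = w = 4
logits = layers.Input(shape=(2, h, w))     # channels_first class logits
x = layers.Reshape((2, h * w))(logits)     # (2, H*W)
x = layers.Permute((2, 1))(x)              # (H*W, 2): one row per pixel
probs = layers.Activation('softmax')(x)    # softmax over the class axis

head = models.Model(logits, probs)
out = head.predict(np.zeros((1, 2, h, w)))
assert np.allclose(out.sum(axis=-1), 1.0)  # each pixel's classes sum to 1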
Example #25
def get_gnet(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv5)
    #
    up2 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv6)
    #
    up3 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv7)
    #
    up4 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv9)
    #
    conv10 = Convolution2D(2, 1, 1, activation='relu',
                           border_mode='same')(conv9)
    conv10 = core.Reshape((2, patch_height * patch_width))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(input=inputs, output=conv10)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
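
merge([...], mode='concat', concat_axis=1) is Keras 1 API; Keras 2 replaces it with the concatenate function (or the Concatenate layer). A short sketch of the migration:

from tensorflow.keras import Input, layers

a = Input(shape=(8, 8, 4))
b = Input(shape=(8, 8, 4))
merged = layers.concatenate([a, b], axis=1)  # replaces merge(mode='concat', concat_axis=1)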
Example #26
def get_unet_seg(n_ch, img_rows=480, img_cols=480):
    inputs = Input((n_ch, img_rows, img_cols))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_1")(inputs)
    conv1 = BatchNormalization(axis=1, name="conv1_2")(conv1)
    conv1 = Dropout(0.5, name="conv1_3")(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_4")(conv1)
    conv1 = BatchNormalization(axis=1, name="conv1_5")(conv1)
    # NOTE: conv1 is an output tensor, not a Layer, so this assignment (and
    # the similar ones below) has no effect; layers must be frozen via
    # layer.trainable instead (see the sketch after this example)
    conv1.trainable = False
    pool1 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv1_6")(conv1)
    pool1.trainable = False

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_1")(pool1)
    conv2 = BatchNormalization(axis=1, name="conv2_2")(conv2)
    conv2 = Dropout(0.5, name="conv2_3")(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_4")(conv2)
    conv2 = BatchNormalization(axis=1, name="conv2_5")(conv2)
    conv2.trainable = False
    pool2 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv2_6")(conv2)
    pool2.trainable = False

    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_1")(pool2)
    conv3 = BatchNormalization(axis=1, name="conv3_2")(conv3)
    conv3 = Dropout(0.5, name="conv3_3")(conv3)
    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_4")(conv3)
    conv3 = BatchNormalization(axis=1, name="conv3_5")(conv3)
    conv3.trainable = False
    pool3 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv3_6")(conv3)
    pool3.trainable = False

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_1")(pool3)
    conv4 = BatchNormalization(axis=1, name="conv4_2")(conv4)
    conv4 = Dropout(0.5, name="conv4_3")(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_4")(conv4)
    conv4 = BatchNormalization(axis=1, name="conv4_5")(conv4)
    conv4.trainable = False
    pool4 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv4_6")(conv4)
    pool4.trainable = False

    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_1")(pool4)
    conv5 = BatchNormalization(axis=1, name="conv5_2")(conv5)
    conv5 = Dropout(0.5, name="conv5_3")(conv5)
    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_4")(conv5)
    conv5 = BatchNormalization(axis=1, name="conv5_5")(conv5)
    conv5.trainable = False

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)
    up1 = concatenate([conv4, up1], axis=1)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)
    up2 = concatenate([conv3, up2], axis=1)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)

    up3 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)
    up3 = concatenate([conv2, up3], axis=1)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up3)
    conv8 = BatchNormalization(axis=1)(conv8)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv8)
    conv8 = BatchNormalization(axis=1)(conv8)

    up4 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)
    up4 = concatenate([conv1, up4], axis=1)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up4)
    conv9 = BatchNormalization(axis=1)(conv9)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv9)
    conv9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(2, (1, 1),
                    activation='relu',
                    padding='same',
                    data_format='channels_first')(conv9)
    conv10 = BatchNormalization(axis=1)(conv10)
    conv10 = core.Reshape((2, img_rows * img_cols))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(input=inputs, output=conv10)

    # note: this Adagrad instance is created but never used; the model is
    # compiled with the plain 'sgd' optimizer string below
    adaGrad = Adagrad(lr=1e-7, epsilon=1e-7, decay=1e-6)
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
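
Because the .trainable assignments in this function target tensors, nothing is actually frozen. A minimal sketch of the working pattern, freezing Layer objects by name before compiling; the tiny model here is illustrative:

from tensorflow.keras import Input, layers, models

inputs = Input(shape=(1, 32, 32))
x = layers.Conv2D(8, (3, 3), padding='same',
                  data_format='channels_first', name='conv1_1')(inputs)
x = layers.Conv2D(8, (3, 3), padding='same',
                  data_format='channels_first', name='conv1_4')(x)
model = models.Model(inputs, x)

# freeze the encoder layers by name, then compile
for name in ('conv1_1', 'conv1_4'):
    model.get_layer(name).trainable = False
model.compile(optimizer='sgd', loss='mse')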
Example #27
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              nb_epoch=1,
              batch_size=10)

    # test config
    model.get_config()

    # compare to TimeDistributedDense
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        core.TimeDistributedDense(2, input_shape=(3, 4), weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test when specifying a batch_input_shape
    reference = Sequential()
    reference.add(
        core.TimeDistributedDense(2,
                                  batch_input_shape=(1, 3, 4),
                                  weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Convolution2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(convolutional.Convolution2D(
            5, 2, 2, border_mode='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              nb_epoch=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(core.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    nb_epoch=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    nb_epoch=1,
                    batch_size=10)
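
The wrapper's contract is that the same Dense weights are applied at every timestep. A quick numeric check of that equivalence, as a tf.keras sketch independent of the Keras 1 test above:

import numpy as np
import tensorflow as tf

dense = tf.keras.layers.Dense(2)
td = tf.keras.layers.TimeDistributed(dense)

x = np.random.random((1, 3, 4)).astype('float32')
y_td = td(x).numpy()  # (1, 3, 2): one Dense application per timestep
y_loop = np.stack([dense(x[:, t]).numpy() for t in range(3)], axis=1)
assert np.allclose(y_td, y_loop, atol=1e-5)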
Example #28
def get_unet_trainable_seg(n_ch, img_rows=480, img_cols=480):
    inputs = Input((n_ch, img_rows, img_cols))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_1")(inputs)
    conv1 = BatchNormalization(axis=1, name="conv1_2")(conv1)
    conv1 = Dropout(0.5, name="conv1_3")(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_4")(conv1)
    conv1 = BatchNormalization(axis=1, name="conv1_5")(conv1)
    conv1.trainable = True  # no-op here and below: set on a tensor, and True is the default
    pool1 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv1_6")(conv1)
    pool1.trainable = True

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_1")(pool1)
    conv2 = BatchNormalization(axis=1, name="conv2_2")(conv2)
    conv2 = Dropout(0.5, name="conv2_3")(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_4")(conv2)
    conv2 = BatchNormalization(axis=1, name="conv2_5")(conv2)
    conv2.trainable = True
    pool2 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv2_6")(conv2)
    pool2.trainable = True

    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_1")(pool2)
    conv3 = BatchNormalization(axis=1, name="conv3_2")(conv3)
    conv3 = Dropout(0.5, name="conv3_3")(conv3)
    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_4")(conv3)
    conv3 = BatchNormalization(axis=1, name="conv3_5")(conv3)
    conv3.trainable = True
    pool3 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv3_6")(conv3)
    pool3.trainable = True

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_1")(pool3)
    conv4 = BatchNormalization(axis=1, name="conv4_2")(conv4)
    conv4 = Dropout(0.5, name="conv4_3")(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_4")(conv4)
    conv4 = BatchNormalization(axis=1, name="conv4_5")(conv4)
    conv4.trainable = True
    pool4 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv4_6")(conv4)
    pool4.trainable = True

    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_1")(pool4)
    conv5 = BatchNormalization(axis=1, name="conv5_2")(conv5)
    conv5 = Dropout(0.5, name="conv5_3")(conv5)
    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_4")(conv5)
    conv5 = BatchNormalization(axis=1, name="conv5_5")(conv5)
    conv5.trainable = True

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)
    up1 = concatenate([conv4, up1], axis=1)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)
    up2 = concatenate([conv3, up2], axis=1)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)

    up3 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)
    up3 = concatenate([conv2, up3], axis=1)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up3)
    conv8 = BatchNormalization(axis=1)(conv8)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv8)
    conv8 = BatchNormalization(axis=1)(conv8)

    up4 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)
    up4 = concatenate([conv1, up4], axis=1)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up4)
    conv9 = BatchNormalization(axis=1)(conv9)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv9)
    conv9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(1, (1, 1),
                    activation='relu',
                    padding='same',
                    data_format='channels_first')(conv9)
    conv10 = BatchNormalization(axis=1)(conv10)
    ############
    conv10 = core.Activation('sigmoid')(conv10)

    model = Model(input=inputs, output=conv10)

    return model
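
get_unet_trainable_seg returns an uncompiled model with a single-channel sigmoid head. A plausible compilation step for per-pixel binary segmentation; this is an assumption, since the original training code is not shown:

model = get_unet_trainable_seg(n_ch=1)
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])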
Example #29
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2), data_format='channels_first')(conv1)  # match the convs' layout
    #
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2), data_format='channels_first')(conv2)
    #
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
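
Mixing layers that pass data_format='channels_first' with layers that rely on the backend default is error-prone; the default can be pinned once so every pooling and upsampling layer inherits it. A short sketch:

from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')
print(K.image_data_format())  # 'channels_first'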
Example #30
File: models.py, Project: theislab/LODE
def att_unet(params, data_format='channels_last'):
    inputs = Input((params.img_shape, params.img_shape, 3))
    x = inputs
    depth = params.depth
    features = params.n_filters
    skips = []
    for i in range(depth):
        x = Conv2D(features, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format=data_format)(x)

        if params.batchnorm:
            x = BatchNormalization()(x)

        x = Dropout(params.dropout)(x)
        x = Conv2D(features, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format=data_format)(x)

        if params.batchnorm:
            x = BatchNormalization()(x)

        skips.append(x)
        x = MaxPooling2D((2, 2), data_format=data_format)(x)

    x = Conv2D(features, (3, 3),
               activation='relu',
               padding='same',
               data_format=data_format)(x)
    x = Dropout(params.dropout)(x)
    x = Conv2D(features, (3, 3),
               activation='relu',
               padding='same',
               data_format=data_format)(x)

    for i in reversed(range(depth)):
        if i in [0, 1, 2]:
            features = features // 2
        x = attention_up_and_concate(x,
                                     skips[i],
                                     features,
                                     data_format=data_format)
        x = Conv2D(features, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format=data_format)(x)

        if params.batchnorm:
            x = BatchNormalization()(x)

        x = Dropout(params.dropout)(x)
        x = Conv2D(features, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format=data_format)(x)

        if params.batchnorm:
            x = BatchNormalization()(x)

    conv6 = Conv2D(params.num_classes, (1, 1),
                   padding='same',
                   data_format=data_format)(x)
    conv7 = core.Activation('softmax')(conv6)
    model = Model(inputs=inputs, outputs=conv7)
    return model
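
attention_up_and_concate is not defined in this excerpt. A minimal sketch of a standard additive attention gate in the spirit of Attention U-Net (Oktay et al.) that matches the call signature used above; the internals are assumptions, not this project's actual code:

from tensorflow.keras import layers

def attention_up_and_concate(x, skip, features, data_format='channels_last'):
    channel_axis = 3 if data_format == 'channels_last' else 1
    up = layers.UpSampling2D(size=(2, 2), data_format=data_format)(x)
    # project both inputs, add, squash, and derive a 0..1 attention mask
    theta = layers.Conv2D(features, (1, 1), data_format=data_format)(skip)
    phi = layers.Conv2D(features, (1, 1), data_format=data_format)(up)
    f = layers.Activation('relu')(layers.add([theta, phi]))
    psi = layers.Conv2D(1, (1, 1), activation='sigmoid',
                        data_format=data_format)(f)
    attended = layers.multiply([skip, psi])  # mask the skip connection
    return layers.concatenate([up, attended], axis=channel_axis)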