Code example #1
File: densenet.py  Project: julilien/LabelRelaxation
def conv_block(x, stage, branch, nb_filter, dropout_rate=None):
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(name=conv_name_base + '_x1_bn')(x)
    x = Activation('relu', name=relu_name_base + '_x1')(x)
    x = Convolution2D(inter_channel,
                      1,
                      1,
                      name=conv_name_base + '_x1',
                      use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(name=conv_name_base + '_x2_bn')(x)
    x = Activation('relu', name=relu_name_base + '_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base + '_x2_zeropadding')(x)
    x = Convolution2D(nb_filter,
                      3,
                      1,
                      name=conv_name_base + '_x2',
                      use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
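A minimal usage sketch for conv_block (not from the original file), assuming TensorFlow's bundled Keras 2 API; the input shape is illustrative, and the output keeps the 32x32 spatial size with nb_filter channels:

from tensorflow.keras.layers import (Input, BatchNormalization, Activation,
                                     Convolution2D, ZeroPadding2D, Dropout)
from tensorflow.keras.models import Model

inputs = Input(shape=(32, 32, 24))                      # arbitrary feature map
y = conv_block(inputs, stage=2, branch=1, nb_filter=12, dropout_rate=0.2)
Model(inputs, y).summary()                              # -> (None, 32, 32, 12)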
Code example #2
File: STResNet.py  Project: uZeroJ/DeepSTN
def stresnet(c_conf=(3, 2, 32, 32), p_conf=(3, 2, 32, 32), t_conf=(3, 2, 32, 32),
             external_dim=8, nb_residual_unit=3, CF=64):
    '''
    C - Temporal Closeness
    P - Period
    T - Trend
    conf = (len_seq, nb_flow, map_height, map_width)
    external_dim
    '''

    # main input
    main_inputs = []
    outputs = []
    for conf in [c_conf, p_conf, t_conf]:
        if conf is not None:
            len_seq, nb_flow, map_height, map_width = conf
            input = Input(shape=(nb_flow * len_seq, map_height, map_width))
            main_inputs.append(input)
            # Conv1
            conv1 = Convolution2D(
                filters=CF, kernel_size=(3, 3), padding="same")(input)
            # [nb_residual_unit] Residual Units
            residual_output = ResUnits(_residual_unit, nb_filter=CF,
                                       repetations=nb_residual_unit)(conv1)
            # Conv2
            activation = Activation('relu')(residual_output)
            conv2 = Convolution2D(
                filters=nb_flow, kernel_size=(3, 3), padding="same")(activation)
            outputs.append(conv2)

    # parameter-matrix-based fusion
    if len(outputs) == 1:
        main_output = outputs[0]
    else:
        from BikeNYC.DST_network.ilayer import iLayer
        new_outputs = []
        for output in outputs:
            new_outputs.append(iLayer()(output))
        main_output = Add()(new_outputs)

    # fusing with external component
    if external_dim is not None and external_dim > 0:
        # external input
        external_input = Input(shape=(external_dim,))
        main_inputs.append(external_input)
        embedding = Dense(units=10)(external_input)
        embedding = Activation('relu')(embedding)
        h1 = Dense(units=nb_flow * map_height * map_width)(embedding)
        activation = Activation('relu')(h1)
        external_output = Reshape((nb_flow, map_height, map_width))(activation)
        main_output = Add()([main_output, external_output])
    else:
        print('external_dim:', external_dim)

    main_output = Activation('tanh')(main_output)
    model = Model(inputs=main_inputs, outputs=main_output)

    return model
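A minimal instantiation sketch with the default configuration, assuming ResUnits, _residual_unit, and iLayer can be imported from the surrounding DeepSTN code; each spatio-temporal input is a channels-first tensor of shape (nb_flow * len_seq, map_height, map_width):

model = stresnet(c_conf=(3, 2, 32, 32), p_conf=(3, 2, 32, 32),
                 t_conf=(3, 2, 32, 32), external_dim=8, nb_residual_unit=3)
model.compile(loss='mse', optimizer='adam')
model.summary()  # four inputs: closeness, period, trend maps plus external features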
Code example #3
File: modelsCAE.py  Project: roseorenbuch/imageCCA
def modelDecode(cae, filterSize, poolSize, gpus):
    if gpus > 1:
        cae = cae.layers[-2]

    # initialize decoder
    decode = Sequential()
    decode.add(
        Dense(128 * 4 * 4,
              input_dim=(1024),
              weights=cae.layers[18].get_weights()))
    decode.add(Activation('relu'))
    decode.add(Reshape((128, 4, 4)))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(64, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[23].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(32, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[26].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(16, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[29].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(8, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[32].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(3, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[35].get_weights()))
    decode.add(Activation('sigmoid'))

    if gpus > 1:
        decode = multi_gpu_model(decode, gpus=gpus)

    decode.compile(loss='mse', optimizer='adam')

    return decode
Code example #4
File: modelsCAE.py  Project: roseorenbuch/imageCCA
def modelEncode(cae, filterSize, poolSize, sampSize, gpus):
    if gpus > 1:
        cae = cae.layers[-2]

    # initialize encoder
    encode = Sequential()
    encode.add(
        Convolution2D(8, (filterSize, filterSize),
                      input_shape=(3, sampSize, sampSize),
                      padding='same',
                      weights=cae.layers[0].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(16, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[3].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(32, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[6].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(64, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[9].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(
        Convolution2D(128, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[12].get_weights()))
    encode.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    encode.add(Activation('relu'))
    encode.add(Flatten())
    encode.add(Dense(1024, weights=cae.layers[16].get_weights()))
    encode.add(Activation('relu'))

    if gpus > 1:
        encode = multi_gpu_model(encode, gpus=gpus)

    encode.compile(loss='mse', optimizer='adam')

    return encode
Code example #5
File: STResNet.py  Project: uZeroJ/DeepSTN
def f(input):
    if bn:
        # input = BatchNormalization(mode=0, axis=1)(input)
        input = BatchNormalization(axis=1)(input)
    activation = Activation('relu')(input)
    # return Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col, subsample=subsample, border_mode="same")(activation)
    return Convolution2D(nb_filter, (nb_row, nb_col),
                         strides=subsample, padding='same')(activation)
Code example #6
File: densenet.py  Project: julilien/LabelRelaxation
def densenet_model(growth_rate=12,
                   nb_layers=[16, 16, 16],
                   reduction=0.5,
                   dropout_rate=0.0,
                   classes=16,
                   shape=(32, 32, 3),
                   final_activation='softmax'):
    # compute compression factor
    compression = 1.0 - reduction
    nb_dense_block = len(nb_layers)
    nb_filter = 2 * growth_rate

    img_input = Input(shape=shape, name='data')

    x = Convolution2D(2 * growth_rate, 3, 1, name='conv1',
                      use_bias=False)(img_input)

    stage = 0
    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate)

        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate)
        nb_filter = int(nb_filter * compression)

    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate)

    x = BatchNormalization(name='conv_final_blk_bn')(x)
    x = Activation('relu', name='relu_final_blk')(x)

    x = GlobalAveragePooling2D(name='pool_final')(x)
    output = Dense(classes, name='fc6', activation=final_activation)(x)

    return Model(inputs=img_input, outputs=output)
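An illustrative call, assuming dense_block and transition_block from the same densenet.py are available (conv_block from code example #1 is used inside dense_block); the hyperparameters mirror the defaults:

model = densenet_model(growth_rate=12, nb_layers=[16, 16, 16], reduction=0.5,
                       dropout_rate=0.2, classes=10, shape=(32, 32, 3))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()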
Code example #7
File: densenet.py  Project: julilien/LabelRelaxation
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None):
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(name=conv_name_base + '_bn')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression),
                      1,
                      1,
                      name=conv_name_base,
                      use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), name=pool_name_base)(x)

    return x
Code example #8
def segnet(input_shape,
           n_labels,
           kernel=3,
           pool_size=(2, 2),
           output_mode="softmax",
           model_number=1):

    if (model_number == 1):
        # encoder
        inputs = Input(shape=input_shape)

        conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
        conv_1 = Activation("relu")(conv_1)
        conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
        conv_2 = Activation("relu")(conv_2)
        conv_3 = Convolution2D(64, (kernel, kernel), padding="same")(conv_2)
        conv_3 = Activation("relu")(conv_3)

        pool_1 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_3)
        conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
        conv_4 = Activation("relu")(conv_4)
        conv_5 = Convolution2D(128, (kernel, kernel), padding="same")(conv_4)
        conv_5 = Activation("relu")(conv_5)
        conv_6 = Convolution2D(128, (kernel, kernel), padding="same")(conv_5)
        conv_6 = Activation("relu")(conv_6)

        pool_2 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_6)
        print("Encoder built..")

        # decoder

        # unpool_1 = Conv2DTranspose(128, (kernel, kernel), padding="same", strides=(2,2))(pool_2)
        unpool_1 = UpSampling2D(size=pool_size)(pool_2)
        conv_7 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_1)
        conv_7 = Activation("relu")(conv_7)
        conv_8 = Convolution2D(128, (kernel, kernel), padding="same")(conv_7)
        conv_8 = Activation("relu")(conv_8)
        conv_9 = Convolution2D(128, (kernel, kernel), padding="same")(conv_8)
        conv_9 = Activation("relu")(conv_9)

        unpool_2 = UpSampling2D(size=pool_size)(conv_9)
        # unpool_2 = Conv2DTranspose(64, (kernel, kernel), padding="same", strides=(2,2))(conv_9)
        conv_10 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_2)
        conv_10 = Activation("relu")(conv_10)
        conv_11 = Convolution2D(64, (kernel, kernel), padding="same")(conv_10)
        conv_11 = Activation("relu")(conv_11)

        conv_12 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_11)
        conv_12 = Reshape(
            (input_shape[0] * input_shape[1], n_labels),
            input_shape=(input_shape[0], input_shape[1], n_labels),
        )(conv_12)

        outputs = Activation(output_mode)(conv_12)
        print("Decoder built..")

        model = Model(inputs=inputs, outputs=outputs, name="SegNet")

    elif (model_number == 2):
        # encoder
        inputs = Input(shape=input_shape)

        conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
        conv_1 = Activation("relu")(conv_1)
        conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
        conv_2 = Activation("relu")(conv_2)
        conv_3 = Convolution2D(64, (kernel, kernel), padding="same")(conv_2)
        conv_3 = Activation("relu")(conv_3)

        pool_1 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_3)
        conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
        conv_4 = Activation("relu")(conv_4)
        conv_5 = Convolution2D(128, (kernel, kernel), padding="same")(conv_4)
        conv_5 = Activation("relu")(conv_5)
        conv_6 = Convolution2D(128, (kernel, kernel), padding="same")(conv_5)
        conv_6 = Activation("relu")(conv_6)

        pool_2 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_6)
        conv_7 = Convolution2D(256, (kernel, kernel), padding="same")(pool_2)
        conv_7 = Activation("relu")(conv_7)
        conv_8 = Convolution2D(256, (kernel, kernel), padding="same")(conv_7)
        conv_8 = Activation("relu")(conv_8)
        conv_9 = Convolution2D(256, (kernel, kernel), padding="same")(conv_8)
        conv_9 = Activation("relu")(conv_9)

        pool_3 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_9)
        print("Encoder built..")

        # decoder
        unpool_1 = Conv2DTranspose(256, (kernel, kernel),
                                   padding="same",
                                   strides=(2, 2))(pool_3)
        conv_10 = Convolution2D(256, (kernel, kernel),
                                padding="same")(unpool_1)
        conv_10 = Activation("relu")(conv_10)
        conv_11 = Convolution2D(256, (kernel, kernel), padding="same")(conv_10)
        conv_11 = Activation("relu")(conv_11)
        conv_12 = Convolution2D(256, (kernel, kernel), padding="same")(conv_11)
        conv_12 = Activation("relu")(conv_12)

        unpool_2 = Conv2DTranspose(128, (kernel, kernel),
                                   padding="same",
                                   strides=(2, 2))(conv_12)
        conv_13 = Convolution2D(128, (kernel, kernel),
                                padding="same")(unpool_2)
        conv_13 = Activation("relu")(conv_13)
        conv_14 = Convolution2D(128, (kernel, kernel), padding="same")(conv_13)
        conv_14 = Activation("relu")(conv_14)
        conv_15 = Convolution2D(128, (kernel, kernel), padding="same")(conv_14)
        conv_15 = Activation("relu")(conv_15)

        unpool_3 = Conv2DTranspose(64, (kernel, kernel),
                                   padding="same",
                                   strides=(2, 2))(conv_15)
        conv_16 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_3)
        conv_16 = Activation("relu")(conv_16)
        conv_17 = Convolution2D(64, (kernel, kernel), padding="same")(conv_16)
        conv_17 = Activation("relu")(conv_17)

        conv_18 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_17)
        conv_18 = Reshape(
            (input_shape[0] * input_shape[1], n_labels),
            input_shape=(input_shape[0], input_shape[1], n_labels),
        )(conv_18)

        outputs = Activation(output_mode)(conv_18)
        print("Decoder built..")

        model = Model(inputs=inputs, outputs=outputs, name="SegNet")

    elif (model_number == 3):
        # encoder
        inputs = Input(shape=input_shape)

        conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
        conv_1 = Activation("relu")(conv_1)
        conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
        conv_2 = Activation("relu")(conv_2)
        conv_3 = Convolution2D(64, (kernel, kernel), padding="same")(conv_2)
        conv_3 = Activation("relu")(conv_3)

        pool_1 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_3)
        conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
        conv_4 = Activation("relu")(conv_4)
        conv_5 = Convolution2D(128, (kernel, kernel), padding="same")(conv_4)
        conv_5 = Activation("relu")(conv_5)
        conv_6 = Convolution2D(128, (kernel, kernel), padding="same")(conv_5)
        conv_6 = Activation("relu")(conv_6)

        pool_2 = MaxPooling2D(pool_size=pool_size, padding="valid")(conv_6)
        print("Encoder built..")

        # decoder

        unpool_1 = Conv2DTranspose(128, (kernel, kernel),
                                   padding="same",
                                   strides=(2, 2),
                                   activation='relu')(pool_2)
        # unpool_1 = UpSampling2D(size=pool_size)(pool_2)
        conv_7 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_1)
        conv_7 = Activation("relu")(conv_7)
        conv_8 = Convolution2D(128, (kernel, kernel), padding="same")(conv_7)
        conv_8 = Activation("relu")(conv_8)
        # conv_9 = Convolution2D(128, (kernel, kernel), padding="same")(conv_8)
        # conv_9 = Activation("relu")(conv_9)

        # unpool_2 = UpSampling2D(size=pool_size)(conv_9)
        unpool_2 = Conv2DTranspose(64, (kernel, kernel),
                                   padding="same",
                                   strides=(2, 2),
                                   activation='relu')(conv_8)
        conv_10 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_2)
        conv_10 = Activation("relu")(conv_10)
        conv_11 = Convolution2D(64, (kernel, kernel), padding="same")(conv_10)
        conv_11 = Activation("relu")(conv_11)

        conv_12 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_11)
        conv_12 = Reshape(
            (input_shape[0] * input_shape[1], n_labels),
            input_shape=(input_shape[0], input_shape[1], n_labels),
        )(conv_12)

        outputs = Activation(output_mode)(conv_12)
        print("Decoder built..")

        model = Model(inputs=inputs, outputs=outputs, name="SegNet")
    return model
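A hypothetical invocation of this variant; the input shape is an assumption, and since the encoder pools twice with (2, 2), height and width should be divisible by 4 for the final Reshape to line up:

model = segnet(input_shape=(256, 256, 3), n_labels=2,
               kernel=3, pool_size=(2, 2), model_number=1)
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()  # output shape: (None, 256 * 256, 2)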
Code example #9
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2 # Content and Output image not considered

# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)

if K.image_data_format() == "channels_first":
    shape = (nb_tensors, 3, img_width, img_height)
else:
    shape = (nb_tensors, img_width, img_height, 3)

ip = Input(tensor=input_tensor, batch_shape=shape)

# build the VGG16 network with our 3 images as input
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
Code example #10
File: MRFNetwork.py  Project: strategist922/SEEDBank
combination_image = K.placeholder((1, 3, img_width, img_height))

# combine the 4 images into a single Keras tensor
input_tensor = K.concatenate([
    style_image_tensor, style_image_mask_tensor, content_image_tensor,
    combination_image
],
                             axis=0)

# build the VGG16 network with our 4 images as input
first_layer = ZeroPadding2D((1, 1))
first_layer.set_input(input_tensor, shape=(4, 3, img_width, img_height))

model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
Code example #11
File: segnet_model.py  Project: tfug-cd/skyunet
def segnet(input_shape,
           n_labels,
           kernel=3,
           pool_size=(2, 2),
           output_mode="softmax"):
    # encoder
    inputs = Input(shape=input_shape)

    conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation("relu")(conv_1)
    conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation("relu")(conv_2)

    pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)

    conv_3 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation("relu")(conv_3)
    conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(conv_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation("relu")(conv_4)

    pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)

    conv_5 = Convolution2D(256, (kernel, kernel), padding="same")(pool_2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation("relu")(conv_5)
    conv_6 = Convolution2D(256, (kernel, kernel), padding="same")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = Activation("relu")(conv_6)
    conv_7 = Convolution2D(256, (kernel, kernel), padding="same")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Activation("relu")(conv_7)

    pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)

    conv_8 = Convolution2D(512, (kernel, kernel), padding="same")(pool_3)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = Activation("relu")(conv_8)
    conv_9 = Convolution2D(512, (kernel, kernel), padding="same")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = Activation("relu")(conv_9)
    conv_10 = Convolution2D(512, (kernel, kernel), padding="same")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = Activation("relu")(conv_10)

    pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size)(conv_10)

    conv_11 = Convolution2D(512, (kernel, kernel), padding="same")(pool_4)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = Activation("relu")(conv_11)
    conv_12 = Convolution2D(512, (kernel, kernel), padding="same")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = Activation("relu")(conv_12)
    conv_13 = Convolution2D(512, (kernel, kernel), padding="same")(conv_12)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = Activation("relu")(conv_13)

    pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size)(conv_13)
    print("Build enceder done..")

    # decoder

    unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])

    conv_14 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_1)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = Activation("relu")(conv_14)
    conv_15 = Convolution2D(512, (kernel, kernel), padding="same")(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    conv_15 = Activation("relu")(conv_15)
    conv_16 = Convolution2D(512, (kernel, kernel), padding="same")(conv_15)
    conv_16 = BatchNormalization()(conv_16)
    conv_16 = Activation("relu")(conv_16)

    unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])

    conv_17 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_2)
    conv_17 = BatchNormalization()(conv_17)
    conv_17 = Activation("relu")(conv_17)
    conv_18 = Convolution2D(512, (kernel, kernel), padding="same")(conv_17)
    conv_18 = BatchNormalization()(conv_18)
    conv_18 = Activation("relu")(conv_18)
    conv_19 = Convolution2D(256, (kernel, kernel), padding="same")(conv_18)
    conv_19 = BatchNormalization()(conv_19)
    conv_19 = Activation("relu")(conv_19)

    unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])

    conv_20 = Convolution2D(256, (kernel, kernel), padding="same")(unpool_3)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = Activation("relu")(conv_20)
    conv_21 = Convolution2D(256, (kernel, kernel), padding="same")(conv_20)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = Activation("relu")(conv_21)
    conv_22 = Convolution2D(128, (kernel, kernel), padding="same")(conv_21)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = Activation("relu")(conv_22)

    unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])

    conv_23 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_4)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)
    conv_24 = Convolution2D(64, (kernel, kernel), padding="same")(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)

    unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])

    conv_25 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_5)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)

    conv_26 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_25)
    conv_26 = BatchNormalization()(conv_26)
    conv_26 = Reshape(
        (input_shape[0] * input_shape[1], n_labels),
        input_shape=(input_shape[0], input_shape[1], n_labels))(conv_26)

    outputs = Activation(output_mode)(conv_26)
    print("Build decoder done..")

    model = Model(inputs=inputs, outputs=outputs, name="SegNet")

    return model
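An illustrative call for this argmax-pooling variant; MaxPoolingWithArgmax2D and MaxUnpooling2D are custom layers from the same project, and with five 2x2 pooling stages the spatial dimensions should be divisible by 32:

model = segnet(input_shape=(256, 256, 3), n_labels=2,
               kernel=3, pool_size=(2, 2), output_mode="softmax")
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()  # output shape: (None, 256 * 256, 2)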
Code example #12
File: modelsCAE.py  Project: roseorenbuch/imageCCA
def modelCAE(filterSize, poolSize, sampSize, gpus, weights=None):
    #strategy = tf.distribute.MirroredStrategy()
    #with strategy.scope():
    # initialize cae
    cae = Sequential()

    # convolution + pooling 1
    cae.add(
        Convolution2D(8, (filterSize, filterSize),
                      input_shape=(3, sampSize, sampSize),
                      padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))

    # convolution + pooling 2
    cae.add(Convolution2D(16, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))

    # convolution + pooling 3
    cae.add(Convolution2D(32, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))

    # convolution + pooling 4
    cae.add(Convolution2D(64, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))

    # convolution + pooling 5
    cae.add(Convolution2D(128, (filterSize, filterSize), padding='same'))
    cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cae.add(Activation('relu'))

    # dense network
    cae.add(Flatten())
    cae.add(Dense(1024))
    cae.add(Activation('relu'))
    cae.add(Dense(128 * 4 * 4))
    cae.add(Activation('relu'))
    cae.add(Reshape((128, 4, 4)))
    cae.add(Activation('relu'))

    # unpooling + deconvolution 1
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(64, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))

    # unpooling + deconvolution 2
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(32, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))

    # unpooling + deconvolution 3
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(16, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))

    # unpooling + deconvolution 4
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(8, (filterSize, filterSize), padding='same'))
    cae.add(Activation('relu'))

    # final unpooling + deconvolution
    cae.add(UpSampling2D(size=(poolSize, poolSize)))
    cae.add(Convolution2D(3, (filterSize, filterSize), padding='same'))
    cae.add(Activation('sigmoid'))  # ADDITION -DM

    # compile and load pretrained weights
    if gpus > 1:
        cae = multi_gpu_model(cae, gpus=gpus)
    cae.compile(loss='mse', optimizer=Adam(lr=0.0005, decay=1e-5))
    if weights:
        #print('loading pretrained weights')
        cae.load_weights(weights)

    return cae
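This autoencoder is the source of the weights reused by modelEncode and modelDecode in code examples #3 and #4; a rough pipeline sketch follows. The concrete values are assumptions, but sampSize=128 with poolSize=2 is the combination consistent with the fixed Reshape((128, 4, 4)) bottleneck (128 / 2**5 = 4), and the (3, sampSize, sampSize) input shape implies a channels-first image_data_format:

cae = modelCAE(filterSize=3, poolSize=2, sampSize=128, gpus=1, weights=None)
# ... train cae on (N, 3, 128, 128) images scaled to [0, 1], then split it:
encode = modelEncode(cae, filterSize=3, poolSize=2, sampSize=128, gpus=1)
decode = modelDecode(cae, filterSize=3, poolSize=2, gpus=1)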
Code example #13
File: modelsCAE.py  Project: roseorenbuch/imageCCA
def modelClassifier(cae, filterSize, poolSize, gpus):
    if gpus > 1:
        cae = cae.layers[-2]

    cl = Sequential()
    cl.add(
        Convolution2D(8, (filterSize, filterSize),
                      input_shape=(3, sampSize, sampSize),
                      padding='same',
                      weights=cae.layers[0].get_weights()))
    cl.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cl.add(Activation('relu'))
    cl.add(
        Convolution2D(16, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[3].get_weights()))
    cl.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cl.add(Activation('relu'))
    cl.add(
        Convolution2D(32, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[6].get_weights()))
    cl.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cl.add(Activation('relu'))
    cl.add(
        Convolution2D(64, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[9].get_weights()))
    cl.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cl.add(Activation('relu'))
    cl.add(
        Convolution2D(128, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[12].get_weights()))
    cl.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
    cl.add(Activation('relu'))
    cl.add(Flatten())
    cl.add(Dense(1024, weights=cae.layers[16].get_weights()))
    cl.add(Activation('relu'))
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

    cl.add(Dense(100))
    cl.add(Dropout(0.5))
    cl.add(Activation('relu'))

    if nClasses == 2:
        cl.add(Dense(1))
        cl.add(Dropout(0.5))
        cl.add(Activation('sigmoid'))

        if gpus > 1:
            cl = multi_gpu_model(cl, gpus=gpus)

        cl.compile(loss='binary_crossentropy',
                   optimizer=Adam(lr=5e-7, decay=1e-5))
    else:
        cl.add(Dense(nClasses))
        cl.add(Dropout(0.5))
        cl.add(Activation('softmax'))

        if gpus > 1:
            cl = multi_gpu_model(cl, gpus=gpus)

        cl.compile(loss='categorical_crossentropy',
                   optimizer=Adam(lr=5e-7, decay=1e-5))
    return cl
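One quirk worth flagging: unlike modelEncode, this function reads sampSize and nClasses from the enclosing module rather than from its arguments, so both names must be defined at module level in modelsCAE.py before it is called (the values below are assumptions):

sampSize = 128   # the sampSize the cae was built with (see modelCAE above)
nClasses = 2     # 2 gives a sigmoid head with binary_crossentropy, otherwise softmax
cl = modelClassifier(cae, filterSize=3, poolSize=2, gpus=1)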