Ejemplo n.º 1
0
def create_generator_model():
    """Build the DCGAN generator: a CODE_SIZE noise vector in, a raw
    3-channel image map out (final conv is linear, no activation)."""
    gen = Sequential()

    # Project the noise vector up to an 8x8 spatial map with 10 channels.
    gen.add(L.InputLayer(input_shape=[data_sample.CODE_SIZE], name="noise"))
    gen.add(L.Dense(8 * 8 * 10, activation='elu'))
    gen.add(L.Reshape([8, 8, 10]))

    # Two 5x5 transposed convolutions, then a 2x upsample.
    for _ in range(2):
        gen.add(L.Deconv2D(filters=64, kernel_size=[5, 5], activation='elu'))
    gen.add(L.UpSampling2D(size=[2, 2]))

    # Refine with three 3x3 transposed convolutions.
    for _ in range(3):
        gen.add(L.Deconv2D(filters=32, kernel_size=[3, 3], activation='elu'))

    # Linear conv produces the raw RGB output.
    gen.add(L.Conv2D(filters=3, kernel_size=[3, 3], activation=None))

    return gen
Ejemplo n.º 2
0
def network(train_generator, validation_generator, test_generator,
            callback_list):
    """Build, train and save a small binary classifier on 150x150 RGB input.

    Returns the Keras ``History`` object from training.

    NOTE(review): ``layers.Deconv2D`` is used where a classifier of this
    shape usually uses ``Conv2D`` — kept as-is to preserve behaviour;
    confirm intent.  ``test_generator`` is accepted but unused here.
    """
    model = Sequential()
    model.add(
        layers.Deconv2D(32, (3, 3),
                        activation="relu",
                        input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Three further stages with widening filter counts, each pooled 2x2.
    for width in (64, 128, 128):
        model.add(layers.Deconv2D(width, (3, 3), activation="relu"))
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Classifier head: flatten -> dropout -> dense -> sigmoid probability.
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=50,
                                  epochs=50,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=50)
    model.save('cats_and_dogs_small_2.h5')
    return history
Ejemplo n.º 3
0
    def create_generator(self):
        """Build the noise->image generator network and store it on
        ``self.generator``."""
        net = Sequential()
        # Noise vector in, projected up to a 16x16 map with 10 channels.
        net.add(L.InputLayer([self.CODE_SIZE], name='noise'))
        net.add(L.Dense(16 * 16 * 10, activation='elu'))
        net.add(L.Reshape((16, 16, 10)))

        # Two 5x5 transposed convolutions, then a 2x upsample.
        for _ in range(2):
            net.add(L.Deconv2D(64, kernel_size=(5, 5), activation='elu'))
        net.add(L.UpSampling2D(size=(2, 2)))

        # Refine with 3x3 transposed convs; final conv is linear RGB.
        for _ in range(2):
            net.add(L.Deconv2D(32, kernel_size=3, activation='elu'))
        net.add(L.Conv2D(3, kernel_size=3, activation=None))

        self.generator = net
        print('Generator created successfully.')
Ejemplo n.º 4
0

############### main ##################

# Probe for a GPU, then open an interactive TF session that allocates GPU
# memory on demand (allow_growth) up to the whole device (fraction=1).
tf.test.gpu_device_name()
gpu_options = tf.GPUOptions(allow_growth=True,
                            per_process_gpu_memory_fraction=1)
s = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))

# Generator network: maps a CODE_SIZE noise vector to a 3-channel image.

generator = Sequential()
generator.add(L.InputLayer([CODE_SIZE], name='noise'))
# Project the noise up to a 16x16 map with 128 channels.
generator.add(L.Dense(128 * 16 * 16, activation='elu'))
generator.add(L.Reshape((16, 16, 128)))
generator.add(L.Deconv2D(128, kernel_size=(2, 2), activation='elu'))
generator.add(L.Deconv2D(64, kernel_size=(3, 3), activation='elu'))
# 5x spatial upsample, then refine with progressively smaller kernels;
# the final conv is linear (raw RGB output).
generator.add(L.UpSampling2D(size=(5, 5)))
generator.add(L.Deconv2D(64, kernel_size=4, activation='elu'))
generator.add(L.Deconv2D(64, kernel_size=3, activation='elu'))
generator.add(L.Deconv2D(64, kernel_size=2, activation='elu'))
generator.add(L.Conv2D(3, kernel_size=2, activation=None))
generator.summary()

# Discriminator network: image of IMG_SHAPE in; construction continues
# below this excerpt.
discriminator = Sequential()

discriminator.add(L.InputLayer(IMG_SHAPE))

discriminator.add(L.Conv2D(8, kernel_size=3))
discriminator.add(L.BatchNormalization())
discriminator.add(L.advanced_activations.LeakyReLU(alpha=.1))
Ejemplo n.º 5
0
    means, logs = args
    eps = K.random_normal(shape=(K.shape(means)),
                          mean=0, stddev=1)
    return means + K.exp(logs / 2) * eps

# Sample z from the latent Gaussian via the reparameterisation trick.
z = layers.Lambda(gaussian_sample)([means, z_logs])

# VAE decoder network: maps a latent vector back to an image.
decoder_input = Input(K.int_shape(z)[1:])
# Dense projection back up to the pre-flatten feature count.
x = layers.Dense(np.prod(shape_before_flattening), activation='relu')(decoder_input)
# Restore the spatial shape so the tensor can feed the transposed conv.
x = layers.Reshape(shape_before_flattening)(x)
# Transposed convolution: stride 2 doubles the spatial dimensions.
x = layers.Deconv2D(32, 3, strides=2, padding='same', activation='relu')(x)
# Final conv emits a single-channel (grayscale) sigmoid image.
x = layers.Conv2D(1, 3, padding='same', activation='sigmoid')(x)

# Stand-alone decoder model.
decoder = Model(decoder_input, x)

decoder.summary()

# Run z through the decoder to reconstruct the image.
z_decoded = decoder(z)

def KL_loss(y_true, y_pred):
    """Kullback-Leibler divergence loss for the VAE latent distribution.

    The original body was empty, so the function returned ``None`` and
    would break any ``model.compile(loss=KL_loss)`` call.  This returns
    the standard closed-form KL between N(means, exp(z_logs)) and the
    unit Gaussian N(0, 1), reading the module-level ``means``/``z_logs``
    tensors.  ``y_true`` and ``y_pred`` are ignored, as is usual when a
    latent regulariser is expressed as a Keras loss function.
    """
    return -0.5 * K.mean(
        1 + z_logs - K.square(means) - K.exp(z_logs), axis=-1)
def CGScom(data):
    """Attention-augmented VGG16-style encoder/decoder saliency network.

    The encoder mirrors VGG16's five conv blocks; the decoder upsamples
    with stride-2 transposed convolutions and U-Net-style skip
    concatenations.  Every conv/deconv is followed by a CBAM attention
    branch that is added back residually and re-activated with ReLU.

    The original body repeated the conv -> attention -> add -> ReLU
    pattern ~30 times inline; it is factored into private helpers here.
    All layer names, filter counts, kernel sizes and the wiring are
    unchanged, so the resulting graph is identical.

    Parameters
    ----------
    data : input tensor fed to the first conv layer.
        NOTE(review): the commented-out VGG16 call in the original
        suggests 224x224 input — confirm against the caller.

    Returns
    -------
    A Keras ``Model`` named ``'CGScom'`` mapping ``data`` to a
    single-channel sigmoid saliency map.
    """

    def _cbam(t):
        # Residual CBAM attention: ReLU(t + attention(t)).
        a = attach_attention_module(t, 'cbam_block')
        return layers.Activation('relu')(layers.add([t, a]))

    def _conv(t, filters, name):
        # 'same'-padded 3x3 ReLU conv followed by the attention residual.
        t = layers.Conv2D(filters, (3, 3),
                          activation='relu',
                          padding='same',
                          name=name)(t)
        return _cbam(t)

    def _deconv(t, filters, name):
        # Stride-2 4x4 transposed conv (doubles H and W) + attention.
        t = layers.Deconv2D(filters, (4, 4),
                            strides=(2, 2),
                            padding='same',
                            activation='relu',
                            name=name)(t)
        return _cbam(t)

    def _down(t, filters, conv_names, pool_name):
        # One encoder stage: stacked attention convs, then 2x2 max-pool.
        for conv_name in conv_names:
            t = _conv(t, filters, conv_name)
        return layers.MaxPooling2D((2, 2), strides=(2, 2),
                                   name=pool_name)(t)

    def _up(t, skip, filters, stage, n_convs):
        # One decoder stage: upsample, concat the encoder skip, refine.
        t = _deconv(t, filters, 'deconv{}'.format(stage))
        t = layers.Concatenate()([t, skip])
        for i in range(1, n_convs + 1):
            t = _conv(t, filters, 'deconv{}_conv{}'.format(stage, i))
        return t

    # ----- encoder (VGG16 layout); x0..x4 are kept as decoder skips -----
    x0 = _conv(data, 64, 'block1_conv')
    x1 = layers.MaxPooling2D((2, 2), strides=(2, 2),
                             name='block1_pool')(_conv(x0, 64, 'block1_conv2'))
    x2 = _down(x1, 128, ['block2_conv1', 'block2_conv2'], 'block2_pool')
    x3 = _down(x2, 256, ['block3_conv1', 'block3_conv2', 'block3_conv3'],
               'block3_pool')
    x4 = _down(x3, 512, ['block4_conv1', 'block4_conv2', 'block4_conv3'],
               'block4_pool')
    x5 = _down(x4, 512, ['block5_conv1', 'block5_conv2', 'block5_conv3'],
               'block5_pool')

    # ----- decoder with skip connections -----
    xx1 = _up(x5, x4, 512, 1, 3)
    xx2 = _up(xx1, x3, 256, 2, 3)
    xx3 = _up(xx2, x2, 128, 3, 3)
    xx4 = _up(xx3, x1, 64, 4, 2)
    top = _up(xx4, x0, 32, 5, 2)

    # Single-channel sigmoid output map.
    out = layers.Conv2D(1, (1, 1),
                        padding='same',
                        activation='sigmoid',
                        name='conv6_end')(top)

    return Model(data, out, name='CGScom')
def CGS(data):
    """Attention-augmented saliency network with a fusion refinement head.

    Stage 1 is a VGG16-style encoder/decoder with CBAM attention after
    every conv/deconv and U-Net skip concatenations, producing a coarse
    single-channel map (``conv6_end``, ReLU).  Stage 2 concatenates that
    map with channel 3 of the raw input and refines it through a small
    conv/deconv hourglass (``fusion*`` layers) ending in a sigmoid map.

    The original body repeated the conv -> attention -> add -> ReLU
    pattern dozens of times inline; it is factored into private helpers
    here.  All layer names, filter counts, kernel sizes and the wiring
    are unchanged, so the resulting graph is identical.

    Parameters
    ----------
    data : input tensor.
        NOTE(review): the ``data[:, :, :, 3]`` slice requires at least
        4 channels (the commented-out VGG16 call mentions 13) — confirm.

    Returns
    -------
    A Keras ``Model`` named ``'CGS'`` mapping ``data`` to the refined
    single-channel sigmoid map (``'fusion'`` layer output).
    """

    def _cbam(t):
        # Residual CBAM attention: ReLU(t + attention(t)).
        a = attach_attention_module(t, 'cbam_block')
        return layers.Activation('relu')(layers.add([t, a]))

    def _conv(t, filters, name, kernel=(3, 3)):
        # 'same'-padded ReLU conv followed by the attention residual.
        t = layers.Conv2D(filters, kernel,
                          activation='relu',
                          padding='same',
                          name=name)(t)
        return _cbam(t)

    def _deconv(t, filters, name):
        # Stride-2 4x4 transposed conv (doubles H and W) + attention.
        t = layers.Deconv2D(filters, (4, 4),
                            strides=(2, 2),
                            padding='same',
                            activation='relu',
                            name=name)(t)
        return _cbam(t)

    def _down(t, filters, conv_names, pool_name):
        # One encoder stage: stacked attention convs, then 2x2 max-pool.
        for conv_name in conv_names:
            t = _conv(t, filters, conv_name)
        return layers.MaxPooling2D((2, 2), strides=(2, 2),
                                   name=pool_name)(t)

    def _up(t, skip, filters, stage, n_convs):
        # One decoder stage: upsample, concat the encoder skip, refine.
        t = _deconv(t, filters, 'deconv{}'.format(stage))
        t = layers.Concatenate()([t, skip])
        for i in range(1, n_convs + 1):
            t = _conv(t, filters, 'deconv{}_conv{}'.format(stage, i))
        return t

    def _fusion_down(t, filters, conv_names, pool_name):
        # One refinement-encoder stage; pooling here uses padding='same'.
        for conv_name in conv_names:
            t = _conv(t, filters, conv_name)
        return layers.MaxPooling2D((2, 2),
                                   strides=(2, 2),
                                   padding='same',
                                   name=pool_name)(t)

    # ----- encoder (VGG16 layout); x0..x4 are kept as decoder skips -----
    x0 = _conv(data, 64, 'block1_conv')
    x1 = layers.MaxPooling2D((2, 2), strides=(2, 2),
                             name='block1_pool')(_conv(x0, 64, 'block1_conv2'))
    x2 = _down(x1, 128, ['block2_conv1', 'block2_conv2'], 'block2_pool')
    x3 = _down(x2, 256, ['block3_conv1', 'block3_conv2', 'block3_conv3'],
               'block3_pool')
    x4 = _down(x3, 512, ['block4_conv1', 'block4_conv2', 'block4_conv3'],
               'block4_pool')
    x5 = _down(x4, 512, ['block5_conv1', 'block5_conv2', 'block5_conv3'],
               'block5_pool')

    # ----- decoder with skip connections -----
    xx1 = _up(x5, x4, 512, 1, 3)
    xx2 = _up(xx1, x3, 256, 2, 3)
    xx3 = _up(xx2, x2, 128, 3, 3)
    xx4 = _up(xx3, x1, 64, 4, 2)
    top = _up(xx4, x0, 32, 5, 2)

    # Coarse single-channel map (ReLU here; the sigmoid comes after the
    # refinement head below).
    sal = layers.Conv2D(1, (1, 1),
                        padding='same',
                        activation='relu',
                        name='conv6_end')(top)

    # Concatenate input channel 3 with the coarse map as refinement input.
    d2 = layers.Lambda(lambda t: t[:, :, :, 3])(data)
    y7 = layers.Lambda(lambda t: K.expand_dims(t, axis=3))(d2)
    f = layers.Concatenate()([y7, sal])

    # ----- fusion refinement hourglass -----
    fusion1 = _fusion_down(f, 64, ['fusion1_conv1', 'fusion1_conv2'],
                           'fusion1_maxpooling')
    fusion2 = _fusion_down(fusion1, 128, ['fusion2_conv1', 'fusion2_conv2'],
                           'fusion2_maxpooling')
    fusion3 = _fusion_down(fusion2, 256,
                           ['fusion3_conv1', 'fusion3_conv2', 'fusion3_conv3'],
                           'fusion3_maxpooling')

    fusion4 = _deconv(fusion3, 128, 'fusion4_deconv')
    for i in range(1, 4):
        fusion4 = _conv(fusion4, 128, 'fusion4_conv{}'.format(i))

    fusion5 = _deconv(fusion4, 64, 'fusion5_deconv')
    for i in range(1, 3):
        fusion5 = _conv(fusion5, 64, 'fusion5_conv{}'.format(i))

    fusion6 = _deconv(fusion5, 32, 'fusion6_deconv')
    fusion6 = _conv(fusion6, 32, 'fusion6_conv1', kernel=(1, 1))
    # Final 1x1 convs: no attention residual after these (as original).
    fusion6 = layers.Conv2D(1, (1, 1),
                            padding='same',
                            activation='relu',
                            name='fusion6_conv2')(fusion6)
    out = layers.Conv2D(1, (1, 1),
                        padding='same',
                        activation='sigmoid',
                        name='fusion')(fusion6)

    return Model(data, out, name='CGS')
Ejemplo n.º 8
0
    def build_generator(self):
        """Build the generator: latent noise vector -> (100, 1, channels) series.

        A Dense projection is reshaped to a (25, 1, 256) feature volume and
        upsampled along the time axis by transposed convolutions (two
        stride-(2, 1) steps: 25 -> 50 -> 100), then squashed to [-1, 1] by a
        final tanh.

        Returns:
            keras.Model mapping a (self.latency_dim,) noise input to the
            generated raw data tensor.
        """
        model = Sequential()

        # Project the latent vector into a flat 25*1*256 feature volume.
        model.add(
            layers.Dense(256 * 1 * 25,
                         activation="relu",
                         input_dim=self.latency_dim))
        print('output_shape:', model.output_shape)
        model.add(layers.BatchNormalization())
        model.add(layers.Activation("relu"))
        print(model.output_shape)
        model.add(layers.Reshape((25, 1, 256)))
        assert model.output_shape == (None, 25, 1, 256)

        # Stride (1, 1): spatial shape is preserved at (25, 1).
        model.add(
            layers.Deconv2D(filters=256,
                            kernel_size=(5, 5),
                            strides=(1, 1),
                            padding='same',
                            use_bias=False))
        print(model.output_shape)
        assert model.output_shape == (None, 25, 1, 256)
        model.add(layers.BatchNormalization())
        model.add(layers.Activation("relu"))

        # Stride (2, 1): time axis doubles, 25 -> 50.
        model.add(
            layers.Deconv2D(filters=128,
                            kernel_size=(5, 5),
                            strides=(2, 1),
                            activation='relu',
                            padding='same',
                            use_bias=False))
        print(model.output_shape)
        assert model.output_shape == (None, 50, 1, 128)
        model.add(layers.BatchNormalization())
        model.add(layers.Activation("relu"))

        # Stride (2, 1): time axis doubles, 50 -> 100.
        model.add(
            layers.Deconv2D(filters=64,
                            kernel_size=(5, 5),
                            strides=(2, 1),
                            activation='relu',
                            padding='same',
                            use_bias=False))
        print(model.output_shape)
        assert model.output_shape == (None, 100, 1, 64)
        model.add(layers.BatchNormalization())
        model.add(layers.Activation("relu"))

        # Collapse to the requested number of output channels.
        model.add(
            layers.Conv2DTranspose(filters=self.audio_channels,
                                   kernel_size=(5, 5),
                                   strides=(1, 1),
                                   activation='relu',
                                   padding='same',
                                   use_bias=False))
        print(model.output_shape)
        # NOTE(review): this assert assumes self.audio_channels == 1 — confirm.
        assert model.output_shape == (None, 100, 1, 1)
        print(model.output_shape)
        model.add(layers.Activation("tanh"))

        # BUG FIX: model.summary() already prints the summary and returns
        # None; the previous extra print(model.summary()) printed the
        # summary twice followed by a spurious "None" line.
        model.summary()

        noise = Input(shape=(self.latency_dim, ))
        raw_data = model(noise)

        print('generator shape', raw_data.shape)

        # NOTE(review): compiling the inner Sequential looks redundant for a
        # GAN generator (it is trained through the combined model); kept for
        # backward compatibility. `optimizer_g` is a module-level global.
        model.compile(loss='mean_squared_error',
                      optimizer=optimizer_g,
                      metrics=['mae'])

        return Model(noise, raw_data)
import tensorflow as tf
from keras_utils import reset_tf_session
s = reset_tf_session()

import keras
from keras.models import Sequential
from keras import layers as L

# Dimensionality of the latent noise vector fed to the generator.
CODE_SIZE = 256

# Generator: noise vector -> image. A Dense projection to an 8x8x10 volume,
# grown by transposed convolutions (with one 2x nearest-neighbour upsampling
# step); the final Conv2D emits 3 linear-activation channels.
generator = Sequential()
generator.add(L.InputLayer([CODE_SIZE], name='noise'))
generator.add(L.Dense(8 * 8 * 10, activation='elu'))
generator.add(L.Reshape((8, 8, 10)))
for n_filters in (64, 64):
    generator.add(L.Deconv2D(n_filters, kernel_size=(5, 5), activation='elu'))
generator.add(L.UpSampling2D(size=(2, 2)))
for _ in range(3):
    generator.add(L.Deconv2D(32, kernel_size=3, activation='elu'))
generator.add(L.Conv2D(3, kernel_size=3, activation=None))

assert generator.output_shape[1:] == IMG_SHAPE, "generator must output an image of shape %s, but instead it produces %s" % (IMG_SHAPE, generator.output_shape[1:])

"""### Discriminator
* Discriminator is your usual convolutional network with interleaved convolution and pooling layers
* The network does not include dropout/batchnorm to avoid learning complications.
* We also regularize the pre-output layer to prevent discriminator from being too certain.
"""
def fcn8_l2_graph(feature_map, config, mode=None):
    '''Builds the computation graph of the FCN8 segmentation head, with L2
    (weight-decay) regularization on every conv/deconv kernel.

    Structure: a VGG16-style convolutional backbone (blocks 1-5) followed by
    the standard FCN32 -> FCN16 -> FCN8 skip-connection upsampling path.

    feature_map:            Contextual Tensor [batch, num_classes, width, depth]
    config:                 configuration object; reads BATCH_SIZE, FCN_INPUT_SHAPE,
                            NUM_CLASSES, TRAIN_ROIS_PER_IMAGE, WEIGHT_DECAY, VERBOSE
    mode:                   'training' sets the Keras learning phase to 1;
                            anything else sets it to 0 (inference).

    Returns:
        fcn_hm : raw (linear-activation) FCN8 heatmap, shaped like feature_map
        fcn_sm : softmax of fcn_hm
    '''
    print()
    print('------------------------------------------------------')
    print('>>> FCN8L2 Layer With Regularization - mode:', mode)
    print('------------------------------------------------------')
    batch_size        = config.BATCH_SIZE          # currently unused; kept for reference
    height, width     = config.FCN_INPUT_SHAPE[0:2]
    num_classes       = config.NUM_CLASSES
    rois_per_class    = config.TRAIN_ROIS_PER_IMAGE
    weight_decay      = config.WEIGHT_DECAY
    # In the original implementation, batch_momentum was used for the batch
    # normalization layers of the ResNet backbone. That backbone is not used
    # in FCN, therefore it is unused here.
    # batch_momentum    = config.BATCH_MOMENTUM
    verbose           = config.VERBOSE
    feature_map_shape = (width, height, num_classes)   # currently unused; kept for reference
    print('     feature map      :', feature_map.shape)
    print('     height :', height, 'width :', width, 'classes :' , num_classes)
    print('     image_data_format: ', KB.image_data_format())
    # BUG FIX: this diagnostic previously printed KB.image_data_format()
    # (copy-paste error) instead of the value it labels.
    print('     rois_per_class   : ', rois_per_class)
    print('     FCN L2 weight decay : ', weight_decay)

    if mode == 'training':
        KB.set_learning_phase(1)
    else:
        KB.set_learning_phase(0)
    print('     Set learning phase to :', KB.learning_phase())

    # TODO: Assert proper shape of input [batch_size, width, height, num_classes]
    # TODO: check if stride of 2 causes alignment issues if the featuremap is not even.

    ##-------------------------------------------------------------------------------------------------------
    ## Block 1
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(feature_map)
    print('   Input feature map                   : ', feature_map.shape)
    print('   FCN Block 11 shape is               : ' , KB.int_shape(x))

    x = KL.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 12 shape is               : ' , KB.int_shape(x))

    x = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    print('   FCN Block 13 (Max pooling) shape is : ' , KB.int_shape(x))

    ##-------------------------------------------------------------------------------------------------------
    ## Block 2
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 21 shape is               : ' , x.get_shape())

    x = KL.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 22 shape is               : ' , KB.int_shape(x))

    x = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    print('   FCN Block 23 (Max pooling) shape is : ' , KB.int_shape(x))

    ##-------------------------------------------------------------------------------------------------------
    ## Block 3 -- its pooled output (Pool3) also feeds the FCN8 skip connection
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 31 shape is               : ' , KB.int_shape(x))

    x = KL.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 32 shape is               : ' , KB.int_shape(x))

    x = KL.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 33 shape is               : ' , KB.int_shape(x))

    Pool3 = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    print('   FCN Block 34 (Max pooling) shape is : ' ,Pool3.get_shape())

    ##-------------------------------------------------------------------------------------------------------
    ## Block 4 -- its pooled output (Pool4) also feeds the FCN16 skip connection
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(Pool3)
    print('   FCN Block 41 shape is               : ' , KB.int_shape(x))
    x = KL.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 42 shape is               : ' , KB.int_shape(x))
    x = KL.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 43 shape is               : ' , KB.int_shape(x))
    Pool4 = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    print('   FCN Block 44 (Max pooling) shape is : ' ,Pool4.get_shape())

    ##-------------------------------------------------------------------------------------------------------
    ## Block 5
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(Pool4)
    print('   FCN Block 51 shape is               : ' , KB.int_shape(x))

    x = KL.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 52 shape is               : ' , KB.int_shape(x))

    x = KL.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3',
                    kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN Block 53 shape is               : ' , KB.int_shape(x))

    x = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    print('   FCN Block 54 (Max pooling) shape is : ' , KB.int_shape(x))

    ##-------------------------------------------------------------------------------------------------------
    ## FCN32 Specific Structure
    ##-------------------------------------------------------------------------------------------------------
    # Convolutional layers transferred from fully-connected layers.
    # Historical note: FC_SIZE was reduced from 4096 to 2048 and then to 1024
    # to cut the weight count, and later restored to 4096.
    # FC_SIZE = 2048
    FC_SIZE = 4096
    x = KL.Conv2D(FC_SIZE, (7, 7), activation='relu', padding='same', name='fcn32_fc1',
                  kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print()
    print('   --- FCN32 ----------------------------')
    print('   FCN fully connected 1 (fc1) shape   : ' , KB.int_shape(x))
    x = KL.Dropout(0.5)(x)
    x = KL.Conv2D(FC_SIZE, (1, 1), activation='relu', padding='same', name='fcn32_fc2',
                  kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=l2(weight_decay))(x)
    print('   FCN fully connected 2 (fc2) shape   : ' , KB.int_shape(x))
    x = KL.Dropout(0.5)(x)

    # Classifying layer: one score channel per class, linear activation.
    x = KL.Conv2D(num_classes, (1, 1), activation='linear', padding='valid', strides=(1, 1), name='fcn32_deconv2D',
                  kernel_initializer='glorot_uniform',
                  # kernel_initializer='he_normal',
                  bias_initializer='zeros',  kernel_regularizer=l2(weight_decay)) (x)

    print('   FCN conv2d (fcn32_deconv2D) shape   : ' , x.get_shape(),' keras_tensor ', KB.is_keras_tensor(x))

    ##-------------------------------------------------------------------------------------------------------
    ## FCN16 Specific Structure
    ##-------------------------------------------------------------------------------------------------------
    # Score Pool4: 1x1 conv reduces Pool4 to num_classes channels.
    scorePool4 = KL.Conv2D(num_classes, (1,1), activation="relu", padding="valid", name="fcn16_score_pool4",
                  kernel_initializer='glorot_uniform', bias_initializer='zeros',
                  kernel_regularizer=l2(weight_decay)) (Pool4)
    print()
    print('   --- FCN16 ----------------------------')
    print('   FCN scorePool4 (Conv2D(Pool4)) shape is                   : ' , KB.int_shape(scorePool4),
          '   keras_tensor ', KB.is_keras_tensor(scorePool4))

    # 2x upsampling to generate Score2 (padding was originally "valid").
    x  = KL.Deconv2D(num_classes,kernel_size=(4,4), strides = (2,2),  name = "fcn16_score2",
                  padding = "valid", activation=None,
                  kernel_initializer='glorot_uniform', bias_initializer='zeros',
                  kernel_regularizer=l2(weight_decay)) (x)

    print('   FCN 2x Upsampling (Deconvolution2D(fcn32_classify)) shape : ' , KB.int_shape(x),
          '   keras_tensor ', KB.is_keras_tensor(x))

    # Crop 1 pixel on each side so the upsampled score aligns with scorePool4.
    score2_c = KL.Cropping2D(cropping=((1,1),(1,1)), name="fcn16_crop_score2" )(x)

    print('   FCN 2x Upsampling/Cropped (Cropped2D(score2)) shape       : ' , KB.int_shape(score2_c),
          '   keras_tensor ', KB.is_keras_tensor(score2_c))

    # Fuse (sum) Score2 and scorePool4.
    x = KL.Add(name="fcn16_fuse_pool4")([score2_c,scorePool4])
    print('   FCN Add Score2,scorePool4 Add(score2_c, scorePool4) shape : ' , KB.int_shape(x),
          '   keras_tensor ', KB.is_keras_tensor(x))

    # 2x upsampling (padding was originally "valid", changed to "same").
    upscore_pool4 = KL.Deconv2D(num_classes, kernel_size=(4,4), strides = (2,2), name = "fcn16_upscore_pool4",
                    padding = "same", activation = None,
                    kernel_initializer='glorot_uniform', bias_initializer='zeros',
                    kernel_regularizer=l2(weight_decay)) (x)

    print('   FCN upscore_pool4 (Deconv(fuse_Pool4)) shape              : ' , KB.int_shape(upscore_pool4),
          '   keras_tensor ', KB.is_keras_tensor(upscore_pool4))

    ##-------------------------------------------------------------------------------------------------------
    ## FCN8 Specific Structure
    ##-------------------------------------------------------------------------------------------------------
    # Score Pool3: 1x1 conv reduces Pool3 to num_classes channels.
    scorePool3 = KL.Conv2D(num_classes, (1,1), activation="relu", padding="valid", name="fcn8_score_pool3",
                           kernel_initializer='glorot_uniform',  bias_initializer='zeros',
                           kernel_regularizer=l2(weight_decay))(Pool3)
    print()
    print('   --- FCN8 ----------------------------')
    print('   FCN scorePool4 (Conv2D(Pool4)) shape                      : ' , KB.int_shape(scorePool3),
          '   keras_tensor ', KB.is_keras_tensor(scorePool3))

    # A (0,0) crop is a no-op, kept so the graph mirrors the FCN16 stage.
    upscore_pool4_c = KL.Cropping2D(cropping=((0,0),(0,0)), name="fcn8_crop_pool4")(upscore_pool4)

    print('   FCN 2x Upsampling/Cropped (Cropped2D(score2)) shape       : ' , KB.int_shape(upscore_pool4_c),
          '   keras_tensor ', KB.is_keras_tensor(upscore_pool4_c))

    # Fuse (sum) upscore_pool4_c and scorePool3.
    x = KL.Add(name="fcn8_fuse_pool3")([upscore_pool4_c,scorePool3])

    print('   FCN Add Score2,scorePool4 shape is                        : ' ,  KB.int_shape(x),
          '   keras_tensor ', KB.is_keras_tensor(x))


    ##-------------------------------------------------------------------------------------------------------
    ## fcn output heatmap
    ##-------------------------------------------------------------------------------------------------------
    # Final 8x upsampling back to the input resolution
    # (padding was originally "valid", changed to "same").
    fcn_hm = KL.Deconvolution2D(num_classes, kernel_size=(16,16), strides = (8,8), name = "fcn8_heatmap",
                           padding = "same", activation = None,
                           kernel_initializer='glorot_uniform', bias_initializer='zeros',
                           kernel_regularizer=l2(weight_decay))(x)

    # NOTE(review): pins the static shape to the input's shape — assumes the
    # 8x deconv exactly restores feature_map's spatial dims; confirm for odd sizes.
    fcn_hm.set_shape(feature_map.shape)
    logt('FCN fcn8_classify/heatmap  (Deconv(fuse_Pool4)) ' , fcn_hm, verbose = verbose)
    fcn_hm = KL.Lambda(lambda z: tf.identity(z, name="fcn_hm"), name='fcn_heatmap_lambda') (fcn_hm)
    logt('fcn_hm (final)' , fcn_hm, verbose = verbose)
    print()

    ##-------------------------------------------------------------------------------------------------------
    ## fcn softmax output
    ##-------------------------------------------------------------------------------------------------------
    fcn_sm = KL.Activation("softmax", name = "fcn8_softmax")(fcn_hm)
    logt('fcn8_softmax  ', fcn_sm, verbose = verbose)
    # BUG FIX: the identity Lambda was previously applied to fcn_hm, which
    # silently discarded the softmax computed above; it must wrap fcn_sm.
    fcn_sm = KL.Lambda(lambda z: tf.identity(z, name="fcn_sm"), name='fcn_softmax_lambda') (fcn_sm)
    logt('fcn_sm (final)', fcn_sm, verbose = verbose)
    print()


    return fcn_hm , fcn_sm
Ejemplo n.º 11
0
def Deconv(x, f_dim):
    """Apply a stride-2, 5x5 transposed convolution with `f_dim` filters to x.

    'same' padding with stride 2 doubles the spatial dimensions; kernels are
    drawn from N(0, 0.02) (the usual DCGAN initialization).
    """
    deconv_layer = KL.Deconv2D(
        filters=f_dim,
        kernel_size=(5, 5),
        strides=(2, 2),
        kernel_initializer=KI.random_normal(stddev=0.02),
        padding='same',
    )
    return deconv_layer(x)
Ejemplo n.º 12
0
def create_model(input_shape):
    """Build and compile a convolutional encoder/decoder network.

    Five Conv2D layers widen the channels (4 -> 64), then a mirrored stack
    of Deconv2D layers narrows them back (64 -> 3). All layers use 3x3
    kernels with 'same' padding and unit stride, so spatial dimensions are
    preserved end to end; the final sigmoid keeps outputs in [0, 1].

    Args:
        input_shape: shape of one input sample, e.g. (height, width, 3).

    Returns:
        A compiled keras Sequential model (adam optimizer, MSE loss).
    """
    network = models.Sequential()

    # Encoder. NOTE: `input_shape` is only meaningful on the first layer of
    # a Sequential model; the original code redundantly repeated it on every
    # layer, where Keras silently ignores it — those repeats are removed.
    network.add(
        layers.Conv2D(4, (3, 3),
                      activation='relu',
                      padding='same',
                      input_shape=input_shape))
    network.add(layers.Conv2D(8, (3, 3), activation='relu', padding='same'))
    network.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same'))
    network.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
    network.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))

    # Decoder: mirror the encoder back down to 3 output channels.
    network.add(layers.Deconv2D(64, (3, 3), activation='relu', padding='same'))
    network.add(layers.Deconv2D(32, (3, 3), activation='relu', padding='same'))
    network.add(layers.Deconv2D(16, (3, 3), activation='relu', padding='same'))
    network.add(layers.Deconv2D(8, (3, 3), activation='relu', padding='same'))
    network.add(layers.Deconv2D(4, (3, 3), activation='relu', padding='same'))
    network.add(
        layers.Deconv2D(3, (3, 3), activation='sigmoid', padding='same'))

    network.compile(optimizer='adam', loss='mse')

    network.summary()
    return network
IMG_SHAPE = data.shape[1:]
# plt.imshow(data[np.random.randint(data.shape[0])], cmap='gray', interpolation='none')
# plt.show()

import tensorflow as tf
import keras
from keras.models import Sequential
import keras.layers as L

# Size of the latent code fed into the generator.
CODE_SIZE = 256

# Generator: latent code -> image of shape IMG_SHAPE. Dense projection to an
# 8x8x10 volume, then transposed convolutions (with one 2x upsampling step),
# finishing with a 3-channel linear Conv2D.
generator = Sequential()
generator.add(L.InputLayer([CODE_SIZE], name='noise'))
generator.add(L.Dense(8 * 8 * 10, activation='elu',
                      kernel_initializer="glorot_normal"))
generator.add(L.Reshape((8, 8, 10)))
for n_filters in (64, 64):
    generator.add(L.Deconv2D(n_filters, kernel_size=(5, 5), activation='elu',
                             kernel_initializer="glorot_normal"))
generator.add(L.UpSampling2D(size=(2, 2)))
for _ in range(3):
    generator.add(L.Deconv2D(32, kernel_size=3, activation='elu',
                             kernel_initializer="glorot_normal"))
generator.add(L.Conv2D(3, kernel_size=3, activation=None))

assert generator.output_shape[1:] == IMG_SHAPE
generator.summary()

discriminator = Sequential()
discriminator.add(L.InputLayer(IMG_SHAPE))

# discriminator.add(L.Conv2D(filters=32, kernel_size=[3, 3], kernel_initializer=tf.truncated_normal_initializer(), padding='same', activation='elu'))
# discriminator.add(L.MaxPooling2D(pool_size=[2, 2], padding='same'))