def make_generator_model():
    """Build a DCGAN generator mapping a 100-dim noise vector to a 28x28x3 image.

    Layout: Dense projection -> reshape to 7x7x256, then three transposed-conv
    stages (7x7 -> 7x7 -> 14x14 -> 28x28) with BatchNorm + ReLU between them
    and a tanh output (values in [-1, 1]).
    """
    stack = [
        Dense(7 * 7 * 256, use_bias=False, input_shape=(100, )),
        BatchNormalization(),
        Activation("relu"),
        Reshape((7, 7, 256)),
        # Stride 1: spatial size stays 7x7, channels drop to 128.
        C2DT(128, (5, 5), strides=(1, 1), padding='same', use_bias=False),
        BatchNormalization(),
        Activation("relu"),
        # 7x7 -> 14x14.
        C2DT(64, (5, 5), strides=(2, 2), padding='same', use_bias=False),
        BatchNormalization(),
        Activation("relu"),
        # 14x14 -> 28x28, 3 output channels squashed by tanh.
        C2DT(3, (5, 5),
             strides=(2, 2),
             padding='same',
             use_bias=False,
             activation='tanh'),
    ]

    model = tf.keras.Sequential()
    for layer in stack:
        model.add(layer)

    assert model.output_shape == (None, 28, 28, 3)
    print("========\ngenerator")
    print(model.summary())
    return model
Beispiel #2
0
def make_generator_model():
    """Build a DCGAN generator: 100-dim noise -> 28x28x3 tanh image.

    All weights use a Gaussian init (stddev 0.02); activations are
    LeakyReLU(0.2) with BatchNorm after each learned stage.
    """
    init = RandomNormal(mean=0.0, stddev=0.02)
    model = tf.keras.Sequential()

    # Project the noise vector and reshape it into a 7x7x128 feature map.
    model.add(Dense(7 * 7 * 128, kernel_initializer=init, input_shape=(100, )))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))

    # Upsample 7x7 -> 14x14.
    model.add(C2DT(64, 5, strides=2, padding='same', kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))

    # NOTE: a second 32-filter upsampling stage was present but disabled
    # in the original source.

    # Upsample 14x14 -> 28x28 and emit 3 tanh-activated channels.
    model.add(C2DT(3, 5, strides=2, padding='same', activation='tanh',
                   kernel_initializer=init))

    print(model.summary())

    assert model.output_shape == (None, 28, 28, 3)
    return model
Beispiel #3
0
def make_generator_model():
    """Build a generator for 32x32x3 images.

    A dense 4x4x256 seed is upsampled three times (4 -> 8 -> 16 -> 32) by
    stride-2 transposed convolutions, then a plain Conv2D with tanh produces
    the RGB output. BatchNormalization was deliberately left disabled in the
    original source.
    """
    model = tf.keras.Sequential()
    # Start from a 4x4 spatial seed.
    model.add(Dense(4 * 4 * 256, use_bias=False, input_dim=100))
    model.add(LeakyReLU())
    model.add(Reshape((4, 4, 256)))
    # Three identical stride-2 upsampling stages: 4x4 -> 8x8 -> 16x16 -> 32x32.
    for _ in range(3):
        model.add(C2DT(128, (4, 4), strides=(2, 2), padding='same',
                       use_bias=False))
        model.add(LeakyReLU())
    # Output head: 3 channels, tanh keeps pixels in [-1, 1].
    model.add(Conv2D(3, (3, 3), activation='tanh', padding='same'))
    assert model.output_shape == (None, 32, 32, 3)
    return model
Beispiel #4
0
def make_generator_model():
    """Build a DCGAN generator: 100-dim noise -> 28x28x1 tanh image.

    Stages: Dense -> 7x7x256, then transposed convs at stride 1 (7x7),
    stride 2 (14x14), and stride 2 (28x28, single tanh channel).

    Bug fix: the original asserted an output shape of (None, 56, 56, 1),
    which this stack can never produce (7 * 2 * 2 = 28 with 'same' padding),
    so the function raised AssertionError unconditionally. The assert now
    matches the model actually built.
    """
    model = tf.keras.Sequential()
    model.add(Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Reshape((7, 7, 256)))
    # Stride 1: spatial size stays 7x7.
    model.add(C2DT(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    # 7x7 -> 14x14.
    model.add(C2DT(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    # 14x14 -> 28x28, single tanh output channel.
    model.add(C2DT(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)
    return model
Beispiel #5
0
def make_generator_model():
    """Build a generator producing 56x56x1 images from 100-dim noise.

    Uses LeakyReLU instead of ReLU (per the ganhacks tips,
    https://github.com/soumith/ganhacks#6-use-soft-and-noisy-labels tip #5)
    and Gaussian weight initialization (stddev 0.02).
    """
    init = RandomNormal(mean=0.0, stddev=0.02)
    net = tf.keras.Sequential()

    # Project the noise vector and reshape to 7x7x128.
    net.add(Dense(7 * 7 * 128, kernel_initializer=init, input_shape=(100, )))
    net.add(Reshape((7, 7, 128)))  # try with 256
    net.add(BatchNormalization())
    net.add(LeakyReLU(alpha=0.2))

    # Two stride-2 upsampling stages: 7x7 -> 14x14 -> 28x28.
    for filters in (64, 32):
        net.add(C2DT(filters, (5, 5),
                     strides=(2, 2),
                     padding='same',
                     kernel_initializer=init))
        net.add(BatchNormalization())
        net.add(LeakyReLU(alpha=0.2))

    # Final 2x upsample to 56x56 with a single tanh-activated channel.
    net.add(C2DT(1, (5, 5),
                 strides=(2, 2),
                 padding='same',
                 activation='tanh',
                 kernel_initializer=init))

    print(net.summary())

    assert net.output_shape == (None, 56, 56, 1)

    return net
def define_generator_model(k):
    """Build a generator mapping a k-dim noise vector to a 112x112x1 image.

    Five transposed-conv stages grow the 7x7 seed:
    7x7 -> 7x7 -> 14x14 -> 28x28 -> 56x56 -> 112x112, ending in tanh.
    Shape asserts after each stage document the expected sizes (batch
    dimension left unconstrained as None).
    """
    model = Sequential()
    # Fully connected projection of the noise vector into a 7x7x256 seed.
    model.add(Dense(7 * 7 * 256, use_bias=False, input_shape=(k, )))
    model.add(BatchNormalization())
    # LeakyReLU's small negative slope keeps x < 0 units from dying.
    model.add(LeakyReLU())
    model.add(Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # batch size unconstrained

    # First transposed conv, stride 1: spatial size stays 7x7.
    model.add(C2DT(128, (5, 5), strides=(1, 1), padding='same',
                   use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(BatchNormalization())
    model.add(LeakyReLU())

    # Stride-2, zero-padded upsampling stages; filters halve each time.
    for filters, size in ((64, 14), (32, 28), (16, 56)):
        model.add(C2DT(filters, (5, 5), strides=(2, 2), padding='same',
                       use_bias=False))
        assert model.output_shape == (None, size, size, filters)
        model.add(BatchNormalization())
        model.add(LeakyReLU())

    # Last stage: single channel at 112x112, squashed by tanh.
    model.add(
        C2DT(1, (5, 5),
             strides=(2, 2),
             padding='same',
             use_bias=False,
             activation='tanh'))
    assert model.output_shape == (None, 112, 112, 1)

    return model
Beispiel #7
0
def make_generator_model():
    """Build a DCGAN generator: 100-dim noise -> 28x28x1 tanh image.

    Every learned layer uses a Gaussian weight init (stddev 0.02);
    activations are LeakyReLU(0.2) after BatchNorm.
    """
    init = RandomNormal(mean=0.0, stddev=0.02)
    model = Sequential()

    # Project the noise vector into a 7x7x128 feature map.
    model.add(Dense(7 * 7 * 128,
                    kernel_initializer=init,
                    use_bias=False,
                    input_shape=(100, )))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))

    # Upsample 7x7 -> 14x14.
    model.add(C2DT(64, (5, 5),
                   strides=(2, 2),
                   kernel_initializer=init,
                   padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))

    # NOTE: extra 64- and 32-filter upsampling stages were present but
    # disabled in the original source.

    # Upsample 14x14 -> 28x28 and emit one tanh-activated channel.
    model.add(C2DT(1, (5, 5),
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer=init,
                   activation='tanh'))

    print(model.output_shape)
    assert model.output_shape == (None, 28, 28, 1)
    return model