Example No. 1
# Imports implied by the snippets on this page (TensorFlow 2.x / Keras).
from tensorflow import random_normal_initializer
from tensorflow.keras import Model, Sequential, layers


def upsample(units,
             input_shape=None,
             apply_dropout=False,
             layer_type='dense',
             output_padding=(1, 1)):
    # Dense or transposed-conv upsampling block: layer -> BatchNorm -> (Dropout) -> ReLU.
    initializer = random_normal_initializer(0., 0.02)

    seq = Sequential()
    if layer_type == 'dense':
        seq.add(
            layers.Dense(units,
                         input_shape=[
                             input_shape,
                         ],
                         kernel_initializer=initializer,
                         use_bias=False))
    elif layer_type == 'conv':
        seq.add(
            layers.Conv2DTranspose(filters=units,
                                   kernel_size=3,
                                   strides=(2, 2),
                                   padding='same',
                                   input_shape=input_shape,
                                   kernel_initializer=initializer,
                                   use_bias=False,
                                   output_padding=output_padding))
    else:
        raise ValueError(f'unknown layer_type: {layer_type!r}')
    seq.add(layers.BatchNormalization())
    if apply_dropout:
        seq.add(layers.Dropout(0.5))
    seq.add(layers.ReLU())

    return seq
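A quick smoke test for the block above (a minimal sketch; the batch size and shapes are illustrative and assume the imports at the top of Example No. 1):

import tensorflow as tf

# Dense variant: maps a 64-dim vector to 128 units.
up_dense = upsample(128, input_shape=64)
print(up_dense(tf.random.normal([4, 64])).shape)  # (4, 128)

# Conv variant: stride (2, 2) with output_padding=(1, 1) doubles the spatial dims.
up_conv = upsample(32, input_shape=[10, 25, 16], layer_type='conv')
print(up_conv(tf.random.normal([4, 10, 25, 16])).shape)  # (4, 20, 50, 32)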
Example No. 2
def downsample(units,
               input_shape=None,
               apply_batchnorm=True,
               layer_type='dense'):
    # Dense or strided-conv downsampling block: layer -> (BatchNorm) -> LeakyReLU.
    initializer = random_normal_initializer(0., 0.02)

    seq = Sequential()
    if layer_type == 'dense':
        seq.add(
            layers.Dense(units,
                         input_shape=[
                             input_shape,
                         ],
                         kernel_initializer=initializer,
                         use_bias=False))
    elif layer_type == 'conv':
        seq.add(
            layers.Conv2D(filters=units,
                          kernel_size=3,
                          strides=(2, 2),
                          padding='same',
                          input_shape=input_shape,
                          kernel_initializer=initializer,
                          use_bias=False))
    else:
        raise ValueError(f'unknown layer_type: {layer_type!r}')
    if apply_batchnorm:
        seq.add(layers.BatchNormalization())

    seq.add(layers.LeakyReLU())
    return seq
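The mirror-image check for the downsampling block (again a sketch with made-up shapes):

import tensorflow as tf

down_dense = downsample(64, input_shape=128)
print(down_dense(tf.random.normal([4, 128])).shape)  # (4, 64)

# Conv variant: the stride-2 convolution halves the spatial dims, rounding up.
down_conv = downsample(32, input_shape=[20, 49, 1], layer_type='conv')
print(down_conv(tf.random.normal([4, 20, 49, 1])).shape)  # (4, 10, 25, 32)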
Example No. 3
def pix2pix_discriminator():
    # Fully connected discriminator over 99-dimensional inputs: four
    # downsampling blocks followed by a single real/fake logit.
    initializer = random_normal_initializer(0., 0.02)

    inputs = layers.Input(shape=[99], name='cp')

    x = downsample(64, 99, False)(inputs)
    x = downsample(128, 64)(x)
    x = downsample(256, 128)(x)
    x = downsample(512, 256)(x)
    last = layers.Dense(1, kernel_initializer=initializer)(x)

    return Model(inputs=inputs, outputs=last)
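To sanity-check the discriminator (the batch size and random inputs are illustrative):

import tensorflow as tf

disc = pix2pix_discriminator()
scores = disc(tf.random.normal([8, 99]))
print(scores.shape)  # (8, 1): one real/fake logit per sample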
Example No. 4
def pix2pix_generator(c_dim, z_dim=None):
    # U-Net style dense generator; z_dim, if given, adds a noise input.
    # (z_dim or 0) keeps the width arithmetic valid when z_dim is None.
    down_stack = [
        downsample(units=1024,
                   input_shape=c_dim + (z_dim or 0),
                   apply_batchnorm=False),
        downsample(units=512, input_shape=1024),
        downsample(units=256, input_shape=512),
        downsample(units=128, input_shape=256),
        downsample(units=64, input_shape=128),
        downsample(units=64, input_shape=64),
    ]

    up_stack = [
        upsample(units=64, input_shape=64, apply_dropout=False),
        upsample(units=128, input_shape=128, apply_dropout=False),
        upsample(units=256, input_shape=256),
        upsample(units=512, input_shape=512),
        upsample(units=1024, input_shape=1024),
    ]
    initializer = random_normal_initializer(0., 0.02)
    # OUTPUT_SIZE is assumed to be defined elsewhere at module level.
    last = layers.Dense(OUTPUT_SIZE,
                        kernel_initializer=initializer,
                        activation='tanh')

    inputs = layers.Input(shape=[c_dim])
    if z_dim:
        z = layers.Input(shape=[z_dim])
        x = layers.concatenate([inputs, z])
        inp = [inputs, z]
    else:
        x = inputs
        inp = inputs

    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)

    # Drop the innermost activation, then reverse so skips pair with the decoder.
    skips = reversed(skips[:-1])

    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = layers.concatenate([x, skip])

    x = last(x)

    return Model(inputs=inp, outputs=x)
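A usage sketch; c_dim=99 matches the discriminator above, while z_dim=8 and OUTPUT_SIZE=99 are hypothetical values (the original defines OUTPUT_SIZE elsewhere):

import tensorflow as tf

OUTPUT_SIZE = 99  # hypothetical value; must be set before building the generator

gen = pix2pix_generator(c_dim=99, z_dim=8)
out = gen([tf.random.normal([8, 99]), tf.random.normal([8, 8])])
print(out.shape)  # (8, 99), squashed to [-1, 1] by the tanh output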
Example No. 5
def discriminator_2d():
    # PatchGAN-style convolutional discriminator over 20x49x1 inputs;
    # the final layer emits a grid of logits rather than a single score.
    initializer = random_normal_initializer(0., 0.02)
    inputs = layers.Input(shape=[20, 49, 1])

    x = downsample(32, input_shape=[20, 49, 1], layer_type='conv')(inputs)
    x = downsample(64, input_shape=[10, 25, 32], layer_type='conv')(x)
    x = downsample(128, input_shape=[5, 13, 64], layer_type='conv')(x)
    x = layers.Conv2D(256,
                      kernel_size=3,
                      strides=1,
                      kernel_initializer=initializer,
                      use_bias=False,
                      padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(1,
                      kernel_size=3,
                      strides=1,
                      kernel_initializer=initializer,
                      padding='same')(x)

    return Model(inputs, x)
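Tracing shapes through the 2-D discriminator (illustrative batch of random tensors):

import tensorflow as tf

disc2d = discriminator_2d()
patch = disc2d(tf.random.normal([4, 20, 49, 1]))
print(patch.shape)  # (4, 3, 7, 1): a patch of logits, not a single score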
Example No. 6
def generator_2d(inputs_dim, z_dim):
    # U-Net style convolutional generator; the output_padding values keep
    # the upsampled feature maps aligned with the skip connections.
    down_stack = [
        downsample(32,
                   input_shape=[20, 49, inputs_dim + z_dim],
                   apply_batchnorm=False,
                   layer_type='conv'),
        downsample(64, input_shape=[10, 25, 32], layer_type='conv'),
        downsample(128, input_shape=[5, 13, 64], layer_type='conv'),
        downsample(256, input_shape=[3, 7, 128], layer_type='conv'),
        downsample(256, input_shape=[2, 4, 256], layer_type='conv'),
    ]

    up_stack = [
        upsample(256,
                 input_shape=[1, 2, 256],
                 apply_dropout=False,
                 layer_type='conv',
                 output_padding=(1, 1)),
        upsample(128,
                 input_shape=[2, 4, 512],
                 layer_type='conv',
                 output_padding=(0, 0)),
        upsample(64,
                 input_shape=[3, 7, 256],
                 layer_type='conv',
                 output_padding=(0, 0)),
        upsample(32,
                 input_shape=[5, 13, 128],
                 layer_type='conv',
                 output_padding=(1, 0)),
    ]
    initializer = random_normal_initializer(0., 0.02)
    last = layers.Conv2DTranspose(1,
                                  kernel_size=3,
                                  strides=2,
                                  padding='same',
                                  output_padding=(1, 0),
                                  kernel_initializer=initializer,
                                  activation='tanh')

    inputs = layers.Input(shape=[20, 49, inputs_dim])
    if z_dim:
        z = layers.Input(shape=[20, 49, z_dim])
        x = layers.concatenate([inputs, z])
        inp = [inputs, z]
    else:
        x = inputs
        inp = inputs

    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)

    skips = reversed(skips[:-1])

    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = layers.concatenate([x, skip])

    x = last(x)

    return Model(inputs=inp, outputs=x)
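Finally, a shape check for the 2-D generator; inputs_dim=1 and z_dim=1 are illustrative values, not something the original fixes:

import tensorflow as tf

gen2d = generator_2d(inputs_dim=1, z_dim=1)
cond = tf.random.normal([4, 20, 49, 1])
noise = tf.random.normal([4, 20, 49, 1])
print(gen2d([cond, noise]).shape)  # (4, 20, 49, 1), thanks to the output_padding choices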