コード例 #1
0
ファイル: VAC.py プロジェクト: nerdk312/VAE_CNN_Keras_TF2.0
 def _build_decoder_layers(self):
     """Build and return the decoder model (latent vector -> image).

     A Dense + Reshape stage recovers the conv feature-map shape stored in
     self.shape, two stride-2 Conv2DTranspose stages upsample it, and a
     final 1-filter sigmoid Conv2DTranspose produces the output image.

     Returns:
         A `Model` named 'decoder' mapping (latent_dim,) -> image tensor.
     """
     latent_inputs = KL.Input(shape=(self.latent_dim, ), name='z_sampling')
     x = KL.Dense(self.shape[1] * self.shape[2] * self.shape[3],
                  activation='relu')(latent_inputs)
     x = KL.Reshape((self.shape[1], self.shape[2], self.shape[3]))(x)
     # Bug fix: use a local copy so repeated calls do not permanently
     # quarter self.filters (the original did `self.filters //= 2` in the
     # loop, mutating instance state and making the builder non-idempotent).
     filters = self.filters
     for i in range(2):
         x = KL.Conv2DTranspose(filters=filters,
                                kernel_size=self.kernel_size,
                                activation='relu',
                                strides=2,
                                padding='same')(x)
         filters //= 2
     # One more transpose conv with filters=1 to get the correct output shape.
     decoder_out = KL.Conv2DTranspose(filters=1,
                                      kernel_size=self.kernel_size,
                                      activation='sigmoid',
                                      padding='same',
                                      name='decoder_output')(x)
     decoder = Model(inputs=latent_inputs,
                     outputs=decoder_out,
                     name='decoder')
     decoder.summary()
     return decoder
コード例 #2
0
ファイル: AnimeFaces.py プロジェクト: ApproachQiian/GitRepos1
def generator_model():
    """Generator: 100-d noise vector -> 64x64x3 tanh image."""
    stack = [
        layers.Dense(8 * 8 * 256, input_shape=(100,), use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((8, 8, 256)),
        # The stride-2 stages double the spatial size: 8 -> 16 -> 32 -> 64.
        layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same',
                               use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        # Final layer: 3 channels, tanh output (no batch norm).
        layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False, activation='tanh'),
    ]
    model = tf.keras.Sequential()
    for layer in stack:
        model.add(layer)
    return model
コード例 #3
0
ファイル: dcgan.py プロジェクト: 2019125078/DCGAN_tensorflow2
def make_generator_model(output_ch):
    """DCGAN generator: 100-d noise -> image with `output_ch` channels.

    Starts from a 1x1x1024 map and applies six stride-2 transpose convs,
    halving the channel count at each hidden stage (512 ... 32).
    """
    init_ch = 1024
    model = tf.keras.Sequential(name='Generator')
    model.add(
        layers.Dense(
            init_ch,
            input_shape=(100, ),
            use_bias=False,
            kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    model.add(layers.ReLU())
    model.add(layers.Reshape((1, 1, init_ch)))

    # Hidden upsampling stages: channels 512, 256, 128, 64, 32.
    for stage in range(5):
        model.add(
            layers.Conv2DTranspose(
                init_ch // 2 ** (stage + 1), (5, 5), (2, 2),
                padding='same',
                use_bias=False,
                kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())

    # Output stage: tanh activation, no batch norm.
    model.add(
        layers.Conv2DTranspose(
            output_ch, (5, 5), (2, 2),
            padding='same',
            activation='tanh',
            use_bias=False,
            kernel_initializer=initializers.RandomNormal(stddev=0.02)))

    model.summary()
    return model
コード例 #4
0
    def create_generator(self):
        """Build the self-attention GAN generator: z -> (img_w, img_h, 3) tanh image.

        Three spectral-normalized stride-2 transpose convs upsample the
        4x4 base map, an Attention layer is inserted, one more upsampling
        stage follows, and a stride-1 conv produces the 3-channel output.
        """
        cfg = self.cfg
        filters = cfg.filters_gen
        base_shape = [cfg.img_w // 2**4, cfg.img_h // 2**4, filters]
        model = tf.keras.Sequential()
        model.add(layers.InputLayer(input_shape=(cfg.z_dim, )))
        model.add(layers.Dense(tf.reduce_prod(base_shape)))
        model.add(layers.Reshape(base_shape))
        model.add(layers.ReLU())
        # [b, 4, 4, filters] -> [b, 32, 32, filters // 2**3]
        for _ in range(3):
            filters //= 2
            model.add(
                SpectralNormalization(
                    layers.Conv2DTranspose(filters, 4, 2, 'same',
                                           use_bias=False)))
            model.add(layers.BatchNormalization())
            model.add(layers.LeakyReLU(cfg.leakrelu_alpha))

        model.add(Attention(channels=filters))
        # [b, 32, 32, filters // 2**3] -> [b, 64, 64, filters // 2**4]
        model.add(
            SpectralNormalization(
                layers.Conv2DTranspose(filters // 2, 4, 2, 'same',
                                       use_bias=False)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU(cfg.leakrelu_alpha))
        model.add(Attention(channels=filters // 2))
        # [b, w, h, filters // 2**4] -> [b, w, h, 3]
        model.add(layers.Conv2DTranspose(3, 3, 1, 'same', activation='tanh'))

        return model
コード例 #5
0
 def __init__(self, ngf, latent_space, out_channels, output_dim):
     """DCGAN-style generator network.

     Args:
         ngf: base number of generator filters.
         latent_space: dimensionality of the input noise vector.
         out_channels: number of channels in the generated image.
         output_dim: stored on the instance; not used when building layers.
     """
     super(Generator, self).__init__()
     self.ngf = ngf
     self.output_dim = output_dim
     net = tf.keras.Sequential()
     # Project the latent vector and reshape to a 4x4 map with 8*ngf channels.
     net.add(layers.Dense(4 * 4 * 8 * self.ngf,
                          use_bias=False,
                          input_shape=(latent_space, )))
     net.add(layers.ReLU())
     net.add(layers.Reshape((4, 4, 8 * self.ngf)))
     # Upsampling stages: (channels, strides, padding) per transpose conv.
     for channels, stride, pad in ((self.ngf * 4, 1, "valid"),
                                   (self.ngf * 2, 2, "valid"),
                                   (self.ngf, 2, "same")):
         net.add(layers.Conv2DTranspose(channels,
                                        kernel_size=(4, 4),
                                        strides=stride,
                                        padding=pad,
                                        use_bias=False))
         net.add(layers.BatchNormalization())
         net.add(layers.ReLU())
     # Output layer: no norm or activation here.
     net.add(layers.Conv2DTranspose(out_channels,
                                    kernel_size=(4, 4),
                                    strides=2,
                                    padding="same",
                                    use_bias=False))
     self.main = net
コード例 #6
0
def get_decoder(latent_dim):
    """Build a convolutional decoder: latent vector -> 3-channel sigmoid image.

    A Dense + Reshape produces a 7x7x64 base map, five Conv2DTranspose +
    BatchNorm + ReLU stages follow (strides 3, 2, 2, 1, 1), and a final
    sigmoid transpose conv emits 3 channels. Prints a summary.
    """
    latent_inputs = keras.Input(shape=(latent_dim, ))
    h = layers.Dense(7 * 7 * 64, activation="relu")(latent_inputs)
    h = layers.Reshape((7, 7, 64))(h)

    # (filters, strides) for each hidden transpose-conv stage.
    for n_filters, stride in ((512, 3), (256, 2), (256, 2),
                              (128, 1), (64, 1)):
        h = layers.Conv2DTranspose(n_filters,
                                   4,
                                   activation=None,
                                   strides=stride,
                                   padding="same",
                                   use_bias=False)(h)
        h = layers.BatchNormalization()(h)
        h = layers.ReLU()(h)

    decoder_outputs = layers.Conv2DTranspose(3,
                                             4,
                                             activation="sigmoid",
                                             padding="same")(h)
    decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
    decoder.summary()

    return decoder
コード例 #7
0
ファイル: resnet.py プロジェクト: bruinxiong/NCSN-TF2.0
    def __init__(self,
                 activation,
                 filters,
                 is_encoder,
                 kernel_size=3,
                 resize=False):
        """Residual block: plain convs for the encoder, transpose convs otherwise.

        When `resize` is True the block changes resolution with stride 2
        (applied by conv1 in the encoder, by conv2 in the decoder).
        """
        super(ResidualBlock, self).__init__()

        # NOTE(review): the normalization type is assumed to be BatchNorm;
        # the reference implementation reportedly uses group normalization —
        # confirm whether the difference matters.
        self.norm1 = layers.BatchNormalization()
        self.norm2 = layers.BatchNormalization()
        self.activation = activation

        self.filters = filters
        self.is_encoder = is_encoder
        self.resize = resize
        self.strides = strides = 2 if resize else 1
        padding = 'same'  # if strides == 1 else 'valid'

        if is_encoder:
            # Encoder: possibly-strided conv first, then a stride-1 conv.
            self.conv1 = layers.Conv2D(filters, kernel_size,
                                       strides=strides, padding=padding)
            self.conv2 = layers.Conv2D(filters, kernel_size,
                                       strides=1, padding='same')
        else:
            # Decoder: stride-1 transpose conv first, strided one second.
            self.conv1 = layers.Conv2DTranspose(filters, kernel_size,
                                                strides=1, padding='same')
            self.conv2 = layers.Conv2DTranspose(filters, kernel_size,
                                                strides=strides,
                                                padding=padding)
コード例 #8
0
ファイル: degan_ae.py プロジェクト: Alexx882/ImageGenerator
    def build_decoder(self, latent_dim):
        '''
        Build the decoder network.

        Maps a `latent_dim`-sized latent vector to a 28x28x1 output.
        Note: after the sigmoid transpose-conv, the result is flattened
        through an extra Dense(784) (no activation) and reshaped back to
        28x28x1 — so the final output activation is linear, not sigmoid.
        If `self.load` is set, previously saved weights are restored.
        '''
        latent_inputs = keras.Input(shape=(latent_dim, ))
        x = layers.Dense(128, activation="relu")(latent_inputs)
        x = layers.Dense(784, activation="relu")(x)
        x = layers.Reshape((28, 28, 1))(x)

        # Three stride-2 upsampling stages: 28 -> 56 -> 112 -> 224.
        x = layers.Conv2DTranspose(128,
                                   3,
                                   activation="relu",
                                   strides=2,
                                   padding="same")(x)
        x = layers.Conv2DTranspose(64,
                                   3,
                                   activation="relu",
                                   strides=2,
                                   padding="same")(x)
        x = layers.Conv2DTranspose(32,
                                   3,
                                   activation="relu",
                                   strides=2,
                                   padding="same")(x)
        x = layers.Conv2DTranspose(1, 5, activation="sigmoid",
                                   padding="same")(x)
        # Project back down to a 28x28x1 output via a linear Dense layer.
        x = layers.Flatten()(x)
        x = layers.Dense(784)(x)
        x = layers.Reshape((28, 28, 1))(x)

        decoder = keras.Model(latent_inputs, x, name="decoder")
        # decoder.summary()

        # Optionally restore previously trained weights.
        if self.load:
            decoder.load_weights(f"{self.path}/decoder_weights.h5")

        return decoder
コード例 #9
0
ファイル: GAN.py プロジェクト: kuroipearls/dcgan_tf2keras
    def __init__(self, args):
        """DCGAN pair: 28x28x1 discriminator and z_dim -> 28x28x1 generator."""
        super(GAN, self).__init__()
        self.z_dim = args.z_dim

        # Discriminator: two strided convs, global max-pool, single logit.
        d = keras.Sequential(name="discriminator")
        d.add(keras.Input(shape=(28, 28, 1)))
        d.add(layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"))
        d.add(layers.LeakyReLU(alpha=0.2))
        d.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
        d.add(layers.LeakyReLU(alpha=0.2))
        d.add(layers.GlobalMaxPooling2D())
        d.add(layers.Dense(1))

        # Generator: project noise into a 7x7x128 map, then upsample twice.
        g = keras.Sequential(name="generator")
        g.add(keras.Input(shape=(self.z_dim, )))
        # We want to generate 128 coefficients to reshape into a 7x7x128 map.
        g.add(layers.Dense(7 * 7 * 128))
        g.add(layers.LeakyReLU(alpha=0.2))
        g.add(layers.Reshape((7, 7, 128)))
        g.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2),
                                     padding="same"))
        g.add(layers.LeakyReLU(alpha=0.2))
        g.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2),
                                     padding="same"))
        g.add(layers.LeakyReLU(alpha=0.2))
        g.add(layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"))

        self.discriminator = d
        self.generator = g
コード例 #10
0
def make_generator_model():
    """MNIST DCGAN generator: 100-d noise -> 28x28x1 tanh image."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    # Note: None is the batch size.
    assert model.output_shape == (None, 7, 7, 256)

    # Hidden upsampling stages: (filters, strides, expected output shape).
    for n_filters, strides, expected in ((128, (1, 1), (None, 7, 7, 128)),
                                         (64, (2, 2), (None, 14, 14, 64))):
        model.add(layers.Conv2DTranspose(n_filters, (5, 5),
                                         strides=strides,
                                         padding='same',
                                         use_bias=False))
        assert model.output_shape == expected
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

    # Output stage: single channel, tanh activation.
    model.add(layers.Conv2DTranspose(1, (5, 5),
                                     strides=(2, 2),
                                     padding='same',
                                     use_bias=False,
                                     activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model
コード例 #11
0
ファイル: VAE.py プロジェクト: MyRespect/CNN_Application
 def __init__(self, latent_dim):
     """Convolutional VAE with a 28x28x1 inference net and matching generative net.

     The inference net outputs 2 * latent_dim units (mean and log-variance
     concatenated, no activation); the generative net maps a latent vector
     back to a 28x28x1 logit image (no activation on the final layer).
     """
     super(CVAE, self).__init__()
     self.latent_dim = latent_dim
     encoder_layers = [
         layers.InputLayer(input_shape=(28, 28, 1)),
         layers.Conv2D(filters=32,
                       kernel_size=3,
                       strides=(2, 2),
                       activation='relu'),
         layers.Conv2D(filters=64,
                       kernel_size=3,
                       strides=(2, 2),
                       activation='relu'),
         layers.Flatten(),
         # No activation: raw mean / log-variance outputs.
         layers.Dense(latent_dim + latent_dim),
     ]
     decoder_layers = [
         layers.InputLayer(input_shape=(latent_dim, )),
         layers.Dense(units=7 * 7 * 32, activation='relu'),
         layers.Reshape(target_shape=(7, 7, 32)),
         layers.Conv2DTranspose(filters=64,
                                kernel_size=3,
                                strides=(2, 2),
                                padding='SAME',
                                activation='relu'),
         layers.Conv2DTranspose(filters=32,
                                kernel_size=3,
                                strides=(2, 2),
                                padding='SAME',
                                activation='relu'),
         # No activation on the output layer: logits.
         layers.Conv2DTranspose(filters=1,
                                kernel_size=3,
                                strides=(1, 1),
                                padding='SAME'),
     ]
     self.inference_net = tf.keras.Sequential(encoder_layers)
     self.generative_net = tf.keras.Sequential(decoder_layers)
コード例 #12
0
ファイル: resnet.py プロジェクト: bruinxiong/NCSN-TF2.0
    def __init__(self,
                 activation,
                 filters,
                 is_encoder,
                 kernel_size=3,
                 dilation=1,
                 resize=False):
        """Conditional residual block with instance-norm++ and dilated convs.

        Encoder blocks use Conv2D; decoder blocks use Conv2DTranspose. Both
        convs share the same filters, kernel size and dilation rate.
        """
        super(ConditionalResidualBlock, self).__init__()

        self.norm1 = ConditionalInstanceNormalizationPlusPlus2D()
        self.norm2 = ConditionalInstanceNormalizationPlusPlus2D()
        self.activation = activation
        self.resize = resize
        self.filters = filters
        self.is_encoder = is_encoder
        # Pick the conv class once; both convs are configured identically.
        conv_cls = layers.Conv2D if is_encoder else layers.Conv2DTranspose
        self.conv1 = conv_cls(filters,
                              kernel_size,
                              dilation_rate=(dilation, dilation),
                              padding="same")
        self.conv2 = conv_cls(filters,
                              kernel_size,
                              dilation_rate=(dilation, dilation),
                              padding="same")
        self.adjust_skip = None
コード例 #13
0
ファイル: model2.py プロジェクト: lixumin-zai/listen
def make_generator_model():
    """Generator: 100-d noise -> (4, 100, 1) map via width-only upsampling."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(4 * 25 * 128, use_bias=False, input_shape=(100, )))

    model.add(layers.Reshape((4, 25, 128)))
    assert model.output_shape == (None, 4, 25, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # strides=(1, 2) upsamples the width only: 25 -> 50.
    model.add(layers.Conv2DTranspose(64, (4, 5),
                                     strides=(1, 2),
                                     padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 4, 50, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Second width-only upsampling to the final single-channel map: 50 -> 100.
    model.add(layers.Conv2DTranspose(1, (4, 5),
                                     strides=(1, 2),
                                     padding='same',
                                     use_bias=False))
    model.add(layers.LeakyReLU())
    assert model.output_shape == (None, 4, 100, 1)

    return model
コード例 #14
0
ファイル: ch12_GAN.py プロジェクト: Netfather/LearningSmth
    def __init__(self):
        """Generator layers: expand a [b, gen_dim] latent vector toward a [b, 64, 64, 3] image."""
        super(Generator, self).__init__()
        # Project the latent vector so it can later be reshaped to
        # (batch, 3, 3, 512). The latent vector may come from a pretrained
        # Xception network (per the original author's note).
        self.fc1 = layers.Dense(3 * 3 * 512)

        # Keras output-length rule for Conv2DTranspose (output_padding=None):
        # if output_padding is None:
        #     if padding == 'valid':
        #         # note the call to `max` below!
        #         length = input_length * stride + max(filter_size - stride, 0)
        #     elif padding == 'full':
        #         length = input_length * stride - (stride + filter_size - 2)
        #     elif padding == 'same':
        #         length = input_length * stride

        self.deconv1 = layers.Conv2DTranspose(256, 3, 3, 'valid')
        # deconv1: 3*3 + max(3-3, 0) = 9 -> (batch, 9, 9, 256)
        self.bn1 = layers.BatchNormalization()
        self.deconv2 = layers.Conv2DTranspose(128, 5, 2, 'valid')
        # deconv2: 9*2 + max(5-2, 0) = 21 -> (batch, 21, 21, 128)
        self.bn2 = layers.BatchNormalization()
        self.deconv3 = layers.Conv2DTranspose(3, 4, 3, 'valid')
        # deconv3: 21*3 + max(4-3, 0) = 64 -> (batch, 64, 64, 3)
コード例 #15
0
def autoencoder():
    """Build a shared-layer conv autoencoder on 28x28x1 inputs and return it.

    Also builds (and summarizes) the standalone encoder sub-model, which
    shares its layers with the returned autoencoder.

    Returns:
        The `keras.Model` named 'autoencoder' mapping images to images.
    """
    # Shared-layer model.
    encoder_input = keras.Input(shape=(28, 28, 1), name='img')
    x = layers.Conv2D(16, 3, activation='relu')(encoder_input)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPooling2D(3)(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.Conv2D(16, 3, activation='relu')(x)
    encoder_output = layers.GlobalMaxPooling2D()(x)

    encoder = keras.Model(encoder_input, encoder_output, name='encoder')
    encoder.summary()

    # Decoder: reinterpret the 16-d code as a 4x4x1 map and upsample.
    x = layers.Reshape((4, 4, 1))(encoder_output)
    x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
    x = layers.Conv2DTranspose(32, 3, activation='relu')(x)
    x = layers.UpSampling2D(3)(x)
    x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
    decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)

    autoencoder = keras.Model(encoder_input,
                              decoder_output,
                              name='autoencoder')
    autoencoder.summary()
    # Bug fix: the original built the model but never returned it.
    return autoencoder
コード例 #16
0
def conv_t(x, f, k, s, a, p, bn):
    """Transpose-conv block: Conv2DTranspose, optional BN, optional activation.

    Args:
        x: input tensor.
        f: number of filters.
        k: kernel size.
        s: strides.
        a: activation selector; "relu" or "lrelu" (anything else: none).
        p: padding mode.
        bn: whether to apply BatchNormalization (momentum 0.9).
    """
    out = layers.Conv2DTranspose(filters=f,
                                 kernel_size=k,
                                 padding=p,
                                 strides=s)(x)
    if bn:
        out = layers.BatchNormalization(momentum=0.9)(out)
    if a == "relu":
        return layers.Activation(a)(out)
    if a == "lrelu":
        return layers.LeakyReLU()(out)
    return out
コード例 #17
0
def conv_block(x,
               filters,
               kernel_size,
               strides,
               padding,
               dropout=False,
               up=False):
    """Conv (or transpose conv when `up`) + optional dropout + ReLU."""
    conv_cls = layers.Conv2DTranspose if up else layers.Conv2D
    out = conv_cls(filters, kernel_size, strides, padding)(x)
    # Deliberately `!= False` (not truthiness): matches the original so that
    # e.g. dropout=None would still add a Dropout layer.
    if dropout != False:
        out = layers.Dropout(dropout)(out)
    return layers.ReLU()(out)
コード例 #18
0
def decoder_block(filters, size=3, strides=2, apply_instancenorm=True):
    """Upsampling block: transpose conv, optional instance norm, LeakyReLU."""
    kernel_init = tf.random_normal_initializer(0., 0.02)
    gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)

    # Transposed convolutional layer.
    block = keras.Sequential([
        layers.Conv2DTranspose(filters, size, strides, padding='same',
                               kernel_initializer=kernel_init,
                               use_bias=False),
    ])

    # Normalization (optional).
    if apply_instancenorm:
        block.add(
            tfa.layers.InstanceNormalization(gamma_initializer=gamma_init))

    # Activation.
    block.add(layers.LeakyReLU())
    return block
コード例 #19
0
def decoder_112x112(code_size):
    """Decoder: `code_size` vector -> 112x112x1 sigmoid image.

    A 3x3x64 base map is upsampled by five stride-2 transpose convs; the
    first uses 'valid' padding (3 -> 7), the rest 'same' (7 -> 14 -> 28 ->
    56 -> 112). Hidden stages use ELU, the output stage sigmoid.
    """
    net = keras.models.Sequential()
    net.add(L.InputLayer((code_size, )))
    net.add(L.Dense(3 * 3 * 64, activation='elu'))
    net.add(L.Reshape([3, 3, 64]))
    # (filters, activation, padding) per stride-2 transpose-conv stage.
    for n_filters, act, pad in ((32, 'elu', 'valid'),
                                (16, 'elu', 'same'),
                                (8, 'elu', 'same'),
                                (4, 'elu', 'same'),
                                (1, 'sigmoid', 'same')):
        net.add(L.Conv2DTranspose(filters=n_filters,
                                  kernel_size=(3, 3),
                                  strides=2,
                                  activation=act,
                                  padding=pad))
    return net
コード例 #20
0
ファイル: model_cond.py プロジェクト: zouhir10/Tutoriels
def generator_model():
    """Conditional generator: (100-d noise, 10-d class vector) -> 28x28x1 tanh image."""
    noise_input = layers.Input(shape=(100), dtype='float32')
    class_input = layers.Input(shape=(10), dtype='float32')

    # Condition the generator by concatenating noise and class inputs.
    h = layers.concatenate([noise_input, class_input])

    h = layers.Dense(7*7*256, use_bias=False)(h)
    h = layers.BatchNormalization()(h)
    h = layers.LeakyReLU()(h)

    h = layers.Reshape((7, 7, 256))(h)

    # 7x7 -> 7x7 -> 14x14 -> 28x28.
    h = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same',
                               use_bias=False)(h)
    h = layers.BatchNormalization()(h)
    h = layers.LeakyReLU()(h)

    h = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same',
                               use_bias=False)(h)
    h = layers.BatchNormalization()(h)
    h = layers.LeakyReLU()(h)

    out = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                 use_bias=False, activation='tanh')(h)

    return models.Model(inputs=[noise_input, class_input], outputs=out)
コード例 #21
0
    def __make_generator_model(self):
        """Build the generator: latent vector -> 28x28x1 tanh image.

        The latent vector is projected (no bias — BatchNormalization follows)
        into 256 low-resolution 7x7 maps, then upsampled twice with
        transposed convolutions (7 -> 14 -> 28). 'same' padding pads evenly
        so no data is lost when the kernel size is not a sub-multiple of the
        input size; LeakyReLU is the customary GAN activation; tanh matches
        data normalized to [-1, 1] instead of 0-255.
        """
        model = keras.Sequential([
            layers.Dense(256 * 7 * 7,
                         input_shape=(self.__latent_dim, ),
                         use_bias=False),
            # Keeps activations near mean 0 / std 1.
            layers.BatchNormalization(),
            layers.Reshape((7, 7, 256)),
            layers.Conv2DTranspose(128, (4, 4), strides=(1, 1),
                                   padding='same'),
            layers.BatchNormalization(),
            layers.LeakyReLU(alpha=0.2),
            # 64 maps of 14x14 after this stage.
            layers.Conv2DTranspose(64, (4, 4), strides=(2, 2),
                                   padding='same'),
            layers.BatchNormalization(),
            layers.LeakyReLU(alpha=0.2),
            # Single-channel tanh output.
            layers.Conv2DTranspose(1, (4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   activation='tanh'),
        ])
        assert model.output_shape == (None, 28, 28, 1)

        return model
コード例 #22
0
def build_decoder(latent_dim):
    """Decoder: latent vector -> single-channel sigmoid image via stacked transpose convs."""
    z = layers.Input(shape=latent_dim)

    # Treat the latent vector as a 1x1 feature map.
    h = layers.Reshape([1, 1, latent_dim])(z)

    # (filters, kernel, strides, padding) for each ReLU transpose-conv stage.
    stages = ((128, (1, 1), (1, 1), 'valid'),
              (64, (3, 3), (1, 1), 'valid'),
              (64, (3, 3), (1, 1), 'valid'),
              (48, (3, 3), (2, 2), 'same'),
              (48, (3, 3), (1, 1), 'valid'),
              (32, (3, 3), (2, 2), 'same'),
              (16, (3, 3), (1, 1), 'valid'))
    for n_filters, kernel, strides, pad in stages:
        h = layers.Conv2DTranspose(n_filters, kernel, strides=strides,
                                   padding=pad, activation='relu')(h)

    output = layers.Conv2DTranspose(1, (3, 3), padding='valid',
                                    activation='sigmoid')(h)

    return Model(z, output, name='decoder')
コード例 #23
0
    def __init__(self, latent_dim) -> None:
        """Convolutional VAE nets: 28x28x1 encoder and matching decoder.

        The encoder ends in a Dense layer of 2 * latent_dim units (mean and
        log-variance concatenated); the decoder maps a latent vector back to
        a 28x28x1 map with a final ReLU transpose conv.
        """
        super().__init__()

        self.latent_dim = latent_dim

        encoder = tf.keras.Sequential()
        encoder.add(layers.InputLayer(input_shape=[28, 28, 1]))
        encoder.add(layers.Conv2D(filters=32, kernel_size=3, strides=2,
                                  padding="same", activation="relu"))
        encoder.add(layers.Conv2D(filters=64, kernel_size=3, strides=2,
                                  padding="same", activation="relu"))
        encoder.add(layers.Flatten())
        # Mean and log-variance heads, concatenated.
        encoder.add(layers.Dense(latent_dim + latent_dim))
        self.inference_net = encoder

        decoder = tf.keras.Sequential()
        decoder.add(layers.InputLayer(input_shape=[latent_dim]))
        decoder.add(layers.Dense(7*7*32, activation="relu"))
        decoder.add(layers.Reshape([7, 7, 32]))
        # 7x7 -> 14x14 -> 28x28, then a stride-1 output conv.
        decoder.add(layers.Conv2DTranspose(filters=64, kernel_size=3,
                                           strides=2, padding="same",
                                           activation="relu"))
        decoder.add(layers.Conv2DTranspose(filters=32, kernel_size=3,
                                           strides=2, padding="same",
                                           activation="relu"))
        decoder.add(layers.Conv2DTranspose(filters=1, kernel_size=3,
                                           strides=1, padding="same",
                                           activation="relu"))
        self.generative_net = decoder
コード例 #24
0
    def __init__(self, latent_dim, input_shape=(32, 32, 3)):
        """Convolutional VAE.

        Args:
            latent_dim: size of the latent code; the encoder outputs
                2 * latent_dim units (mean and log-variance concatenated).
            input_shape: (H, W, C) of the input images; H and W must be
                divisible by 4 so the decoder reconstructs exactly.

        Bug fix vs. the original: the decoder hard-coded a 7x7 seed map and
        produced 28x28x3 outputs, which cannot match the default 32x32x3
        input in the VAE reconstruction loss. The encoder now uses 'same'
        padding (32 -> 16 -> 8) and the decoder is sized from `input_shape`
        (8 -> 16 -> 32), with the output channel count taken from the input.
        """
        super(ConvVAE, self).__init__()
        self.latent_dim = latent_dim

        height, width, channels = input_shape
        if height % 4 or width % 4:
            raise ValueError(
                'input_shape spatial dims must be divisible by 4')

        self.encoder = Sequential([
            layers.Input(input_shape),
            layers.Conv2D(32, 3, 2, padding='same', activation='relu'),
            layers.Conv2D(64, 3, 2, padding='same', activation='relu'),
            layers.Flatten(),
            layers.Dense(latent_dim + latent_dim)
        ])
        self.decoder = Sequential([
            layers.Dense(height // 4 * (width // 4) * 32, activation='relu'),
            layers.Reshape((height // 4, width // 4, 32)),
            layers.Conv2DTranspose(64, 3, 2, padding='same',
                                   activation='relu'),
            layers.Conv2DTranspose(32, 3, 2, padding='same',
                                   activation='relu'),
            layers.Conv2DTranspose(channels,
                                   3,
                                   1,
                                   padding='same',
                                   activation='sigmoid')
        ])
コード例 #25
0
def create_convolutional_decoder(data_shape):
    """Decoder: dense projection to `data_shape`, three deconv stages, 1-filter output."""
    stack = [
        dense(np.product(data_shape)),
        layers.Reshape(data_shape),
        deconv(name="deconv1"),
        deconv(name="deconv2"),
        deconv(name="deconv3"),
        layers.Conv2DTranspose(filters=1,
                               kernel_size=4,
                               padding="same",
                               name="output"),
    ]
    return models.Sequential(layers=stack, name="decoder")
コード例 #26
0
def build_generator(latent_dim):
    """Generator: `latent_dim` noise vector -> single-channel sigmoid image."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False,
                           input_shape=(latent_dim,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    # Note: None is the batch size.
    assert model.output_shape == (None, 7, 7, 256)

    # Hidden upsampling stages: (filters, strides, expected output shape).
    for n_filters, stride, expected in ((128, 1, (None, 7, 7, 128)),
                                        (64, 2, (None, 14, 14, 64))):
        model.add(layers.Conv2DTranspose(n_filters, (5, 5), strides=stride,
                                         padding='same', use_bias=False))
        assert model.output_shape == expected
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

    # Output stage; Img_H / Img_W are module-level globals.
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=2, padding='same',
                                     use_bias=False, activation='sigmoid'))
    assert model.output_shape == (None, Img_H, Img_W, 1)

    return model
コード例 #27
0
def upsample(filters, size, apply_batch=False):
    """Sequential upsampling block: stride-2 transpose conv, optional BN, ReLU."""
    kernel_init = tf.random_normal_initializer(0., 0.02)

    block = keras.Sequential()
    block.add(layers.Conv2DTranspose(filters, size, strides=2,
                                     padding='same',
                                     kernel_initializer=kernel_init,
                                     use_bias=False))
    if apply_batch:
        # Could alternatively be tfa.layers.InstanceNormalization().
        block.add(layers.BatchNormalization())

    block.add(layers.ReLU())

    return block
コード例 #28
0
ファイル: mnist-gan-test.py プロジェクト: blueybloke/logo-gan
def make_generator_model():
    """MNIST DCGAN generator: 100-d seed -> 28x28x1 tanh image."""
    # Linear stack of layers; the Dense layer projects the input seed.
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    # None is the batch size; check the model so far outputs the right shape.
    assert model.output_shape == (None, 7, 7, 256)

    filter_counts = [128, 64]
    stride_pairs = [(1, 1), (2, 2)]
    expected_shapes = [(None, 7, 7, 128), (None, 14, 14, 64)]
    for n_filters, strides, expected in zip(filter_counts, stride_pairs,
                                            expected_shapes):
        model.add(layers.Conv2DTranspose(n_filters, (5, 5), strides=strides,
                                         padding='same', use_bias=False))
        assert model.output_shape == expected
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2),
                                     padding='same', use_bias=False,
                                     activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    # Model has been upsampled to the right shape; return it.
    return model
コード例 #29
0
def make_generator(input_shape):
    """Generator: noise of `input_shape` -> single-channel tanh image (8x8 base, doubled twice)."""
    model = tf.keras.Sequential()
    model.add(layers.Dense(8 * 8 * 256, use_bias=False,
                           input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((8, 8, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5),
                                     strides=(1, 1),
                                     padding='same',
                                     use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5),
                                     strides=(2, 2),
                                     padding='same',
                                     use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # Output stage: single channel, tanh activation, no batch norm.
    model.add(layers.Conv2DTranspose(1, (5, 5),
                                     strides=(2, 2),
                                     padding='same',
                                     use_bias=False,
                                     activation='tanh'))
    return model
コード例 #30
0
 def __init__(self, config):
     """Generator: config.latent_dim vector -> single-channel sigmoid image."""
     super(Generator, self).__init__()
     self.config = config
     net = Sequential()
     net.add(layers.InputLayer(input_shape=(self.config.latent_dim, )))
     # Two fully connected stages before the convolutional upsampling.
     net.add(layers.Dense(1024))
     net.add(layers.BatchNormalization())
     net.add(layers.ReLU())
     net.add(layers.Dense(8 * 8 * 128))
     net.add(layers.BatchNormalization())
     net.add(layers.ReLU())
     net.add(layers.Reshape((8, 8, 128)))
     # Two stride-2 transpose convs: 8x8 -> 16x16 -> 32x32.
     net.add(layers.Conv2DTranspose(filters=64,
                                    kernel_size=4,
                                    strides=2,
                                    padding='same'))
     net.add(layers.BatchNormalization())
     net.add(layers.ReLU())
     net.add(layers.Conv2DTranspose(filters=1,
                                    kernel_size=4,
                                    strides=2,
                                    padding='same',
                                    activation='sigmoid'))
     self.gen = net