def main():

    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    # hyper parameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.002
    is_training = True

    img_path = glob.glob(
        r'C:\Users\Jackie Loong\Downloads\DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2-master\data\faces\*.jpg'
    )

    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):

        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_x = next(db_iter)

        # train D
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                               is_training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(
            zip(grads, discriminator.trainable_variables))

        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))

            z = tf.random.uniform([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('images', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
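The helpers d_loss_fn and g_loss_fn called above are defined elsewhere in the original repository and are not part of this listing. A minimal DCGAN-style sketch of what they could look like, assuming the discriminator outputs raw logits (the names celoss_ones and celoss_zeros are only illustrative):

def celoss_ones(logits):
    # cross-entropy against all-ones labels (treat the inputs as "real")
    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                   labels=tf.ones_like(logits))
    return tf.reduce_mean(loss)


def celoss_zeros(logits):
    # cross-entropy against all-zeros labels (treat the inputs as "fake")
    loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                   labels=tf.zeros_like(logits))
    return tf.reduce_mean(loss)


def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    d_real_logits = discriminator(batch_x, is_training)
    # real images should be classified as 1, generated images as 0
    return celoss_ones(d_real_logits) + celoss_zeros(d_fake_logits)


def g_loss_fn(generator, discriminator, batch_z, is_training):
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    # the generator wants its images to be classified as real
    return celoss_ones(d_fake_logits)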
Example n. 2
def main():
    tf.random.set_seed(222)
    np.random.seed(222)
    z_dim=100
    epochs=3000000
    batch_size=512
    learning_rate=0.002
    is_training=True
    img_path=glob.glob(r'D:\BaiduNetdiskDownload\faces\faces\*.jpg')
    dataset,img_shape,_=make_anime_dataset(img_path,batch_size)
    print(dataset,img_shape)
    sample=next(iter(dataset))
    print(sample.shape)
    dataset=dataset.repeat()
    db_iter=iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))

    dis = Discriminator()
    dis.build(input_shape=(None, 64, 64, 3))
    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    for epoch in range(epochs):
        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1, maxval=1)
        batch_x = next(db_iter)
        # train D
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, dis, batch_z, batch_x, is_training)
        grads = tape.gradient(d_loss, dis.trainable_variables)
        d_optimizer.apply_gradients(zip(grads, dis.trainable_variables))

        # train G
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, dis, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))
Example n. 3
def main():
    tf.random.set_seed(233)
    np.random.seed(233)
    assert tf.__version__.startswith('2.')

    # hyper parameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.0005
    is_training = True

    img_path = glob.glob(r'C:\Users\Jackie\Downloads\faces\*.jpg')
    assert len(img_path) > 0

    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape, tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))
    z_sample = tf.random.normal([100, z_dim])

    g_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):

        for _ in range(5):
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)

            # train D
            with tf.GradientTape() as tape:
                d_loss, gp = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

        batch_z = tf.random.normal([batch_size, z_dim])

        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss),
                  'gp:', float(gp))

            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('images', 'wgan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
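This example (and several of those below) unpacks d_loss_fn into a critic loss and a gradient-penalty term gp; like g_loss_fn, it is defined elsewhere in the source repository. A minimal WGAN-GP-style sketch of what these helpers might look like, assuming the discriminator returns raw critic scores and using an assumed penalty weight of 10:

def gradient_penalty(discriminator, batch_x, fake_image):
    # interpolate between real and generated samples
    t = tf.random.uniform([batch_x.shape[0], 1, 1, 1])
    interpolated = t * batch_x + (1. - t) * fake_image
    with tf.GradientTape() as tape:
        tape.watch([interpolated])
        d_logits = discriminator(interpolated, training=True)
    grads = tape.gradient(d_logits, interpolated)
    grads = tf.reshape(grads, [grads.shape[0], -1])
    gp = tf.reduce_mean((tf.norm(grads, axis=1) - 1.) ** 2)
    return gp


def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    d_real_logits = discriminator(batch_x, is_training)
    gp = gradient_penalty(discriminator, batch_x, fake_image)
    # Wasserstein critic loss plus the gradient penalty (weight 10 is an assumption)
    loss = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits) + 10. * gp
    return loss, gp


def g_loss_fn(generator, discriminator, batch_z, is_training):
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    # the generator tries to maximize the critic's score on generated images
    return -tf.reduce_mean(d_fake_logits)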
Example n. 4
def create_dataset(bs):
    img_path = glob.glob(r'data/faces/*.jpg')
    print('images: ', len(img_path))
    dataset, img_shape, _ = make_anime_dataset(img_path, bs, resize=64)
    print(dataset, img_shape)
    dataset = dataset.repeat(100)
    db_iter = iter(dataset)
    return db_iter
def main():
    # set the random seeds for reproducibility
    tf.random.set_seed(22)
    np.random.seed(22)
    # hyperparameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512  # adjust according to your GPU capacity
    learning_rate = 0.002
    is_training = True
    # load the data (change the path to your own) and build the networks
    img_path = glob.glob(
        r'C:\Users\Jackie Loong\Downloads\DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2-master\data\faces\*.jpg'
    )
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    # print(dataset, img_shape)
    # sample = next(iter(dataset))
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))
    # 建立优化器
    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):
        # sample a random latent batch
        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_x = next(db_iter)

        # train the discriminator
        with tf.GradientTape() as tape:
            d_loss, gp = d_loss_fn(generator, discriminator, batch_z, batch_x,
                                   is_training)

        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(
            zip(grads, discriminator.trainable_variables))

        # train the generator
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)

        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss),
                  'gp:', float(gp))
            z = tf.random.uniform([100, z_dim])
            fake_image = generator(z, training=False)
            # save the generated images under the images folder as wgan-<epoch>.png
            img_path = os.path.join('images', 'wgan-%d.png' % epoch)
            # 10x10 grid of color images
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
Example n. 6
def main():
    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    z_dim = 100
    epochs = 3000000
    batch_size = 512  # same as the batch_size of real images
    d_learning_rate = 0.005
    g_learning_rate = 0.002
    training = True

    img_path = glob.glob(r'.\faces\*.jpg')
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample_picture = next(iter(dataset))
    print(sample_picture.shape, tf.reduce_max(sample_picture).numpy(), tf.reduce_min(sample_picture).numpy())
    dataset = dataset.repeat()
    ds_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))

    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    g_optimizer = tf.optimizers.RMSprop(learning_rate=g_learning_rate)
    d_optimizer = tf.optimizers.RMSprop(learning_rate=d_learning_rate)

    for epoch in range(epochs):
        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_r = next(ds_iter)

        # discriminator training
        with tf.GradientTape() as tape:
            d_loss, gp = d_loss_func(generator, discriminator, batch_z, batch_r, training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

        if epoch % 5 == 0:
            with tf.GradientTape() as tape:
                g_loss = g_loss_func(generator, discriminator, batch_z, training)
            grads = tape.gradient(g_loss, generator.trainable_variables)
            g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print('Current epoch:', epoch,
                  'd_loss:', float(d_loss), 'g_loss:', float(g_loss),
                  'gp:', float(gp))

            z = tf.random.uniform([100, z_dim])
            g_imgs = generator(z, training=False)
            save_path = os.path.join('images', 'wgan-%d.png' % epoch)
            save_result(g_imgs.numpy(), 10, save_path, color_mode='P')
Example n. 7
def main():
    tf.random.set_seed(3333)
    np.random.seed(3333)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    z_dim = 100  # length of the latent vector z
    epochs = 3000000  # number of training steps
    batch_size = 64
    learning_rate = 0.0002
    is_training = True

    # collect the dataset file paths
    img_path = glob.glob(r'C:\Users\jay_n\.keras\datasets\faces\*.jpg') + \
        glob.glob(r'C:\Users\jay_n\.keras\datasets\faces\*.png')
    print('images num:', len(img_path))
    # build the dataset object
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size, resize=64)
    print(dataset, img_shape)
    sample = next(iter(dataset))  # draw a sample batch
    print(sample.shape, tf.reduce_max(sample).numpy(), tf.reduce_min(sample).numpy())
    dataset = dataset.repeat(100)
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(4, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(4, 64, 64, 3))
    # create separate optimizers for the generator and the discriminator
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    # generator.load_weights('generator.ckpt')
    # discriminator.load_weights('discriminator.ckpt')
    # print('Loaded ckpt!!')

    d_losses, g_losses = [], []
    for epoch in range(epochs):
        # 1. train the discriminator
        for _ in range(1):
            # sample latent vectors
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)  # sample real images
            # discriminator forward pass
            with tf.GradientTape() as tape:
                d_loss, _ = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))
        # 2. train the generator
        # sample latent vectors
        batch_z = tf.random.normal([batch_size, z_dim])
        # generator forward pass
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))
            # visualize samples
            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('gan_images', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

        d_losses.append(float(d_loss))
        g_losses.append(float(g_loss))

        if epoch % 10000 == 1:
            generator.save_weights('generator.ckpt')
            discriminator.save_weights('discriminator.ckpt')
Example n. 8
def main():
    tf.random.set_seed(233)
    np.random.seed(233)

    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 2e-4
    # ratios = D steps:G steps
    ratios = 2

    img_path = glob.glob(os.path.join('faces', '*.jpg'))
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    # generator.load_weights(os.path.join('checkpoints', 'generator-5000'))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))
    # discriminator.load_weights(os.path.join('checkpoints', 'discriminator-5000'))

    g_optimizer = tf.optimizers.Adam(learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate, beta_1=0.5)
    # a fixed noise for sampling
    z_sample = tf.random.normal([100, z_dim])

    g_loss_meter = keras.metrics.Mean()
    d_loss_meter = keras.metrics.Mean()
    gp_meter = keras.metrics.Mean()

    for epoch in range(epochs):

        # train D
        for step in range(ratios):
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)
            with tf.GradientTape() as tape:
                d_loss, gp = d_loss_fn(generator, discriminator, batch_z,
                                       batch_x)

            d_loss_meter.update_state(d_loss)
            gp_meter.update_state(gp)

            gradients = tape.gradient(d_loss,
                                      discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(gradients, discriminator.trainable_variables))

        # train G
        batch_z = tf.random.normal([batch_size, z_dim])
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z)

        g_loss_meter.update_state(g_loss)

        gradients = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(
            zip(gradients, generator.trainable_variables))

        if epoch % 100 == 0:

            fake_image = generator(z_sample, training=False)

            print(epoch, 'd-loss:',
                  d_loss_meter.result().numpy(), 'g-loss',
                  g_loss_meter.result().numpy(), 'gp',
                  gp_meter.result().numpy())

            d_loss_meter.reset_states()
            g_loss_meter.reset_states()
            gp_meter.reset_states()

            # save generated image samples
            img_path = os.path.join('images_wgan_gp', 'wgan_gp-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

        if (epoch + 1) % 2000 == 0:
            generator.save_weights(
                os.path.join('checkpoints_gp', 'generator-%d' % epoch))
            discriminator.save_weights(
                os.path.join('checkpoints_gp', 'discriminator-%d' % epoch))
Example n. 9
def main():

    tf.random.set_seed(233)
    np.random.seed(233)
    assert tf.__version__.startswith('2.')

    # hyper parameters
    z_dim = 100
    epochs = 1
    batch_size = 64
    learning_rate = 0.0005
    is_training = True
    k = 5

    root = os.path.dirname(os.path.abspath(__file__))
    save_path = os.path.join(root, 'gan-images')

    img_path = glob.glob('faces/*.jpg')
    assert len(img_path) > 0

    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    z_sample = tf.random.normal([100, z_dim])
    g_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                           beta_1=0.5)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                           beta_1=0.5)

    for epoch in range(epochs):
        time_start = datetime.datetime.now()
        for _ in range(k):
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)

            # train D
            with tf.GradientTape() as tape:
                d_loss, gp = d_loss_fn(generator, discriminator, batch_z,
                                       batch_x, is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(grads, discriminator.trainable_variables))

        batch_z = tf.random.normal([batch_size, z_dim])

        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        # Epoch: 0/1 TimeUsed: 0:00:03.809766 d-loss: 0.11730888 g-loss: -0.40536720 gp: 0.01527955
        print(
            f"Epoch: {epoch}/{epochs} TimeUsed: {datetime.datetime.now()-time_start} d-loss: {d_loss:.8f} g-loss: {g_loss:.8f} gp: {gp:.8f}"
        )

        if epoch % 100 == 0:
            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join(save_path, 'wgan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
Example n. 10
import glob

#%%
'''
Load the dataset
'''
from dataset import make_anime_dataset

# Collect the dataset image paths; download and unzip the data from https://pan.baidu.com/s/1eSifHcA (extraction code: g5qa).
img_path = glob.glob(r'faces/*.jpg')
# glob is a file-handling module from Python's standard library for finding files that match a pattern.
# Its main function, glob.glob, returns a list of all matching file paths.
# It takes a single path pattern (absolute or relative) and returns only files in that directory, not files in subdirectories.
batch_size = 128
dataset, img_shape, _ = make_anime_dataset(img_path,
                                           batch_size=batch_size,
                                           resize=64)
# dataset is a tf.data.Dataset instance that has already been shuffled, preprocessed and batched, so it can be iterated directly to obtain sample batches; img_shape is the image size after preprocessing.
dataset = dataset.repeat(300)
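#%%
# make_anime_dataset comes from the companion dataset module and is not reproduced on this page.
# The following is only a rough sketch, under assumptions, of the kind of tf.data pipeline it
# builds (decode each JPEG, resize to 64x64, scale pixels to [-1, 1], shuffle and batch); the
# helper name make_dataset_sketch is hypothetical, and the real function additionally returns
# the image shape.
import tensorflow as tf

def make_dataset_sketch(paths, batch_size, resize=64):
    def _load(path):
        img = tf.io.read_file(path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, [resize, resize])
        img = tf.cast(img, tf.float32) / 127.5 - 1.0  # map pixel values to [-1, 1]
        return img

    ds = tf.data.Dataset.from_tensor_slices(paths)
    ds = ds.shuffle(buffer_size=len(paths))
    ds = ds.map(_load, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds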

#%%
'''
Generator

The generator G is built from 5 stacked transposed-convolution units that progressively
enlarge the feature-map height and width while reducing the number of channels.
The latent vector z of length 100 is first reshaped into a 4-D tensor of shape [b, 1, 1, 100]
and then passed through the transposed-convolution layers, which upsample the spatial
dimensions and shrink the channel dimension until a 64x64 color image with 3 channels is
obtained. A BN layer is inserted after each convolution to improve training stability, and
the convolution layers use no bias vectors. A sketch of such a generator follows below.
'''
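#%%
# A minimal sketch of a generator matching the description above; the layer widths and kernel
# sizes here are assumptions for illustration, not necessarily the exact architecture used by
# the examples on this page.
import tensorflow as tf
from tensorflow.keras import layers

class Generator(tf.keras.Model):

    def __init__(self):
        super().__init__()
        filters = 64
        # 5 transposed-conv units: 1x1 -> 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64
        self.conv1 = layers.Conv2DTranspose(filters * 8, 4, 1, 'valid', use_bias=False)
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2DTranspose(filters * 4, 4, 2, 'same', use_bias=False)
        self.bn2 = layers.BatchNormalization()
        self.conv3 = layers.Conv2DTranspose(filters * 2, 4, 2, 'same', use_bias=False)
        self.bn3 = layers.BatchNormalization()
        self.conv4 = layers.Conv2DTranspose(filters, 4, 2, 'same', use_bias=False)
        self.bn4 = layers.BatchNormalization()
        self.conv5 = layers.Conv2DTranspose(3, 4, 2, 'same', use_bias=False)

    def call(self, inputs, training=None):
        # [b, 100] -> [b, 1, 1, 100]
        x = tf.reshape(inputs, (-1, 1, 1, inputs.shape[-1]))
        x = tf.nn.relu(self.bn1(self.conv1(x), training=training))
        x = tf.nn.relu(self.bn2(self.conv2(x), training=training))
        x = tf.nn.relu(self.bn3(self.conv3(x), training=training))
        x = tf.nn.relu(self.bn4(self.conv4(x), training=training))
        x = self.conv5(x)
        return tf.tanh(x)  # 64x64x3 image with values in [-1, 1]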

Example n. 11
def main():
    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    # hyper parameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.002
    is_training = True

    # load the dataset
    img_path = glob.glob('/Users/tongli/Desktop/Python/TensorFlow/faces/*.jpg')
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)

    print(dataset, img_shape)

    sample = next(iter(dataset))

    print(sample.shape, tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())

    dataset = dataset.repeat()

    db_iter = iter(dataset)


    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    # optimizers
    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):
        # the generator takes random noise as input and produces an image

        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        # real images
        batch_x = next(db_iter)

        # train D (the discriminator)
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)

        grads = tape.gradient(d_loss, discriminator.trainable_variables)

        d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))


        # train G (the generator)
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))

            z = tf.random.uniform([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('images', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
Example n. 12
def main():
    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    # hyperparameters
    z_dim = 10
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.002
    is_training = True

    # load the dataset: the list of paths to all images
    img_path = glob.glob(r'D:\PyCharm Projects\CarNum-CNN\data\faces\*.jpg')

    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    # print(dataset, img_shape)
    # sample = next(iter(dataset))
    # print(sample)

    # repeat the dataset so it can be sampled indefinitely
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    # instantiate the generator and discriminator models
    genertor = Generator()
    genertor.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    # create the two optimizers
    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):

        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_x = next(db_iter)

        # train D
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(genertor, discriminator, batch_z, batch_x, is_training)

        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

        # train G
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(genertor, discriminator, batch_z, is_training)

        grads = tape.gradient(g_loss, genertor.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, genertor.trainable_variables))

        # print progress
        if epoch % 100 == 0:
            print(epoch, "d_loss:", float(d_loss), "g_loss:", float(g_loss))

            z = tf.random.uniform([100, z_dim])
            fake_image = genertor(z, training=False)
            img_path = os.path.join('./wgan_images', 'wgan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
Example n. 13
def main():
    tf.random.set_seed(3333)
    np.random.seed(3333)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')
    z_dim = 100  # length of the latent vector z
    epochs = 1  # number of training steps
    batch_size = 64  # batch size
    learning_rate = 0.0002
    is_training = True
    k = 5

    # Path
    root = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(root, 'models')
    generator_ckpt_path = os.path.join(model_path, 'generator',
                                       'generator.ckpt')
    discriminator_ckpt_path = os.path.join(model_path, 'discriminator',
                                           'discriminator.ckpt')
    save_image_path = os.path.join(root, 'gan_images')

    # collect the dataset file paths
    img_path = glob.glob('faces/*.jpg')
    print('images num:', len(img_path))

    # build the dataset object
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size, resize=64)
    print(dataset, img_shape)

    sample = next(iter(dataset))  # draw a sample batch
    print(
        f"batch_shape: {sample.shape} max: {tf.reduce_max(sample).numpy()} min: {tf.reduce_min(sample).numpy()}"
    )

    dataset = dataset.repeat(100)  # repeat the dataset
    db_iter = iter(dataset)

    generator = Generator()  # create the generator
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()  # create the discriminator
    discriminator.build(input_shape=(None, 64, 64, 3))

    # create separate optimizers for the generator and the discriminator
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)

    if os.path.exists(generator_ckpt_path + '.index'):
        generator.load_weights(generator_ckpt_path)
        print('Loaded generator ckpt!!')
    if os.path.exists(discriminator_ckpt_path + '.index'):
        discriminator.load_weights(discriminator_ckpt_path)
        print('Loaded discriminator ckpt!!')

    d_losses, g_losses = [], []
    for epoch in range(epochs):  # train for `epochs` steps
        time_start = datetime.datetime.now()
        # 1. train the discriminator for k steps before training the generator
        for _ in range(k):
            # sample latent vectors
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)  # sample real images
            # discriminator forward pass
            with tf.GradientTape() as tape:
                d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                                   is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(grads, discriminator.trainable_variables))

        # 2. train the generator
        # sample latent vectors
        batch_z = tf.random.normal([batch_size, z_dim])
        # generator forward pass
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        # Epoch: 0/1 TimeUsed: 0:00:10.126834 d-loss: 1.45619345 g-loss: 0.63321948
        print(
            f"Epoch: {epoch}/{epochs} TimeUsed: {datetime.datetime.now()-time_start} d-loss: {d_loss:.8f} g-loss: {g_loss:.8f}"
        )

        if epoch % 100 == 0:
            z = tf.random.normal([100, z_dim])  # for visualization
            fake_image = generator(z, training=False)
            img_path = os.path.join(save_image_path, 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

            d_losses.append(float(d_loss))
            g_losses.append(float(g_loss))

            generator.save_weights(generator_ckpt_path)
            discriminator.save_weights(discriminator_ckpt_path)
Example n. 14
def main():

    tf.random.set_seed(3333)
    np.random.seed(3333)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    z_dim = 100  # length of the latent vector z
    epochs = 3000000  # number of training steps
    batch_size = 64  # batch size
    learning_rate = 0.0002
    is_training = True

    # collect the dataset file paths
    # C:\Users\z390\Downloads\anime-faces
    # r'C:\Users\z390\Downloads\faces\*.jpg'
    # img_path = glob.glob(r'C:\Users\z390\Downloads\anime-faces\*\*.jpg') + \
    # glob.glob(r'C:\Users\z390\Downloads\anime-faces\*\*.png')
    img_path = glob.glob(
        r'/home/ulysses/workspace/AI/Deep-Learning-with-TensorFlow-book/ch13/faces/*.jpg'
    )
    # img_path.extend(img_path2)
    print('images num:', len(img_path))
    # build the dataset object
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size, resize=64)
    print(dataset, img_shape)
    sample = next(iter(dataset))  # draw a sample batch
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat(100)  # repeat the dataset
    db_iter = iter(dataset)

    generator = Generator()  # create the generator
    generator.build(input_shape=(4, z_dim))
    discriminator = Discriminator()  # create the discriminator
    discriminator.build(input_shape=(4, 64, 64, 3))
    # create separate optimizers for the generator and the discriminator
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    if os.path.exists(r'./generator.ckpt.index'):
        generator.load_weights('generator.ckpt')
        print('Loaded generator ckpt!!')
    if os.path.exists(r'./discriminator.ckpt.index'):
        discriminator.load_weights('discriminator.ckpt')
        print('Loaded discriminator ckpt!!')

    d_losses, g_losses = [], []
    for epoch in range(epochs):  # train for `epochs` steps
        # 1. train the discriminator
        for _ in range(1):
            # sample latent vectors
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)  # sample real images
            # discriminator forward pass
            with tf.GradientTape() as tape:
                d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                                   is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(grads, discriminator.trainable_variables))
        # 2. train the generator
        # sample latent vectors
        batch_z = tf.random.normal([batch_size, z_dim])
        batch_x = next(db_iter)  # sample real images (not actually used in the generator step)
        # generator forward pass
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))
            # visualize samples
            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('gan_images1', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

            d_losses.append(float(d_loss))
            g_losses.append(float(g_loss))

        if epoch % 10000 == 0:
            # print(d_losses)
            # print(g_losses)
            generator.save_weights('./check_point/generator.ckpt')
            discriminator.save_weights('./check_point/discriminator.ckpt')
def main():
    tf.random.set_seed(233)
    np.random.seed(233)
    assert tf.__version__.startswith('2.')

    # hyper parameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.0005

    img_path = glob.glob(r'./faces/*.jpg')
    assert len(img_path) > 0
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = None
    USE_SAVED = True
    if (USE_SAVED and os.path.exists('./model.tf')):
        print('Found a saved model, resuming the previous training run')
        generator = tf.keras.models.load_model('./model.tf', compile=True)
    else:
        print('No saved model found, training from scratch')
        generator = Generator()
        generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    g_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                           beta_1=0.5)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                           beta_1=0.5)

    for epoch in range(epochs):
        # the discriminator takes 5 update steps per epoch
        for _ in range(5):
            batch_z = tf.random.normal([batch_size, z_dim])  # latent noise for fake images
            batch_x = next(db_iter)  # load real images
            with tf.GradientTape() as tape:
                d_loss, gp = d_loss_fn(generator, discriminator, batch_x,
                                       batch_z, True)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(grads, discriminator.trainable_variables))

        batch_z = tf.random.normal([batch_size, z_dim])  # latent noise for fake images
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, True)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))
        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss),
                  'gp-loss:', float(gp))

            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('images', 'wgan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
            # Subclassed models cannot be saved in h5 format; see https://github.com/tensorflow/tensorflow/issues/29545
            generator.predict(z)  # calling predict once before save avoids an error; see https://github.com/tensorflow/tensorflow/issues/31057
            generator.save('./model.tf', overwrite=True, save_format="tf")