Example #1
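All of the snippets below follow the same pattern and assume the usual TensorFlow 2 imports plus a few project-local helpers (such as Generator, Discriminator, make_anime_dataset, save_result, and the d_loss_fn/g_loss_fn loss functions) defined elsewhere in each script. A typical preamble would look like the sketch below; the helper module names are illustrative guesses, not a public API:

import os
import glob

import numpy as np
import tensorflow as tf
from tensorflow import keras

# Project-local helpers; the module names here are hypothetical.
from gan import Generator, Discriminator      # model definitions
from dataset import make_anime_dataset        # tf.data pipeline builder
from utils import save_result                 # image-grid writer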
def main():

    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    # hyperparameters
    z_dim = 100            # length of the latent vector z
    epochs = 3000000       # training steps (one batch per step)
    batch_size = 512
    learning_rate = 0.002
    is_training = True

    img_path = glob.glob(
        r'C:\Users\Jackie Loong\Downloads\DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2-master\data\faces\*.jpg'
    )

    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):

        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_x = next(db_iter)

        # train D
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                               is_training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(
            zip(grads, discriminator.trainable_variables))

        # train G
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))

            z = tf.random.uniform([100, z_dim], minval=-1., maxval=1.)  # match the training noise range
            fake_image = generator(z, training=False)
            img_path = os.path.join('images', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
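Example #1 does not show d_loss_fn and g_loss_fn. A minimal sketch consistent with their call signatures, assuming the standard GAN objective with sigmoid cross-entropy on the discriminator's logits:

def celoss_ones(logits):
    # cross-entropy against all-ones labels
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits, labels=tf.ones_like(logits)))

def celoss_zeros(logits):
    # cross-entropy against all-zeros labels
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits, labels=tf.zeros_like(logits)))

def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    # real images should score 1, generated images 0
    fake_image = generator(batch_z, is_training)
    d_real_logits = discriminator(batch_x, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    return celoss_ones(d_real_logits) + celoss_zeros(d_fake_logits)

def g_loss_fn(generator, discriminator, batch_z, is_training):
    # the generator tries to make its fakes score 1
    fake_image = generator(batch_z, is_training)
    return celoss_ones(discriminator(fake_image, is_training))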
Example #2
def main():
    tf.random.set_seed(233)
    np.random.seed(233)
    assert tf.__version__.startswith('2.')

    # hyperparameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.0005
    is_training = True

    img_path = glob.glob(r'C:\Users\Jackie\Downloads\faces\*.jpg')
    assert len(img_path) > 0

    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape, tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))
    z_sample = tf.random.normal([100, z_dim])  # fixed noise (unused below)

    g_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):

        for _ in range(5):
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)

            # train D
            with tf.GradientTape() as tape:
                d_loss, gp = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

        # train G
        batch_z = tf.random.normal([batch_size, z_dim])

        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss),
                  'gp:', float(gp))

            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('images', 'wgan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
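In Example #2, d_loss_fn returns a (loss, gp) pair, which points at a WGAN-GP critic trained 5 steps per generator step. A minimal sketch of such a loss, assuming a penalty weight of 10 as in the WGAN-GP paper:

def gradient_penalty(discriminator, batch_x, fake_image):
    # penalize the critic's gradient norm on random real/fake interpolations
    t = tf.random.uniform([batch_x.shape[0], 1, 1, 1])
    interpolate = t * batch_x + (1. - t) * fake_image
    with tf.GradientTape() as tape:
        tape.watch([interpolate])
        d_logits = discriminator(interpolate, training=True)
    grads = tape.gradient(d_logits, interpolate)
    grads = tf.reshape(grads, [grads.shape[0], -1])
    return tf.reduce_mean((tf.norm(grads, axis=1) - 1.) ** 2)

def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    d_real_logits = discriminator(batch_x, is_training)
    gp = gradient_penalty(discriminator, batch_x, fake_image)
    # Wasserstein critic loss plus the gradient penalty
    loss = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits) + 10. * gp
    return loss, gp

def g_loss_fn(generator, discriminator, batch_z, is_training):
    # the generator maximizes the critic's score on fakes
    fake_image = generator(batch_z, is_training)
    return -tf.reduce_mean(discriminator(fake_image, is_training))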
Example #3
def main():
    # Set random seeds for reproducibility
    tf.random.set_seed(22)
    np.random.seed(22)
    # Hyperparameters
    z_dim = 100
    epochs = 3000000
    batch_size = 512  # adjust to your GPU capacity
    learning_rate = 0.002
    is_training = True
    # Load the data (change the path to your own) and build the networks
    img_path = glob.glob(
        r'C:\Users\Jackie Loong\Downloads\DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2-master\data\faces\*.jpg'
    )
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    # print(dataset, img_shape)
    # sample = next(iter(dataset))
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))
    # Create the optimizers
    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):
        # Sample random latent vectors
        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_x = next(db_iter)

        # Train the discriminator
        with tf.GradientTape() as tape:
            d_loss, gp = d_loss_fn(generator, discriminator, batch_z, batch_x,
                                   is_training)

        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(
            zip(grads, discriminator.trainable_variables))

        # Train the generator
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)

        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss),
                  'gp:', float(gp))
            z = tf.random.uniform([100, z_dim], minval=-1., maxval=1.)  # match the training noise range
            fake_image = generator(z, training=False)
            # Save the generated images under the images folder as wgan-<epoch>.png
            img_path = os.path.join('images', 'wgan-%d.png' % epoch)
            # 10x10 grid of color images
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')
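make_anime_dataset is a project helper rather than a TensorFlow API. A plausible reimplementation, assuming it decodes the JPEGs, resizes them, scales pixels to [-1, 1], and batches with drop_remainder; the real helper may differ in details such as the third return value, and TF 2.4+ is assumed (use tf.data.experimental.AUTOTUNE on older 2.x):

def make_anime_dataset(img_paths, batch_size, resize=64):
    def load_img(path):
        img = tf.io.read_file(path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, [resize, resize])
        return tf.cast(img, tf.float32) / 127.5 - 1.  # scale to [-1, 1]

    dataset = tf.data.Dataset.from_tensor_slices(img_paths)
    dataset = dataset.shuffle(buffer_size=4096)
    dataset = dataset.map(load_img, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset.prefetch(tf.data.AUTOTUNE), (resize, resize, 3), len(img_paths)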
Example #4
def train():
    tf.random.set_seed(22)
    np.random.seed(22)
    data_iter = dataset.load_dataset()

    # Use lists of input shapes to build the multi-input models
    generator = Generator()
    generator.build(input_shape=[(None, z_dim), (None, 10)])
    discriminator = Discriminator()
    discriminator.build(input_shape=[(None, 28, 28, 1), (None, 10)])

    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    for epoch in range(epochs):
        for i in range(int(60000 / batch_size / epochs_d)):

            batch_z = tf.random.uniform([batch_size, z_dim],
                                        minval=0.,
                                        maxval=1.)
            # sample random class labels and one-hot encode them
            batch_c = tf.one_hot(np.random.randint(0, 10, size=batch_size), 10)

            # train D
            for epoch_d in range(epochs_d):
                batch_data = next(data_iter)
                batch_x = batch_data[0]
                batch_y = batch_data[1]
                with tf.GradientTape() as tape:
                    d_loss = d_loss_fn(generator, discriminator, batch_z,
                                       batch_c, batch_x, batch_y, is_training)
                grads = tape.gradient(d_loss,
                                      discriminator.trainable_variables)
                d_optimizer.apply_gradients(
                    zip(grads, discriminator.trainable_variables))

            # train G
            with tf.GradientTape() as tape:
                g_loss = g_loss_fn(generator, discriminator, batch_z, batch_c,
                                   is_training)
            grads = tape.gradient(g_loss, generator.trainable_variables)
            g_optimizer.apply_gradients(
                zip(grads, generator.trainable_variables))

        print('epoch : {epoch} d-loss : {d_loss} g-loss : {g_loss}'.format(
            epoch=epoch, d_loss=float(d_loss), g_loss=float(g_loss)))

        z = tf.random.uniform([100, z_dim], minval=0., maxval=1.)
        # labels 0-9, ten samples per class (one grid row per class)
        c = tf.one_hot(np.repeat(np.arange(10), 10), 10)
        fake_image = generator([z, c], training=False)
        img_path = os.path.join('images', 'infogan-%d-final.png' % epoch)
        saver.save_image(fake_image.numpy(), img_path, 10)
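Example #4 conditions both networks on a one-hot class code, so its losses take the label tensors as extra inputs. A minimal sketch, reusing the celoss_ones/celoss_zeros helpers sketched after Example #1 and assuming batch_y is already one-hot encoded:

def d_loss_fn(generator, discriminator, batch_z, batch_c, batch_x, batch_y,
              is_training):
    # real pairs (image, true label) vs. fake pairs (image, sampled code)
    fake_image = generator([batch_z, batch_c], is_training)
    d_fake_logits = discriminator([fake_image, batch_c], is_training)
    d_real_logits = discriminator([batch_x, batch_y], is_training)
    return celoss_ones(d_real_logits) + celoss_zeros(d_fake_logits)

def g_loss_fn(generator, discriminator, batch_z, batch_c, is_training):
    fake_image = generator([batch_z, batch_c], is_training)
    return celoss_ones(discriminator([fake_image, batch_c], is_training))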
Example #5
def main():
    tf.random.set_seed(3333)
    np.random.seed(3333)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    z_dim = 100  # length of the latent vector z
    epochs = 3000000  # training steps
    batch_size = 64
    learning_rate = 0.0002
    is_training = True

    # Collect the dataset file paths
    img_path = glob.glob(r'C:\Users\jay_n\.keras\datasets\faces\*.jpg') + \
        glob.glob(r'C:\Users\jay_n\.keras\datasets\faces\*.png')
    print('images num:', len(img_path))
    # Build the dataset object
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size, resize=64)
    print(dataset, img_shape)
    sample = next(iter(dataset))  # draw one batch
    print(sample.shape, tf.reduce_max(sample).numpy(), tf.reduce_min(sample).numpy())
    dataset = dataset.repeat(100)
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(4, z_dim))
    discriminator = Discriminator()
    discriminator.build(input_shape=(4, 64, 64, 3))
    # Create separate optimizers for the generator and discriminator
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)

    # generator.load_weights('generator.ckpt')
    # discriminator.load_weights('discriminator.ckpt')
    # print('Loaded ckpt!!')

    d_losses, g_losses = [], []
    for epoch in range(epochs):
        # 1. Train the discriminator
        for _ in range(1):
            # Sample latent vectors
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)  # sample real images
            # Discriminator forward pass
            with tf.GradientTape() as tape:
                d_loss, _ = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))
        # 2. Train the generator
        # Sample latent vectors
        batch_z = tf.random.normal([batch_size, z_dim])
        # Generator forward pass
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))
            # Visualize intermediate results
            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('gan_images', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

        d_losses.append(float(d_loss))
        g_losses.append(float(g_loss))

        if epoch % 10000 == 1:
            generator.save_weights('generator.ckpt')
            discriminator.save_weights('discriminator.ckpt')
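The Generator and Discriminator classes are also defined outside these snippets. A compact DCGAN-style sketch for the 64x64x3 anime-face examples; the layer widths are illustrative and the original models may use different sizes:

class Generator(keras.Model):
    def __init__(self):
        super().__init__()
        self.fc = keras.layers.Dense(4 * 4 * 512)
        self.conv1 = keras.layers.Conv2DTranspose(256, 4, 2, 'same')
        self.bn1 = keras.layers.BatchNormalization()
        self.conv2 = keras.layers.Conv2DTranspose(128, 4, 2, 'same')
        self.bn2 = keras.layers.BatchNormalization()
        self.conv3 = keras.layers.Conv2DTranspose(3, 4, 4, 'same')

    def call(self, inputs, training=None):
        # z: [b, z_dim] -> feature map [b, 4, 4, 512] -> image [b, 64, 64, 3]
        x = tf.reshape(self.fc(inputs), [-1, 4, 4, 512])
        x = tf.nn.relu(self.bn1(self.conv1(x), training=training))
        x = tf.nn.relu(self.bn2(self.conv2(x), training=training))
        return tf.tanh(self.conv3(x))  # pixels in [-1, 1]

class Discriminator(keras.Model):
    def __init__(self):
        super().__init__()
        self.conv1 = keras.layers.Conv2D(64, 4, 2, 'same')
        self.conv2 = keras.layers.Conv2D(128, 4, 2, 'same')
        self.bn2 = keras.layers.BatchNormalization()
        self.conv3 = keras.layers.Conv2D(256, 4, 2, 'same')
        self.bn3 = keras.layers.BatchNormalization()
        self.flatten = keras.layers.Flatten()
        self.fc = keras.layers.Dense(1)

    def call(self, inputs, training=None):
        # image [b, 64, 64, 3] -> single real/fake logit [b, 1]
        x = tf.nn.leaky_relu(self.conv1(inputs))
        x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training))
        x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training))
        return self.fc(self.flatten(x))  # raw logit, no sigmoid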
Example #6
def main():

    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    # hyperparameters
    z_dim = 100
    epochs = 3000000
    batch_size = 128
    learning_rate = 0.0002
    is_training = True

    # for validation purposes
    assets_dir = './images'
    if not os.path.isdir(assets_dir):
        os.makedirs(assets_dir)
    val_block_size = 10
    val_size = val_block_size * val_block_size

    # load MNIST data
    (x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
    x_train = x_train.astype(np.float32) / 255.
    db = tf.data.Dataset.from_tensor_slices(x_train).shuffle(
        batch_size * 4).batch(batch_size).repeat()
    db_iter = iter(db)
    inputs_shape = [-1, 28, 28, 1]

    # create generator & discriminator
    generator = Generator()
    generator.build(input_shape=(batch_size, z_dim))
    generator.summary()
    discriminator = Discriminator()
    discriminator.build(input_shape=(batch_size, 28, 28, 1))
    discriminator.summary()

    # prepare optimizer
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)

    for epoch in range(epochs):

        # labels are not needed
        batch_x = next(db_iter)

        # reshape and rescale images from [0, 1] to [-1, 1]
        batch_x = tf.reshape(batch_x, shape=inputs_shape)
        batch_x = batch_x * 2.0 - 1.0

        # Sample random noise for G
        batch_z = tf.random.uniform(shape=[batch_size, z_dim],
                                    minval=-1.,
                                    maxval=1.)

        # train D
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                               is_training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(
            zip(grads, discriminator.trainable_variables))

        # train G
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:

            print(epoch, 'd loss:', float(d_loss), 'g loss:', float(g_loss))

            # save validation results every 100 steps
            val_z = np.random.uniform(-1, 1, size=(val_size, z_dim))
            fake_image = generator(val_z, training=False)
            image_fn = os.path.join('images',
                                    'gan-val-{:03d}.png'.format(epoch + 1))
            save_result(fake_image.numpy(),
                        val_block_size,
                        image_fn,
                        color_mode='L')
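save_result is likewise a project helper. A hypothetical implementation that tiles the batch into a square grid and writes a single PNG, assuming generator outputs in [-1, 1] and Pillow installed ('L' selects grayscale; anything else is treated as color here):

def save_result(val_out, val_block_size, image_fn, color_mode='L'):
    from PIL import Image

    # map pixels from [-1, 1] back to [0, 255]
    imgs = np.clip((val_out + 1.) * 127.5, 0, 255).astype(np.uint8)
    n, h, w = imgs.shape[:3]
    c = imgs.shape[3] if imgs.ndim == 4 else 1
    grid = np.zeros((val_block_size * h, val_block_size * w, c), np.uint8)
    for idx in range(min(n, val_block_size ** 2)):
        row, col = divmod(idx, val_block_size)
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w] = \
            imgs[idx].reshape(h, w, c)
    out_dir = os.path.dirname(image_fn)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    img = grid.squeeze(-1) if color_mode == 'L' else grid
    Image.fromarray(img).save(image_fn)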
Example #7
def main():
    tf.random.set_seed(233)
    np.random.seed(233)

    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 2e-4
    # ratios = number of D steps per G step
    ratios = 2

    img_path = glob.glob(os.path.join('faces', '*.jpg'))
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample = next(iter(dataset))
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat()
    db_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))
    # generator.load_weights(os.path.join('checkpoints', 'generator-5000'))
    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))
    # discriminator.load_weights(os.path.join('checkpoints', 'discriminator-5000'))

    g_optimizer = tf.optimizers.Adam(learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.Adam(learning_rate, beta_1=0.5)
    # a fixed noise for sampling
    z_sample = tf.random.normal([100, z_dim])

    g_loss_meter = keras.metrics.Mean()
    d_loss_meter = keras.metrics.Mean()
    gp_meter = keras.metrics.Mean()

    for epoch in range(epochs):

        # train D
        for step in range(ratios):
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)
            with tf.GradientTape() as tape:
                d_loss, gp = d_loss_fn(generator, discriminator, batch_z,
                                       batch_x)

            d_loss_meter.update_state(d_loss)
            gp_meter.update_state(gp)

            gradients = tape.gradient(d_loss,
                                      discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(gradients, discriminator.trainable_variables))

        # train G
        batch_z = tf.random.normal([batch_size, z_dim])
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z)

        g_loss_meter.update_state(g_loss)

        gradients = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(
            zip(gradients, generator.trainable_variables))

        if epoch % 100 == 0:

            fake_image = generator(z_sample, training=False)

            print(epoch, 'd-loss:',
                  d_loss_meter.result().numpy(), 'g-loss:',
                  g_loss_meter.result().numpy(), 'gp:',
                  gp_meter.result().numpy())

            d_loss_meter.reset_states()
            g_loss_meter.reset_states()
            gp_meter.reset_states()

            # save generated image samples
            img_path = os.path.join('images_wgan_gp', 'wgan_gp-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

        if (epoch + 1) % 2000 == 0:
            generator.save_weights(
                os.path.join('checkpoints_gp', 'generator-%d' % epoch))
            discriminator.save_weights(
                os.path.join('checkpoints_gp', 'discriminator-%d' % epoch))
Example #8
def main():

    tf.random.set_seed(3333)
    np.random.seed(3333)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    z_dim = 100  # length of the latent vector z
    epochs = 3000000  # training steps
    batch_size = 64
    learning_rate = 0.0002
    is_training = True

    # Collect the dataset file paths (change to your own location)
    img_path = glob.glob(
        r'/home/ulysses/workspace/AI/Deep-Learning-with-TensorFlow-book/ch13/faces/*.jpg'
    )
    print('images num:', len(img_path))
    # Build the dataset object
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size, resize=64)
    print(dataset, img_shape)
    sample = next(iter(dataset))  # draw one batch
    print(sample.shape,
          tf.reduce_max(sample).numpy(),
          tf.reduce_min(sample).numpy())
    dataset = dataset.repeat(100)  # repeat for multiple passes
    db_iter = iter(dataset)

    generator = Generator()  # create the generator
    generator.build(input_shape=(4, z_dim))
    discriminator = Discriminator()  # create the discriminator
    discriminator.build(input_shape=(4, 64, 64, 3))
    # Create separate optimizers for the generator and discriminator
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    if os.path.exists(r'./generator.ckpt.index'):
        generator.load_weights('generator.ckpt')
        print('Loaded generator ckpt!')
    if os.path.exists(r'./discriminator.ckpt.index'):
        discriminator.load_weights('discriminator.ckpt')
        print('Loaded discriminator ckpt!')

    d_losses, g_losses = [], []
    for epoch in range(epochs):  # train for `epochs` steps
        # 1. Train the discriminator
        for _ in range(1):
            # Sample latent vectors
            batch_z = tf.random.normal([batch_size, z_dim])
            batch_x = next(db_iter)  # sample real images
            # Discriminator forward pass
            with tf.GradientTape() as tape:
                d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                                   is_training)
            grads = tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(
                zip(grads, discriminator.trainable_variables))
        # 2. Train the generator
        # Sample latent vectors
        batch_z = tf.random.normal([batch_size, z_dim])
        batch_x = next(db_iter)  # advance the iterator (unused by the generator loss)
        # Generator forward pass
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))
            # Visualize intermediate results
            z = tf.random.normal([100, z_dim])
            fake_image = generator(z, training=False)
            img_path = os.path.join('gan_images1', 'gan-%d.png' % epoch)
            save_result(fake_image.numpy(), 10, img_path, color_mode='P')

            d_losses.append(float(d_loss))
            g_losses.append(float(g_loss))

        if epoch % 10000 == 0:
            # save to the same paths the loading code above expects
            generator.save_weights('generator.ckpt')
            discriminator.save_weights('discriminator.ckpt')