Example #1
import os
import glob

import numpy as np
import tensorflow as tf

# Generator, Discriminator, d_loss_func, g_loss_func, make_anime_dataset and
# save_result are assumed to be defined elsewhere in the project this example
# is taken from.


def main():
    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    z_dim = 100
    epochs = 3000000
    batch_size = 512
    learning_rate = 0.005
    training = True

    # collect the anime-face image paths and build the training dataset
    img_path = glob.glob(r'.\faces\*.jpg')
    dataset, img_shape, _ = make_anime_dataset(img_path, batch_size)
    print(dataset, img_shape)
    sample_picture = next(iter(dataset))
    print(sample_picture.shape, tf.reduce_max(sample_picture).numpy(), tf.reduce_min(sample_picture).numpy())
    dataset = dataset.repeat()
    ds_iter = iter(dataset)

    generator = Generator()
    generator.build(input_shape=(None, z_dim))

    discriminator = Discriminator()
    discriminator.build(input_shape=(None, 64, 64, 3))

    g_optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.5)
    d_optimizer = tf.optimizers.RMSprop(learning_rate=learning_rate)

    for epoch in range(epochs):
        batch_z = tf.random.uniform([batch_size, z_dim], minval=-1., maxval=1.)
        batch_r = next(ds_iter)

        # discriminator training
        with tf.GradientTape() as tape:
            d_loss = d_loss_func(generator, discriminator, batch_z, batch_r, training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

        # generator training
        with tf.GradientTape() as tape:
            g_loss = g_loss_func(generator, discriminator, batch_z, training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print('Current epoch:', epoch, 'd_loss:', d_loss, 'g_loss:', g_loss)

            # sample from the same [-1, 1] range used during training
            z = tf.random.uniform([100, z_dim], minval=-1., maxval=1.)
            g_imgs = generator(z, training=False)
            os.makedirs('images', exist_ok=True)
            save_path = os.path.join('images', 'gan-%d.png' % epoch)
            save_result(g_imgs.numpy(), 10, save_path, color_mode='P')
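
The example above calls d_loss_func and g_loss_func without showing their implementations. As a rough sketch, a standard (non-saturating) GAN loss pair matching these call signatures could look like the code below, assuming the discriminator returns raw logits; the helper names celoss_ones and celoss_zeros are introduced here purely for illustration and are not part of the original example.

import tensorflow as tf

def celoss_ones(logits):
    # cross-entropy against a target of 1 ("real") for every sample
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits),
                                                logits=logits))

def celoss_zeros(logits):
    # cross-entropy against a target of 0 ("fake") for every sample
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits),
                                                logits=logits))

def d_loss_func(generator, discriminator, batch_z, batch_r, training):
    # the discriminator should label real images as real, generated ones as fake
    fake_images = generator(batch_z, training)
    d_fake_logits = discriminator(fake_images, training)
    d_real_logits = discriminator(batch_r, training)
    return celoss_ones(d_real_logits) + celoss_zeros(d_fake_logits)

def g_loss_func(generator, discriminator, batch_z, training):
    # the generator tries to make the discriminator classify its output as real
    fake_images = generator(batch_z, training)
    d_fake_logits = discriminator(fake_images, training)
    return celoss_ones(d_fake_logits)

Example #2 below calls d_loss_fn and g_loss_fn with the same signature pattern.
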
Example #2
import os

import numpy as np
import tensorflow as tf
from tensorflow import keras

# Generator, Discriminator, d_loss_fn, g_loss_fn and save_result are assumed
# to be defined elsewhere in the project this example is taken from.


def main():

    tf.random.set_seed(22)
    np.random.seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert tf.__version__.startswith('2.')

    # hyperparameters
    z_dim = 100
    epochs = 3000000
    batch_size = 128
    learning_rate = 0.0002
    is_training = True

    # for validation purpose
    assets_dir = './images'
    if not os.path.isdir(assets_dir):
        os.makedirs(assets_dir)
    val_block_size = 10
    val_size = val_block_size * val_block_size

    # load mnist data
    (x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
    x_train = x_train.astype(np.float32) / 255.
    db = tf.data.Dataset.from_tensor_slices(x_train).shuffle(
        batch_size * 4).batch(batch_size).repeat()
    db_iter = iter(db)
    inputs_shape = [-1, 28, 28, 1]

    # create generator & discriminator
    generator = Generator()
    generator.build(input_shape=(batch_size, z_dim))
    generator.summary()
    discriminator = Discriminator()
    discriminator.build(input_shape=(batch_size, 28, 28, 1))
    discriminator.summary()

    # prepare optimizer
    d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)
    g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate,
                                        beta_1=0.5)

    for epoch in range(epochs):

        # labels are not needed for GAN training
        batch_x = next(db_iter)

        # reshape to NHWC and rescale from [0, 1] to [-1, 1]
        batch_x = tf.reshape(batch_x, shape=inputs_shape)
        batch_x = batch_x * 2.0 - 1.0

        batch_z = tf.random.uniform(shape=[batch_size, z_dim],
                                    minval=-1.,
                                    maxval=1.)

        # discriminator training
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x,
                               is_training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(
            zip(grads, discriminator.trainable_variables))

        # generator training
        with tf.GradientTape() as tape:
            g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
        grads = tape.gradient(g_loss, generator.trainable_variables)
        g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

        if epoch % 100 == 0:
            print(epoch, 'd loss:', float(d_loss), 'g loss:', float(g_loss))

            # save validation results every 100 epochs
            val_z = np.random.uniform(-1, 1, size=(val_size, z_dim)).astype(np.float32)
            fake_image = generator(val_z, training=False)
            image_fn = os.path.join(assets_dir,
                                    'gan-val-{:03d}.png'.format(epoch + 1))
            save_result(fake_image.numpy(),
                        val_block_size,
                        image_fn,
                        color_mode='L')
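
Neither example shows the Generator and Discriminator classes themselves. Below is a minimal, assumed sketch of a DCGAN-style pair that would fit Example #2's training loop (z_dim-dimensional noise in, 28x28x1 images in [-1, 1] out, a single raw logit from the discriminator); the specific layer choices are illustrative assumptions, not the original architecture.

import tensorflow as tf
from tensorflow import keras


class Generator(keras.Model):
    def __init__(self):
        super().__init__()
        self.fc = keras.layers.Dense(7 * 7 * 128)
        self.conv1 = keras.layers.Conv2DTranspose(64, 4, strides=2, padding='same')
        self.bn1 = keras.layers.BatchNormalization()
        self.conv2 = keras.layers.Conv2DTranspose(1, 4, strides=2, padding='same')

    def call(self, inputs, training=None):
        x = tf.nn.leaky_relu(self.fc(inputs))
        x = tf.reshape(x, [-1, 7, 7, 128])
        x = tf.nn.leaky_relu(self.bn1(self.conv1(x), training=training))
        # tanh keeps the output in [-1, 1], matching the rescaled real images
        return tf.tanh(self.conv2(x))


class Discriminator(keras.Model):
    def __init__(self):
        super().__init__()
        self.conv1 = keras.layers.Conv2D(64, 4, strides=2, padding='same')
        self.conv2 = keras.layers.Conv2D(128, 4, strides=2, padding='same')
        self.bn2 = keras.layers.BatchNormalization()
        self.flatten = keras.layers.Flatten()
        self.fc = keras.layers.Dense(1)  # raw logit, no sigmoid

    def call(self, inputs, training=None):
        x = tf.nn.leaky_relu(self.conv1(inputs))
        x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training))
        return self.fc(self.flatten(x))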