Example #1
0
def train():
    """Train a DCGAN generator/discriminator pair on CelebA.

    Alternates generator and discriminator updates from a single persistent
    gradient tape, logs losses every ``flags.print_every_step`` steps, and
    saves weights plus a grid of generated samples every
    ``flags.save_every_epoch`` epochs.

    NOTE(review): relies on module-level names (`flags`, `get_celebA`,
    `get_generator`, `get_discriminator`, `num_tiles`, `tl`, `tf`, `np`,
    `time`) that are not visible in this block — confirm they are defined
    at module scope.
    """
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator([None, flags.z_dim])
    D = get_discriminator(
        [None, flags.output_size, flags.output_size, flags.c_dim])

    # Enable training mode (dropout / batch-norm statistics updates).
    G.train()
    D.train()

    d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for epoch in range(flags.n_epoch):
        for step, batch_images in enumerate(images):
            # Drop the trailing partial batch so z and images stay aligned.
            if batch_images.shape[0] != flags.batch_size:
                break

            step_time = time.time()
            # persistent=True: the tape is read twice below (g_loss, d_loss).
            with tf.GradientTape(persistent=True) as tape:
                z = np.random.normal(loc=0.0,
                                     scale=1.0,
                                     size=[flags.batch_size,
                                           flags.z_dim]).astype(np.float32)
                d_logits = D(G(z))
                d2_logits = D(batch_images)
                # discriminator: real images are labelled as 1
                d_loss_real = tl.cost.sigmoid_cross_entropy(
                    d2_logits, tf.ones_like(d2_logits), name='dreal')
                # discriminator: images from generator (fake) are labelled as 0
                d_loss_fake = tl.cost.sigmoid_cross_entropy(
                    d_logits, tf.zeros_like(d_logits), name='dfake')
                # combined loss for updating discriminator
                d_loss = d_loss_real + d_loss_fake
                # generator: try to fool discriminator to output 1
                g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                       tf.ones_like(d_logits),
                                                       name='gfake')

            # Generator update first, then discriminator, both read from the
            # same persistent tape.
            grad = tape.gradient(g_loss, G.trainable_weights)
            g_optimizer.apply_gradients(zip(grad, G.trainable_weights))
            grad = tape.gradient(d_loss, D.trainable_weights)
            d_optimizer.apply_gradients(zip(grad, D.trainable_weights))
            del tape  # release resources held by the persistent tape

            if step % flags.print_every_step == 0:
                print("Epoch: [{}/{}] [{}/{}] took: {:3f}, d_loss: {:5f}, g_loss: {:5f}".format(epoch, \
                      flags.n_epoch, step, n_step_epoch, time.time()-step_time, d_loss, g_loss))

        if np.mod(epoch, flags.save_every_epoch) == 0:
            G.save_weights('{}/G_{}.h5'.format(flags.checkpoint_dir, epoch))
            D.save_weights('{}/D_{}.h5'.format(flags.checkpoint_dir, epoch))
            # Sample from the last z of the epoch with train-mode layers off.
            G.eval()
            result = G(z)
            G.train()
            # NOTE(review): `num_tiles` is not defined in this function —
            # presumably a module-level constant; confirm before running.
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}.png'.format(flags.sample_dir, epoch))
Example #2
0
def recon():
    """Interactively display original vs. reconstructed CelebA images.

    Iterates the dataset one image at a time; typing 'q' (then Enter) at
    the prompt stops the loop. Uses the module-level generator G and
    encoder E.
    """
    dataset, _ = get_celebA(flags.output_size, 1, 1)

    def to_uint8(img):
        # Map a [-1, 1] tensor batch back to a displayable uint8 image.
        return np.array((img + 1.) * 127.5, dtype=np.uint8)[0]

    for image in dataset:
        if input() == 'q':
            break
        reconstruction = G(E(image))
        for view in (to_uint8(image), to_uint8(reconstruction)):
            plt.imshow(view)
            plt.show()
Example #3
0
def test(weights_pathG, weights_pathE, real_path='real_image.png', reproduced_path='reproduced_image.png'):
    """Load trained G/E weights and save one real/reconstructed image pair.

    Reconstructs the first full batch via G(E(x)) and writes both the real
    batch and its reconstruction as tiled image grids.
    """
    images, images_path = get_celebA(flags.output_size, flags.n_epoch, flags.batch_size)
    tile_count = int(math.ceil(math.sqrt(flags.sample_size)))

    generator = get_generator([None, flags.z_dim])
    generator.load_weights(weights_pathG, format='npz')
    generator.eval()

    encoder = get_encoder([None, flags.output_size, flags.output_size, flags.c_dim])
    encoder.load_weights(weights_pathE, format='npz')
    encoder.eval()

    for batch in images:
        # Stop on a partial batch; otherwise process exactly one full batch.
        if batch.shape[0] != flags.batch_size:
            break
        reconstructed = generator(encoder(batch))
        tl.visualize.save_images(batch.numpy(), [tile_count, tile_count], real_path)
        tl.visualize.save_images(reconstructed.numpy(), [tile_count, tile_count], reproduced_path)
        break
Example #4
0
def train():
    """Train encoder E to invert a fixed, pre-trained generator G.

    Minimizes a latent reconstruction loss |E(G(z)) - z| plus a 5x-weighted
    image reconstruction loss |G(E(x)) - x|; only E's weights are updated.

    NOTE(review): `G_weights`, `learning_rate`, `n_epoch`, `print_every_step`
    and `save_every_epoch` are not defined in this function — presumably
    module-level globals mirroring `flags`; confirm before running.
    """
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator()
    E = get_encoder()
    G.load_weights(G_weights)
    G.train()
    E.train()
    optimizer = tf.optimizers.Adam(learning_rate, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for epoch in range(n_epoch):
        for step, batch_images in enumerate(images):
            # Drop the trailing partial batch so z and images stay aligned.
            if batch_images.shape[0] != flags.batch_size:
                break

            step_time = time.time()
            with tf.GradientTape() as tape:
                # BUG FIX: was bare `batch_size` (undefined/inconsistent);
                # use flags.batch_size to match the batch check above and
                # the rest of this function.
                z = np.random.normal(loc=0.0,
                                     scale=1.0,
                                     size=[flags.batch_size,
                                           flags.z_dim]).astype(np.float32)
                gen = G(z)
                z_encode = E(gen)

                x_encode = E(batch_images)
                x_decode = G(x_encode)

                # Latent-space reconstruction: E should recover z from G(z).
                z_recon_loss = tl.cost.absolute_difference_error(z_encode,
                                                                 z,
                                                                 is_mean=True)
                # Pixel-space reconstruction, weighted 5x.
                x_recon_loss = 5. * tl.cost.absolute_difference_error(
                    x_decode, batch_images, is_mean=True)
                loss = z_recon_loss + x_recon_loss

            # Only the encoder is trained; G stays fixed.
            grad = tape.gradient(loss, E.trainable_weights)
            optimizer.apply_gradients(zip(grad, E.trainable_weights))

            if step % print_every_step == 0:
                print('Epoch: [{}/{}] step: [{}/{}] took: {:3f}, z_recon_loss: {:5f}, x_recon_loss: {:5f}'.format(epoch, n_epoch,\
                      step, n_step_epoch, time.time()-step_time, z_recon_loss, x_recon_loss))

        if epoch % save_every_epoch == 0:
            E.save_weights('{}/E_{}.h5'.format(flags.checkpoint_dir, epoch))
Example #5
0
def train():
    """Train a DCGAN on CelebA with a single flat step loop (no epoch loop).

    Logs losses every step and saves weights plus a sample grid every
    ``flags.save_step`` steps.

    NOTE(review): relies on module-level names (`flags`, `get_celebA`,
    `get_generator`, `get_discriminator`, `num_tiles`, `tl`, `tf`, `np`,
    `time`) not visible in this block; `num_tiles` in particular must be a
    module-level constant — confirm before running.
    """
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator([None, flags.z_dim])
    D = get_discriminator(
        [None, flags.output_size, flags.output_size, flags.c_dim])

    G.train()
    D.train()

    d_optimizer = tf.optimizers.Adam(flags.learning_rate, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.learning_rate, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for step, batch_images in enumerate(images):
        step_time = time.time()
        # persistent=True: the tape is read twice below (g_loss, d_loss).
        with tf.GradientTape(persistent=True) as tape:
            # z = tf.distributions.Normal(0., 1.).sample([flags.batch_size, flags.z_dim]) #tf.placeholder(tf.float32, [None, z_dim], name='z_noise')
            z = np.random.normal(loc=0.0,
                                 scale=1.0,
                                 size=[flags.batch_size,
                                       flags.z_dim]).astype(np.float32)
            d_logits = D(G(z))
            d2_logits = D(batch_images)
            # discriminator: real images are labelled as 1
            d_loss_real = tl.cost.sigmoid_cross_entropy(
                d2_logits, tf.ones_like(d2_logits), name='dreal')
            # discriminator: images from generator (fake) are labelled as 0
            d_loss_fake = tl.cost.sigmoid_cross_entropy(
                d_logits, tf.zeros_like(d_logits), name='dfake')
            # combined loss for updating discriminator
            d_loss = d_loss_real + d_loss_fake
            # generator: try to fool discriminator to output 1
            g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                   tf.ones_like(d_logits),
                                                   name='gfake')

        # BUG FIX: differentiate/apply w.r.t. trainable_weights (as the other
        # train() variants in this file do), not all weights — tape.gradient
        # returns None for non-trainable weights (e.g. batch-norm moving
        # statistics), which apply_gradients rejects.
        grad = tape.gradient(g_loss, G.trainable_weights)
        g_optimizer.apply_gradients(zip(grad, G.trainable_weights))
        grad = tape.gradient(d_loss, D.trainable_weights)
        d_optimizer.apply_gradients(zip(grad, D.trainable_weights))
        del tape  # release resources held by the persistent tape

        print(
            "Epoch: [{}/{}] [{}/{}] took: {:3f}, d_loss: {:5f}, g_loss: {:5f}".
            format(step // n_step_epoch, flags.n_epoch, step, n_step_epoch,
                   time.time() - step_time, d_loss, g_loss))
        if np.mod(step, flags.save_step) == 0:
            G.save_weights('{}/G.npz'.format(flags.checkpoint_dir),
                           format='npz')
            D.save_weights('{}/D.npz'.format(flags.checkpoint_dir),
                           format='npz')
            # Sample with train-mode layers disabled, then restore.
            G.eval()
            result = G(z)
            G.train()
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}_{:04d}.png'.format(flags.sample_dir,
                                                    step // n_step_epoch,
                                                    step))
Example #6
0
def train():
    """Jointly train generator G, discriminator D and encoder E (ALI/BiGAN-style).

    D receives (image, latent) pairs: fakes are (G(z), z), reals are
    (x, E(x)). G, D and E are each updated from one persistent tape per
    step. Weights and sample/reconstruction grids are saved every
    ``flags.save_every_epoch`` epochs.

    NOTE(review): relies on module-level names (`flags`, `get_celebA`,
    `get_generator`, `get_discriminator`, `get_encoder`, `num_tiles`,
    `tl`, `tf`, `np`, `time`) not visible in this block — confirm.
    """
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator([None, flags.z_dim])
    D = get_discriminator(
        [None, flags.z_dim],
        [None, flags.output_size, flags.output_size, flags.c_dim])
    E = get_encoder([None, flags.output_size, flags.output_size, flags.c_dim])

    # Optionally resume from a previous checkpoint.
    if flags.load_weights:
        E.load_weights('checkpoint/E.npz', format='npz')
        G.load_weights('checkpoint/G.npz', format='npz')
        D.load_weights('checkpoint/D.npz', format='npz')

    G.train()
    D.train()
    E.train()

    d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    e_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for epoch in range(flags.n_epoch):
        for step, batch_images in enumerate(images):
            # Drop the trailing partial batch so z and images stay aligned.
            if batch_images.shape[0] != flags.batch_size:
                break
            step_time = time.time()

            # persistent=True: the tape is read three times (g, d, e losses).
            with tf.GradientTape(persistent=True) as tape:
                z = np.random.normal(loc=0.0,
                                     scale=1.0,
                                     size=[flags.batch_size,
                                           flags.z_dim]).astype(np.float32)

                # Fake pair (G(z), z) vs. real pair (x, E(x)).
                d_logits = D([G(z), z])
                d2_logits = D([batch_images, E(batch_images)])

                # discriminator: real pairs labelled 1, fake pairs labelled 0
                d_loss_real = tl.cost.sigmoid_cross_entropy(
                    d2_logits, tf.ones_like(d2_logits), name='dreal')
                d_loss_fake = tl.cost.sigmoid_cross_entropy(
                    d_logits, tf.zeros_like(d_logits), name='dfake')
                d_loss = d_loss_fake + d_loss_real

                # generator: make fake pairs look real (label 1)
                g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                       tf.ones_like(d_logits),
                                                       name='gfake')

                # encoder: make real pairs look fake (label 0) — the
                # adversarial counterpart to g_loss.
                e_loss = tl.cost.sigmoid_cross_entropy(
                    d2_logits, tf.zeros_like(d2_logits), name='ereal')

            # Update each network from the shared persistent tape.
            grad = tape.gradient(g_loss, G.trainable_weights)
            g_optimizer.apply_gradients(zip(grad, G.trainable_weights))
            grad = tape.gradient(d_loss, D.trainable_weights)
            d_optimizer.apply_gradients(zip(grad, D.trainable_weights))
            grad = tape.gradient(e_loss, E.trainable_weights)
            e_optimizer.apply_gradients(zip(grad, E.trainable_weights))

            del tape  # release resources held by the persistent tape

            print(
                "Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}, e_loss: {:.5f}"
                .format(epoch, flags.n_epoch, step, n_step_epoch,
                        time.time() - step_time, d_loss, g_loss, e_loss))

        if np.mod(epoch, flags.save_every_epoch) == 0:
            G.save_weights('{}/G.npz'.format(flags.checkpoint_dir),
                           format='npz')
            D.save_weights('{}/D.npz'.format(flags.checkpoint_dir),
                           format='npz')
            E.save_weights('{}/E.npz'.format(flags.checkpoint_dir),
                           format='npz')
            # Sample from the last z of the epoch with train-mode layers off.
            G.eval()
            result = G(z)
            G.train()
            # NOTE(review): `num_tiles` is not defined in this function —
            # presumably a module-level constant; confirm before running.
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}.png'.format(flags.sample_dir, epoch))

            # Save a real/reconstruction pair for the first full batch.
            for step, batch_images in enumerate(images):
                if batch_images.shape[0] != flags.batch_size:
                    break
                result = G(E(batch_images))
                tl.visualize.save_images(
                    batch_images.numpy(), [num_tiles, num_tiles],
                    '{}/real_{:02d}.png'.format(flags.pair_dir, epoch))
                tl.visualize.save_images(
                    result.numpy(), [num_tiles, num_tiles],
                    '{}/reproduced_{:02d}.png'.format(flags.pair_dir, epoch))
                break
Example #7
0
def train():
    """Train an InfoGAN-style model (G, shared Base, D head, Q head) on CelebA.

    G and Q are updated together on g_loss (adversarial + mutual-information
    term); D and Base are updated together on d_loss. Loss curves are saved
    each epoch, and categorical-code sample grids are rendered after every
    epoch.

    NOTE(review): relies on module-level names (`flags`, `get_celebA`,
    `get_G`, `get_base`, `get_D`, `get_Q`, `gen_noise`, `calc_mutual`,
    `gen_eval_noise`, `convert`, `tl`, `tf`, `plt`, `time`) not visible in
    this block — confirm they are defined at module scope.
    """
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_G([None, flags.dim_z])
    Base = get_base(
        [None, flags.output_size, flags.output_size, flags.n_channel])
    # D and Q are separate heads on top of Base's 4096-dim features.
    D = get_D([None, 4096])
    Q = get_Q([None, 4096])

    G.train()
    Base.train()
    D.train()
    Q.train()

    g_optimizer = tf.optimizers.Adam(learning_rate=flags.G_learning_rate,
                                     beta_1=flags.beta_1)
    d_optimizer = tf.optimizers.Adam(learning_rate=flags.D_learning_rate,
                                     beta_1=flags.beta_1)

    n_step_epoch = int(len(images_path) // flags.batch_size)
    # Loss histories for the per-epoch plot.
    his_g_loss = []
    his_d_loss = []
    his_mutual = []
    count = 0

    for epoch in range(flags.n_epoch):
        for step, batch_images in enumerate(images):
            count += 1
            # Drop the trailing partial batch.
            if batch_images.shape[0] != flags.batch_size:
                break
            step_time = time.time()
            # persistent=True: the tape is read twice below (g_loss, d_loss).
            with tf.GradientTape(persistent=True) as tape:
                # z is the full noise vector; c the categorical latent code.
                z, c = gen_noise()
                fake = Base(G(z))
                fake_logits = D(fake)
                fake_cat = Q(fake)
                real_logits = D(Base(batch_images))

                d_loss_fake = tl.cost.sigmoid_cross_entropy(
                    output=fake_logits,
                    target=tf.zeros_like(fake_logits),
                    name='d_loss_fake')
                d_loss_real = tl.cost.sigmoid_cross_entropy(
                    output=real_logits,
                    target=tf.ones_like(real_logits),
                    name='d_loss_real')
                d_loss = d_loss_fake + d_loss_real

                g_loss = tl.cost.sigmoid_cross_entropy(
                    output=fake_logits,
                    target=tf.ones_like(fake_logits),
                    name='g_loss_fake')

                # InfoGAN mutual-information term between Q's output and c,
                # added to the generator objective.
                mutual = calc_mutual(fake_cat, c)
                g_loss += mutual

            # G and Q share the generator objective; D and Base share the
            # discriminator objective.
            grad = tape.gradient(g_loss,
                                 G.trainable_weights + Q.trainable_weights)
            g_optimizer.apply_gradients(
                zip(grad, G.trainable_weights + Q.trainable_weights))
            grad = tape.gradient(d_loss,
                                 D.trainable_weights + Base.trainable_weights)
            d_optimizer.apply_gradients(
                zip(grad, D.trainable_weights + Base.trainable_weights))
            del tape  # release resources held by the persistent tape
            print(
                f"Epoch: [{epoch}/{flags.n_epoch}] [{step}/{n_step_epoch}] took: {time.time()-step_time:.3f}, d_loss: {d_loss:.5f}, g_loss: {g_loss:.5f}, mutual: {mutual:.5f}"
            )

            # Record history every save_every_it iterations (`== 1` so the
            # very first step, count == 1, is recorded).
            if count % flags.save_every_it == 1:
                his_g_loss.append(g_loss)
                his_d_loss.append(d_loss)
                his_mutual.append(mutual)

        # Overwrite the loss plot at the end of every epoch.
        plt.plot(his_d_loss)
        plt.plot(his_g_loss)
        plt.plot(his_mutual)
        plt.legend(['D_Loss', 'G_Loss', 'Mutual_Info'])
        plt.xlabel(f'Iterations / {flags.save_every_it}')
        plt.ylabel('Loss')
        plt.savefig(f'{flags.result_dir}/loss.jpg')
        plt.clf()
        plt.close()

        G.save_weights(f'{flags.checkpoint_dir}/G.npz', format='npz')
        D.save_weights(f'{flags.checkpoint_dir}/D.npz', format='npz')
        # Render one sample grid per categorical code with train-mode off.
        G.eval()
        for k in range(flags.n_categorical):
            z = gen_eval_noise(k, flags.n_sample)
            result = G(z)
            tl.visualize.save_images(convert(result.numpy()),
                                     [flags.n_sample, flags.dim_categorical],
                                     f'result/train_{epoch}_{k}.png')
        G.train()