def example_gan_unrolled_hinge(path, depth_g, depth_d, clipvalue=2.0):
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape, hidden_dim=512, batch_norm_mode=-1)
    # discriminator (x -> y)
    discriminator = model_discriminator(input_shape, output_activation='linear', hidden_dim=512,
                                        batch_norm_mode=-1, dropout=0)
    example_gan(UnrolledAdversarialOptimizer(depth_g=depth_g, depth_d=depth_d), path,
                opt_g=Adam(1e-4, decay=1e-4, clipvalue=clipvalue),
                opt_d=Adam(1e-3, decay=1e-4, clipvalue=clipvalue),
                nb_epoch=50, generator=generator, discriminator=discriminator,
                latent_dim=latent_dim, loss="squared_hinge", targets=gan_targets_hinge)
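# Usage sketch (added here for illustration, not part of the original example):
# one way the hinge-loss unrolled GAN above might be invoked. The output path
# and the unrolling depths are assumptions chosen for this sketch.
def run_example_gan_unrolled_hinge():
    example_gan_unrolled_hinge("output/unrolled_gan_hinge/k_8_1", depth_g=8, depth_d=1)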
def example_gan_unrolled(path, depth_g, depth_d):
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape, hidden_dim=512, batch_norm_mode=1)
    # discriminator (x -> y)
    discriminator = model_discriminator(input_shape, hidden_dim=512, dropout=0, batch_norm_mode=1)
    example_gan(UnrolledAdversarialOptimizer(depth_g=depth_g, depth_d=depth_d), path,
                opt_g=Adam(1e-4, decay=1e-4),
                opt_d=Adam(1e-3, decay=1e-4),
                nb_epoch=50, generator=generator, discriminator=discriminator,
                latent_dim=latent_dim)
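# Usage sketch (added for illustration, not part of the original example):
# sweep a few unrolling depths for the standard unrolled GAN above. The output
# directory layout and the chosen (depth_g, depth_d) pairs are assumptions.
def run_example_gan_unrolled_sweep():
    for depth_g, depth_d in [(1, 1), (4, 1), (8, 1)]:
        example_gan_unrolled("output/unrolled_gan/k_{}_{}".format(depth_g, depth_d),
                             depth_g=depth_g, depth_d=depth_d)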
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator, normal_latent_sampling((latent_dim,)))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(base_model=bigan,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png", generator_sampler)

    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback("output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
def example_bigan(path, adversarial_optimizer):
    # z \in R^25
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
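# Driver sketch (an addition, not part of the original example): runs the
# parameterised BiGAN example above with the simultaneous-update optimizer used
# in main(). The "output/bigan" path mirrors main() and is otherwise an
# illustrative choice.
def run_example_bigan():
    example_bigan("output/bigan", AdversarialOptimizerSimultaneous())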