def main(): """Main train loop and models definition.""" def real_gen(): """generator of real values.""" for _ in tf.range(100): yield ((10.0, ), (0, )) num_classes = 1 latent_dim = 100 generator = keras.Sequential([keras.layers.Dense(1)]) left_input = tf.keras.layers.Input(shape=(1, )) left = tf.keras.layers.Dense(10, activation=tf.nn.elu)(left_input) right_input = tf.keras.layers.Input(shape=(latent_dim, )) right = tf.keras.layers.Dense(10, activation=tf.nn.elu)(right_input) net = tf.keras.layers.Concatenate()([left, right]) out = tf.keras.layers.Dense(1)(net) discriminator = tf.keras.Model(inputs=[left_input, right_input], outputs=[out]) encoder = keras.Sequential([keras.layers.Dense(latent_dim)]) generator_bce = GeneratorBCE() encoder_bce = EncoderBCE() minmax = DiscriminatorMinMax() epochs = 100 logdir = "log/adversarial/encoder" # Fake pre-trained classifier classifier = tf.keras.Sequential( [tf.keras.layers.Dense(10), tf.keras.layers.Dense(num_classes)]) metrics = [ EncodingAccuracy(classifier, model_selection_operator=operator.gt, logdir=logdir) ] trainer = EncoderTrainer( generator, discriminator, encoder, tf.optimizers.Adam(1e-4), tf.optimizers.Adam(1e-5), tf.optimizers.Adam(1e-6), generator_bce, minmax, encoder_bce, epochs, metrics=metrics, logdir=logdir, ) batch_size = 10 discriminator_input = tf.data.Dataset.from_generator( real_gen, (tf.float32, tf.int64), ((1), (1))).batch(batch_size) dataset = discriminator_input.map(lambda x, y: ( (x, y), tf.random.normal(shape=(batch_size, latent_dim)))) trainer(dataset)
def __init__(
    self,
    logdir: Union[Path, str] = "testlog",
    kernel_size=(5, 5),
    metrics=None,
    callbacks=None,
    epochs=2,
    dataset_size=2,
    batch_size=2,
    generator_loss=GeneratorBCE(),
    discriminator_loss=DiscriminatorMinMax(),
    image_resolution=(28, 28),
    layer_spec_input_res=(7, 7),
    layer_spec_target_res=(7, 7),
    channels=1,
    output_shape=1,
    latent_dim=100,
    # Call parameters
    measure_performance_freq=10,
    # Models from outside
    generator=None,
    discriminator=None,
):
    """Fake training loop implementation."""
    # NOTE: the loss defaults above are instantiated once, at function
    # definition time, so they are shared by every instance using them.
    self.generator_loss = generator_loss
    self.discriminator_loss = discriminator_loss
    self.epochs = epochs
    self.logdir = logdir
    self.measure_performance_freq = measure_performance_freq

    # Test parameters
    if callbacks is None:
        callbacks = []
    if metrics is None:
        metrics = []
    self.metrics = metrics
    self.callbacks = callbacks

    # Model definition: basic_dcgan is a test helper returning a
    # (generator, discriminator) pair.
    models = basic_dcgan(
        image_resolution=image_resolution,
        layer_spec_input_res=layer_spec_input_res,
        layer_spec_target_res=layer_spec_target_res,
        kernel_size=kernel_size,
        channels=channels,
        output_shape=output_shape,
    )
    if generator is None:
        generator = models[0]
    if discriminator is None:
        discriminator = models[1]
    self.generator = generator
    self.discriminator = discriminator

    # Trainer
    self.trainer: AdversarialTrainer
    self.build_trainer()

    self.dataset = fake_adversarial_dataset(
        image_resolution=image_resolution,
        epochs=epochs,
        dataset_size=dataset_size,
        batch_size=batch_size,
        latent_dim=latent_dim,
        channels=channels,
    )
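
# A minimal sketch of the `build_trainer` hook invoked above; it is not in
# the original excerpt. It mirrors the AdversarialTrainer construction used
# in `fake_training_loop` below, and the Adam learning rates are assumptions.
def build_trainer(self):
    """Build the AdversarialTrainer from the models and losses stored on self."""
    self.trainer = AdversarialTrainer(
        generator=self.generator,
        discriminator=self.discriminator,
        generator_optimizer=tf.optimizers.Adam(1e-4),
        discriminator_optimizer=tf.optimizers.Adam(1e-4),
        generator_loss=self.generator_loss,
        discriminator_loss=self.discriminator_loss,
        epochs=self.epochs,
        metrics=self.metrics,
        callbacks=self.callbacks,
        logdir=self.logdir,
    )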
def main(): """Adversarial trainer example.""" strategy = tf.distribute.MirroredStrategy() with strategy.scope(): generator = ConvGenerator( layer_spec_input_res=(7, 7), layer_spec_target_res=(28, 28), kernel_size=(5, 5), initial_filters=256, filters_cap=16, channels=1, ) discriminator = ConvDiscriminator( layer_spec_input_res=(28, 28), layer_spec_target_res=(7, 7), kernel_size=(5, 5), initial_filters=32, filters_cap=128, output_shape=1, ) # Losses generator_bce = GeneratorBCE() minmax = DiscriminatorMinMax() # Trainer logdir = "log/adversarial" # InceptionScore: keep commented until the issues # https://github.com/tensorflow/tensorflow/issues/28599 # https://github.com/tensorflow/hub/issues/295 # Haven't been solved and merged into tf2 metrics = [ # InceptionScore( # InceptionScore.get_or_train_inception( # mnist_dataset, # "mnist", # num_classes=10, # epochs=1, # fine_tuning=False, # logdir=logdir, # ), # model_selection_operator=operator.gt, # logdir=logdir, # ) ] epochs = 50 trainer = AdversarialTrainer( generator=generator, discriminator=discriminator, generator_optimizer=tf.optimizers.Adam(1e-4), discriminator_optimizer=tf.optimizers.Adam(1e-4), generator_loss=generator_bce, discriminator_loss=minmax, epochs=epochs, metrics=metrics, logdir=logdir, ) batch_size = 512 # Real data mnist_x, mnist_y = keras.datasets.mnist.load_data()[0] def iterator(): """Define an iterator in order to do not load in memory all the dataset.""" for image, label in zip(mnist_x, mnist_y): yield tf.image.convert_image_dtype(tf.expand_dims(image, -1), tf.float32), tf.expand_dims( label, -1) real_data = (tf.data.Dataset.from_generator( iterator, (tf.float32, tf.int64), ((28, 28, 1), (1, ))).batch(batch_size).prefetch(1)) # Add noise in the same dataset, just by mapping. # The return type of the dataset must be: tuple(tuple(a,b), noise) dataset = real_data.map( lambda x, y: ((x, y), tf.random.normal(shape=(batch_size, 100)))) trainer(dataset)
def fake_training_loop(
    adversarial_logdir,
    generator=None,
    discriminator=None,
    metrics=None,
    callbacks=None,
    epochs=2,
    dataset_size=2,
    batch_size=2,
    generator_loss=GeneratorBCE(),
    discriminator_loss=DiscriminatorMinMax(),
    image_resolution=(28, 28),
    layer_spec_input_res=(7, 7),
    layer_spec_target_res=(7, 7),
    channels=1,
):
    """Fake training loop implementation."""
    # Test parameters
    if callbacks is None:
        callbacks = []
    if metrics is None:
        metrics = []
    kernel_size = (5, 5)
    latent_dim = 100

    # Model definition
    if generator is None:
        generator = ConvGenerator(
            layer_spec_input_res=layer_spec_input_res,
            layer_spec_target_res=image_resolution,
            kernel_size=kernel_size,
            initial_filters=32,
            filters_cap=16,
            channels=channels,
        )
    if discriminator is None:
        discriminator = ConvDiscriminator(
            layer_spec_input_res=image_resolution,
            layer_spec_target_res=layer_spec_target_res,
            kernel_size=kernel_size,
            initial_filters=16,
            filters_cap=32,
            output_shape=1,
        )

    # Real data: all-zero images and labels are enough for a fake loop.
    data_x, data_y = (
        tf.zeros((dataset_size, image_resolution[0], image_resolution[1], channels)),
        tf.zeros((dataset_size, 1)),
    )

    # Trainer
    trainer = AdversarialTrainer(
        generator=generator,
        discriminator=discriminator,
        generator_optimizer=tf.optimizers.Adam(1e-4),
        discriminator_optimizer=tf.optimizers.Adam(1e-4),
        generator_loss=generator_loss,
        discriminator_loss=discriminator_loss,
        epochs=epochs,
        metrics=metrics,
        callbacks=callbacks,
        logdir=adversarial_logdir,
    )

    # Dataset: take only `dataset_size` samples to speed up tests.
    real_data = (
        tf.data.Dataset.from_tensor_slices((data_x, data_y))
        .take(dataset_size)
        .batch(batch_size)
        .prefetch(1)
    )

    # Add the noise to the same dataset, simply by mapping.
    # The return type of the dataset must be: tuple(tuple(a, b), noise)
    dataset = real_data.map(
        lambda x, y: ((x, y), tf.random.normal(shape=(batch_size, latent_dim)))
    )

    trainer(dataset)
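
# Smoke-test invocation sketch (the logdir path is hypothetical): the tiny
# defaults above make a full run cheap enough for a unit test.
if __name__ == "__main__":
    fake_training_loop("testlog/adversarial", epochs=1)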