Example #1
def _models():
    image_resolution = (28, 28)
    layer_spec_input_res = (7, 7)
    layer_spec_target_res = (7, 7)
    kernel_size = 5
    channels = 1

    # Model definition
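    # The generator grows feature maps from a 7x7 input resolution up to the
    # 28x28 image resolution; the discriminator below mirrors it, reducing
    # 28x28 inputs back to 7x7 before the single-logit output.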
    generator = ConvGenerator(
        layer_spec_input_res=layer_spec_input_res,
        layer_spec_target_res=image_resolution,
        kernel_size=kernel_size,
        initial_filters=32,
        filters_cap=16,
        channels=channels,
    )

    discriminator = ConvDiscriminator(
        layer_spec_input_res=image_resolution,
        layer_spec_target_res=layer_spec_target_res,
        kernel_size=kernel_size,
        initial_filters=16,
        filters_cap=32,
        output_shape=1,
    )

    return generator, discriminator
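
A minimal smoke test of this helper might look like the sketch below. It assumes the classes come from the AshPy package (`ashpy.models.gans`) and that the generator accepts a 100-dimensional latent vector, as in the trainer examples further down; both details are assumptions, not part of the original snippet.

# Hypothetical usage sketch; import path and latent size are assumptions.
# from ashpy.models.gans import ConvDiscriminator, ConvGenerator
import tensorflow as tf

def _smoke_test_models():
    generator, discriminator = _models()

    noise = tf.zeros((1, 100))  # (batch, latent_dim); latent_dim assumed
    fake_images = generator(noise)
    assert fake_images.shape == (1, 28, 28, 1)  # 28x28 target res, 1 channel

    logits = discriminator(fake_images)
    assert logits.shape == (1, 1)  # output_shape=1 -> one logit per sample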
Example #2
def test_metrics(adversarial_logdir: str):
    """
    Test the integration between metrics and trainer
    """
    # test parameters
    image_resolution = (256, 256)

    metrics = [
        SlicedWassersteinDistance(
            logdir=adversarial_logdir, resolution=image_resolution[0]
        ),
        SSIM_Multiscale(logdir=adversarial_logdir),
        InceptionScore(
            # Fake inception model
            ConvDiscriminator(
                layer_spec_input_res=(299, 299),
                layer_spec_target_res=(7, 7),
                kernel_size=(5, 5),
                initial_filters=16,
                filters_cap=32,
                output_shape=10,
            ),
            logdir=adversarial_logdir,
        ),
    ]

    fake_training_loop(
        adversarial_logdir,
        metrics=metrics,
        image_resolution=image_resolution,
        layer_spec_input_res=(8, 8),
        layer_spec_target_res=(8, 8),
        channels=3,
    )

    # assert that a folder and a JSON file exist for each metric
    for metric in metrics:
        metric_dir = os.path.join(adversarial_logdir, "best", metric.name)
        assert os.path.exists(metric_dir)
        json_path = os.path.join(metric_dir, f"{metric.name}.json")
        assert os.path.exists(json_path)
        with open(json_path, "r") as fp:
            metric_data = json.load(fp)

            # assert the metric data contains the expected keys
            assert metric.name in metric_data
            assert "step" in metric_data
Example #3
def _test_save_callback_helper(adversarial_logdir, save_format,
                               save_sub_format, save_dir):
    image_resolution = (28, 28)
    layer_spec_input_res = (7, 7)
    layer_spec_target_res = (7, 7)
    kernel_size = 5
    channels = 1

    # model definition
    generator = ConvGenerator(
        layer_spec_input_res=layer_spec_input_res,
        layer_spec_target_res=image_resolution,
        kernel_size=kernel_size,
        initial_filters=32,
        filters_cap=16,
        channels=channels,
    )

    discriminator = ConvDiscriminator(
        layer_spec_input_res=image_resolution,
        layer_spec_target_res=layer_spec_target_res,
        kernel_size=kernel_size,
        initial_filters=16,
        filters_cap=32,
        output_shape=1,
    )

    callbacks = [
        SaveCallback(
            models=[generator, discriminator],
            save_dir=save_dir,
            verbose=1,
            save_format=save_format,
            save_sub_format=save_sub_format,
        )
    ]

    fake_training_loop(
        adversarial_logdir,
        callbacks=callbacks,
        generator=generator,
        discriminator=discriminator,
    )
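
A test would then call this helper once per combination of formats. The sketch below assumes AshPy exposes `SaveFormat` and `SaveSubFormat` enums alongside `SaveCallback`; the enum import and members shown are assumptions about that API.

# Hypothetical parametrized test; the enum import and members are assumptions.
# from ashpy.callbacks import SaveFormat, SaveSubFormat
import pytest

@pytest.mark.parametrize("save_format", [SaveFormat.MODEL, SaveFormat.WEIGHTS])
@pytest.mark.parametrize("save_sub_format", [SaveSubFormat.TF, SaveSubFormat.H5])
def test_save_callback(adversarial_logdir, tmpdir, save_format, save_sub_format):
    _test_save_callback_helper(
        adversarial_logdir, save_format, save_sub_format, save_dir=str(tmpdir)
    )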
Example #4
def main():
    """Adversarial trainer example."""
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():

        generator = ConvGenerator(
            layer_spec_input_res=(7, 7),
            layer_spec_target_res=(28, 28),
            kernel_size=(5, 5),
            initial_filters=256,
            filters_cap=16,
            channels=1,
        )

        discriminator = ConvDiscriminator(
            layer_spec_input_res=(28, 28),
            layer_spec_target_res=(7, 7),
            kernel_size=(5, 5),
            initial_filters=32,
            filters_cap=128,
            output_shape=1,
        )

        # Losses
        generator_bce = GeneratorBCE()
        minmax = DiscriminatorMinMax()

        # Trainer
        logdir = "log/adversarial"

        # InceptionScore: keep commented out until the following issues
        # https://github.com/tensorflow/tensorflow/issues/28599
        # https://github.com/tensorflow/hub/issues/295
        # have been solved and the fixes merged into TF2.

        metrics = [
            # InceptionScore(
            #    InceptionScore.get_or_train_inception(
            #        mnist_dataset,
            #        "mnist",
            #        num_classes=10,
            #        epochs=1,
            #        fine_tuning=False,
            #        logdir=logdir,
            #    ),
            #    model_selection_operator=operator.gt,
            #    logdir=logdir,
            # )
        ]

        epochs = 50
        trainer = AdversarialTrainer(
            generator=generator,
            discriminator=discriminator,
            generator_optimizer=tf.optimizers.Adam(1e-4),
            discriminator_optimizer=tf.optimizers.Adam(1e-4),
            generator_loss=generator_bce,
            discriminator_loss=minmax,
            epochs=epochs,
            metrics=metrics,
            logdir=logdir,
        )

        batch_size = 512

        # Real data
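        # load_data() returns ((x_train, y_train), (x_test, y_test));
        # [0] selects the training split.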
        mnist_x, mnist_y = keras.datasets.mnist.load_data()[0]

        def iterator():
            """Yield samples one at a time so the whole dataset is never held in memory."""
            for image, label in zip(mnist_x, mnist_y):
                yield (
                    tf.image.convert_image_dtype(tf.expand_dims(image, -1), tf.float32),
                    tf.expand_dims(label, -1),
                )

        real_data = (
            tf.data.Dataset.from_generator(
                iterator, (tf.float32, tf.int64), ((28, 28, 1), (1,))
            )
            # drop_remainder keeps every batch at exactly batch_size so it
            # always matches the noise tensor generated in the map below
            .batch(batch_size, drop_remainder=True)
            .prefetch(1)
        )

        # Attach a noise vector to each batch by mapping over the same dataset.
        # The dataset must yield elements of the form ((x, y), noise).
        dataset = real_data.map(
            lambda x, y: ((x, y), tf.random.normal(shape=(batch_size, 100)))
        )

        trainer(dataset)
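
As published, the example relies on names imported elsewhere in the module. A plausible import block and entry point, assuming the AshPy package layout (the module paths are assumptions):

# Hypothetical imports for the example above; module paths are assumed.
import tensorflow as tf
from tensorflow import keras

from ashpy.losses import DiscriminatorMinMax, GeneratorBCE
from ashpy.models.gans import ConvDiscriminator, ConvGenerator
from ashpy.trainers import AdversarialTrainer

if __name__ == "__main__":
    main()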
Example #5
def fake_training_loop(
    adversarial_logdir,
    generator=None,
    discriminator=None,
    metrics=None,
    callbacks=None,
    epochs=2,
    dataset_size=2,
    batch_size=2,
    generator_loss=None,
    discriminator_loss=None,
    image_resolution=(28, 28),
    layer_spec_input_res=(7, 7),
    layer_spec_target_res=(7, 7),
    channels=1,
):
    """Fake training loop implementation."""
    # test parameters
    if callbacks is None:
        callbacks = []
    if metrics is None:
        metrics = []
    # build fresh loss objects per call rather than sharing instances
    # created once as default arguments
    if generator_loss is None:
        generator_loss = GeneratorBCE()
    if discriminator_loss is None:
        discriminator_loss = DiscriminatorMinMax()
    kernel_size = (5, 5)
    latent_dim = 100

    # model definition
    if generator is None:
        generator = ConvGenerator(
            layer_spec_input_res=layer_spec_input_res,
            layer_spec_target_res=image_resolution,
            kernel_size=kernel_size,
            initial_filters=32,
            filters_cap=16,
            channels=channels,
        )

    if discriminator is None:
        discriminator = ConvDiscriminator(
            layer_spec_input_res=image_resolution,
            layer_spec_target_res=layer_spec_target_res,
            kernel_size=kernel_size,
            initial_filters=16,
            filters_cap=32,
            output_shape=1,
        )

    # Real data
    data_x, data_y = (
        tf.zeros((dataset_size, image_resolution[0], image_resolution[1], channels)),
        tf.zeros((dataset_size, 1)),
    )

    # Trainer
    trainer = AdversarialTrainer(
        generator=generator,
        discriminator=discriminator,
        generator_optimizer=tf.optimizers.Adam(1e-4),
        discriminator_optimizer=tf.optimizers.Adam(1e-4),
        generator_loss=generator_loss,
        discriminator_loss=discriminator_loss,
        epochs=epochs,
        metrics=metrics,
        callbacks=callbacks,
        logdir=adversarial_logdir,
    )

    # Dataset
    # take only `dataset_size` samples to speed up tests
    real_data = (
        tf.data.Dataset.from_tensor_slices((data_x, data_y))
        .take(dataset_size)
        # drop_remainder keeps the batch dimension fixed so it always
        # matches the noise tensor generated in the map below
        .batch(batch_size, drop_remainder=True)
        .prefetch(1)
    )

    # Attach a noise vector to each batch by mapping over the same dataset.
    # The dataset must yield elements of the form ((x, y), noise).
    dataset = real_data.map(
        lambda x, y: ((x, y), tf.random.normal(shape=(batch_size, latent_dim)))
    )

    trainer(dataset)
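
A minimal caller exercising the defaults, with pytest's `tmpdir` fixture as the log directory (the test itself is illustrative, not from the original suite):

def test_fake_training_loop_defaults(tmpdir):
    fake_training_loop(str(tmpdir))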