Example #1
0
    def test_discriminator_output_shape(self):
        """Discriminator should map a batch of images to one logit per sample."""
        params = edict({
            'img_height': 32,
            'img_width': 32,
            'num_channels': 3,
        })
        model = discriminator.Discriminator(params)
        # Batch of 4 all-ones RGB images matching the configured 32x32x3 shape.
        batch = tf.ones(shape=[4, 32, 32, 3])
        logits = model(batch)

        self.assertEqual(logits.shape, (4, 1))
def discriminator_model_factory(
    input_params,
    dataset_type: pt.ProblemType,
):
    """Build the discriminator model(s) appropriate for *dataset_type*.

    Args:
        input_params: model hyper-parameters forwarded to the discriminator
            constructor.
        dataset_type: problem identifier. NOTE(review): every comparison uses
            ``pt.ProblemType.<X>.name``, so callers appear to pass the enum
            member *name* (a string), not the enum member itself, despite the
            annotation — confirm against call sites.

    Returns:
        A single discriminator model, or a two-element list of patch
        discriminators for the cycle-GAN problem (one per image domain).

    Raises:
        NotImplementedError: if *dataset_type* has no discriminator defined.
    """
    # All vanilla problems share the same plain discriminator; the original
    # listed them as three duplicate branches (with an inconsistent
    # if/if/elif chain) — collapsed here.
    if dataset_type in (
        pt.ProblemType.VANILLA_MNIST.name,
        pt.ProblemType.VANILLA_FASHION_MNIST.name,
        pt.ProblemType.VANILLA_CIFAR10.name,
    ):
        return discriminator.Discriminator(input_params)
    elif dataset_type in (
        pt.ProblemType.CONDITIONAL_MNIST.name,
        pt.ProblemType.CONDITIONAL_FASHION_MNIST.name,
    ):
        return conditional_discriminator.ConditionalDiscriminator(input_params)
    elif dataset_type == pt.ProblemType.CONDITIONAL_CIFAR10.name:
        return conditional_discriminator.ConditionalDiscriminatorCifar10(
            input_params)
    elif dataset_type == pt.ProblemType.CYCLE_SUMMER2WINTER.name:
        # Cycle-GAN training needs one patch discriminator per image domain.
        return [
            patch_discriminator.PatchDiscriminator(input_params),
            patch_discriminator.PatchDiscriminator(input_params),
        ]
    # Same exception type as before, now with a diagnostic message.
    raise NotImplementedError(
        f'No discriminator defined for problem type: {dataset_type}')
Example #3
0
})

# MNIST dataset built from the script's hyper-parameters; labels are included
# (with_labels=True) — presumably for conditional training, verify downstream.
dataset = mnist.MnistDataset(model_parameters, with_labels=True)


def validation_dataset():
    """Build a fixed validation batch for the conditional generator.

    Returns a two-element list: ``num_classes ** 2`` random latent vectors,
    and a label array that repeats each class id ``num_classes`` times
    (i.e. every class appears equally often in the batch).
    """
    num_classes = model_parameters.num_classes
    batch_size = num_classes ** 2
    class_labels = np.repeat(list(range(num_classes)), num_classes)
    latent_vectors = tf.random.normal([batch_size, model_parameters.latent_size])
    return [latent_vectors, np.array(class_labels)]


# NOTE(review): this rebinds ``validation_dataset`` from the factory function
# above to its result — the function is no longer callable after this line.
# Consider distinct names (e.g. ``make_validation_dataset``) to avoid shadowing.
validation_dataset = validation_dataset()

# Instantiate the two GAN networks from the shared hyper-parameters.
generator = latent_to_image.LatentToImageGenerator(model_parameters)
# NOTE(review): this assignment shadows the imported ``discriminator`` module
# with the model instance — code below this line can no longer reach the module.
discriminator = discriminator.Discriminator(model_parameters)

# Separate Adam optimizers for the two networks, each with its own learning
# rate from model_parameters; beta_1=0.5 matches the common GAN-training
# setting — TODO confirm that is the intent here.
generator_optimizer = optimizers.Adam(
    learning_rate=model_parameters.learning_rate_generator,
    beta_1=0.5,
)
discriminator_optimizer = optimizers.Adam(
    learning_rate=model_parameters.learning_rate_discriminator,
    beta_1=0.5,
)

# Training callbacks: a saver that writes generated images every
# ``save_images_every_n_steps`` training steps.
callbacks = [
    saver.ImageProblemSaver(
        save_images_every_n_steps=model_parameters.save_images_every_n_steps,
    )
]