# Example #1 (score: 0)
    def make_discriminator(self):
        """Creates a discriminator (critic) model that takes an image as input and outputs a single
        unbounded score, representing whether the input is real or generated. Unlike normal GANs,
        the output is not sigmoid and does not represent a probability! Instead, the output should
        be as large and negative as possible for generated inputs and as large and positive as
        possible for real inputs.
        Note that the improved WGAN paper suggests that BatchNormalization should not be used in
        the discriminator (and indeed no BatchNorm layers appear below)."""

        model = Sequential()

        # Bring every input up to a fixed 64x64 spatial size before the conv stack.
        model.add(
            Lambda(lambda image: ktf.image.resize_images(image, (64, 64)),
                   input_shape=self.img_shape))

        # Four strided conv blocks; activation is LeakyReLU(alpha=0.2)
        # expressed as max(0.2 * x, x).
        for filters in (128, 256, 512, 1024):
            model.add(Conv2D(filters, (4, 4), strides=(2, 2), padding='same'))
            model.add(Lambda(lambda x: ktf.maximum(0.2 * x, x)))

        # Output layer: a single linear score per image.
        # BUGFIX: the original appended Activation('sigmoid') here, which bounds the
        # output to (0, 1) and directly contradicts the Wasserstein-critic contract
        # stated in the docstring (and the hinge losses used elsewhere in this file).
        # The critic output must stay unbounded, so the sigmoid is removed.
        model.add(Conv2D(1, (4, 4), strides=(1, 1), padding='valid'))

        return model
# Example #2 (score: 0)
    def make_generator(self):
        """Build the generator: maps a noise vector (shape ``self.noise_shape``) to a
        single-channel image in [-1, 1] via a stack of transposed convolutions.

        NOTE(review): the spatial size grows 1 -> 4 -> 8 -> 16 -> 32 -> 64 through the
        layers below, so the output appears to be 64x64x1 rather than the 28x28x1 an
        earlier comment claimed — confirm against the data pipeline.
        """
        model = Sequential()

        # Treat the noise vector as a 1x1 "image" whose channel count is the noise dim.
        model.add(
            Reshape((1, 1, self.noise_shape[0]), input_shape=self.noise_shape))

        # Hidden blocks: Conv2DTranspose -> BatchNorm -> LeakyReLU(alpha=0.2),
        # where LeakyReLU is expressed as max(0.2 * x, x).
        hidden_blocks = [
            (1024, (1, 1), 'valid'),  # 1x1 -> 4x4
            (512, (2, 2), 'same'),    # 4x4 -> 8x8
            (256, (2, 2), 'same'),    # 8x8 -> 16x16
            (128, (2, 2), 'same'),    # 16x16 -> 32x32
        ]
        for filters, strides, padding in hidden_blocks:
            model.add(
                Conv2DTranspose(filters, (4, 4), strides=strides,
                                padding=padding))
            model.add(BatchNormalization(momentum=0.99))
            model.add(Lambda(lambda x: ktf.maximum(0.2 * x, x)))

        # Output block: final upsample to one channel, squashed into [-1, 1].
        model.add(Conv2DTranspose(1, (4, 4), strides=(2, 2), padding='same'))
        model.add(Activation('tanh'))

        return model
# Example #3 (score: 0) — File: gan.py, Project: arosset42/gan
 def hinge_loss_fake(logits):
     """Hinge loss for generated samples: batch mean of max(0, 1 + logits)."""
     # relu(1 + logits) is exactly maximum(0, 1 + logits).
     margins = ktf.nn.relu(1.0 + logits)
     return ktf.reduce_mean(margins)
# Example #4 (score: 0) — File: gan.py, Project: arosset42/gan
 def hinge_loss_true(logits):
     """Hinge loss for real samples: batch mean of max(0, 1 - logits)."""
     # relu(1 - logits) is exactly maximum(0, 1 - logits).
     margins = ktf.nn.relu(1.0 - logits)
     return ktf.reduce_mean(margins)
# Example #5 (score: 0)
def get_lr_decay_schedule(args):
    """Build per-iteration learning-rate multiplier schedules for generator and discriminator.

    Args:
        args: namespace with attributes ``number_of_epochs`` (int),
            ``training_ratio`` (discriminator updates per generator update) and
            ``lr_decay_schedule`` (one of None, 'linear', 'half-linear',
            'linear-end', or 'dropat<N>').

    Returns:
        Tuple ``(lr_decay_schedule_generator, lr_decay_schedule_discriminator)``;
        each maps an iteration number (int or tensor) to a multiplier in [0, 1].

    Raises:
        ValueError: if ``args.lr_decay_schedule`` is not a recognised value.
    """
    # NOTE(review): assumes 1000 iterations per epoch — confirm against the training loop.
    number_of_iters_generator = 1000. * args.number_of_epochs
    number_of_iters_discriminator = 1000. * args.number_of_epochs * args.training_ratio

    if args.lr_decay_schedule is None:
        # Constant learning rate.
        lr_decay_schedule_generator = lambda it: 1.
        lr_decay_schedule_discriminator = lambda it: 1.
    elif args.lr_decay_schedule == 'linear':
        # Linear decay from 1 to 0 over the whole run, clamped at 0.
        lr_decay_schedule_generator = lambda it: K.maximum(
            0., 1. - K.cast(it, 'float32') / number_of_iters_generator)
        lr_decay_schedule_discriminator = lambda it: K.maximum(
            0., 1. - K.cast(it, 'float32') / number_of_iters_discriminator)
    elif args.lr_decay_schedule == 'half-linear':
        # Linear decay during the first half of training, then held at 0.5.
        lr_decay_schedule_generator = lambda it: ktf.where(
            K.less(it, K.cast(number_of_iters_generator / 2, 'int64')),
            ktf.maximum(
                0., 1. -
                (K.cast(it, 'float32') / number_of_iters_generator)), 0.5)
        lr_decay_schedule_discriminator = lambda it: ktf.where(
            K.less(it, K.cast(number_of_iters_discriminator / 2, 'int64')),
            ktf.maximum(
                0., 1. - (K.cast(it, 'float32') /
                          number_of_iters_discriminator)), 0.5)
    elif args.lr_decay_schedule == 'linear-end':
        # Constant at 1 until `decay_at` fraction of the run, then linear decay to 0.
        decay_at = 0.828

        iters_until_decay_generator = number_of_iters_generator * decay_at
        iters_until_decay_discriminator = number_of_iters_discriminator * decay_at

        iters_after_decay_generator = number_of_iters_generator * (1 - decay_at)
        iters_after_decay_discriminator = number_of_iters_discriminator * (1 - decay_at)

        lr_decay_schedule_generator = lambda it: ktf.where(
            K.greater(it,
                      K.cast(iters_until_decay_generator, 'int64')),
            ktf.maximum(
                0., 1. - (K.cast(it, 'float32') -
                          iters_until_decay_generator) /
                iters_after_decay_generator), 1)
        lr_decay_schedule_discriminator = lambda it: ktf.where(
            K.greater(
                it, K.cast(iters_until_decay_discriminator, 'int64')),
            ktf.maximum(
                0., 1. - (K.cast(it, 'float32') -
                          iters_until_decay_discriminator) /
                iters_after_decay_discriminator), 1)
    elif args.lr_decay_schedule.startswith("dropat"):
        # Linear decay multiplied by a 10x drop after iteration <N>*1000
        # (scaled by the training ratio for the discriminator).
        drop_at = int(args.lr_decay_schedule.replace('dropat', ''))
        drop_at_generator = drop_at * 1000
        drop_at_discriminator = drop_at * 1000 * args.training_ratio
        print("Drop at generator %s" % drop_at_generator)
        lr_decay_schedule_generator = lambda it: (ktf.where(
            K.less(it, drop_at_generator), 1., 0.1) * K.maximum(
                0., 1. - K.cast(it, 'float32') / number_of_iters_generator))
        lr_decay_schedule_discriminator = lambda it: (ktf.where(
            K.less(it, drop_at_discriminator), 1., 0.1) * K.maximum(
                0., 1. - K.cast(it, 'float32') /
                number_of_iters_discriminator))
    else:
        # Was `assert False` — asserts are stripped under `python -O`, so raise
        # an explicit, informative error for unrecognised schedule names instead.
        raise ValueError(
            "Unknown lr_decay_schedule: %r" % (args.lr_decay_schedule,))

    return lr_decay_schedule_generator, lr_decay_schedule_discriminator