Example #1
    def ali_model(self):
        self.encoder = self.encoder_model()
        self.decoder = self.decoder_model()
        disc_train_enc_dec, disc_train_disc = self.discriminator_model()

        bigan_train_enc_dec = simple_bigan(self.decoder, self.encoder,
                                           disc_train_enc_dec)
        bigan_train_disc = simple_bigan(self.decoder, self.encoder,
                                        disc_train_disc)

        x = bigan_train_enc_dec.inputs[1]
        z = normal_latent_sampling(self.embedding_size)(x)

        # re-wrap the models so z is sampled internally and x is the only input;
        # output names can be made consistent with fix_names (see the sketch after this example)
        bigan_train_enc_dec = Model(x, bigan_train_enc_dec([z, x]))
        bigan_train_disc = Model(x, bigan_train_disc([z, x]))

        # encoder.summary()
        # decoder.summary()
        # disc_train_enc_dec.summary()
        # disc_train_disc.summary()
        # bigan_train_enc_dec.summary()
        # bigan_train_disc.summary()

        model = AdversarialModel(player_models=[bigan_train_enc_dec, bigan_train_disc],
                                 player_params=[self.encoder.trainable_weights + self.decoder.trainable_weights,
                                                disc_train_disc.trainable_weights],
                                 player_names=['encoder_decoder', 'discriminator'])
        model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerScheduled(self.train_schedule),
                                  player_optimizers=[Adam(lr=5e-5, beta_1=0.5, beta_2=1e-3),
                                                     Adam(lr=5e-5, beta_1=0.5, beta_2=1e-3)],
                                  loss=['mean_squared_error', 'mean_squared_error'])
        return model
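A note on the "fix names" question above: the later examples handle it with keras_adversarial's fix_names helper, which relabels a wrapped model's outputs so both players expose consistent output names. A minimal sketch of how the two wrapper lines inside ali_model could read, assuming fix_names is importable from keras_adversarial as in Example #3:

        # sketch only: rename the re-wrapped models' outputs, mirroring Example #3
        from keras_adversarial import fix_names
        bigan_train_enc_dec = Model(x, fix_names(bigan_train_enc_dec([z, x]),
                                                 bigan_train_enc_dec.output_names))
        bigan_train_disc = Model(x, fix_names(bigan_train_disc([z, x]),
                                              bigan_train_disc.output_names))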
Example #2
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator,
                         normal_latent_sampling((latent_dim, )))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        base_model=bigan,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png",
                                     generator_sampler)

    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        "output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
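For reference, these snippets assume helpers from the keras_adversarial package plus example-local builders (model_generator, model_encoder, model_discriminator and the mnist_data loader) that are not shown here. A rough import preamble, with module paths that may differ across keras_adversarial versions, might look like:

# assumed imports; the model builders and mnist_data live in the example's own files
import os
import numpy as np
import pandas as pd
from keras.models import Model
from keras.optimizers import Adam
from keras_adversarial import (AdversarialModel, AdversarialOptimizerSimultaneous,
                               simple_bigan, normal_latent_sampling,
                               gan_targets, fix_names, n_choice)
from keras_adversarial.image_grid_callback import ImageGridCallback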
Example #3
def example_bigan(path, adversarial_optimizer):
    # z \in R^25
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(
        latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim, ))(x)
    # eliminate z from inputs
    bigan_generator = Model([x],
                            fix_names(bigan_generator([z, x]),
                                      bigan_generator.output_names))
    bigan_discriminator = Model([x],
                                fix_names(bigan_discriminator([z, x]),
                                          bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        player_models=[bigan_generator, bigan_discriminator],
        player_params=[
            generative_params, discriminator_train.trainable_weights
        ],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
def driver_gan(path, adversarial_optimizer):
    # z \in R^3
    latent_dim = 3
    # x \in R^{15x6}
    input_shape = (15, 6)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    # Merging encoder weights and generator weights
    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-7, decay=1e-7), Adam(1e-6, decay=1e-7)],
                              loss='binary_crossentropy')

    # load driver data
    train_dataset = [1, 2, 5]
    test_dataset = [3, 4]
    train_reader = data_base(train_dataset)
    test_reader = data_base(test_dataset)
    xtrain, xtest = train_reader.read_files(), test_reader.read_files()
    # ---------------------------------------------------------------------------------
    # callback for image grid of generated samples
    def generator_sampler():
        # 1x1 grid of generated samples (the MNIST examples use a 10x10 grid
        # reshaped to (10, 10, 28, 28)); not attached to a callback below
        zsamples = np.random.normal(size=(1 * 1, latent_dim))
        return generator.predict(zsamples).reshape((1, 1, 15, 6))


    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)         # 10 test samples
        xrep = np.repeat(xsamples, 5, axis=0)  # 5 reconstructions per sample
        xgen = autoencoder.predict(xrep).reshape((10, 5, 15, 6))
        xsamples = xsamples.reshape((10, 1, 15, 6))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x


    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        nb_epoch=25, batch_size=10, verbose=0)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
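Note that driver_gan defines generator_sampler and autoencoder_sampler but passes no callbacks to model.fit, so no image grids are written. If they were wanted, they could be wired up inside driver_gan the same way the other examples do, e.g. (the file-name patterns are assumptions borrowed from example_bigan):

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"),
                                     generator_sampler)
    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"),
                                       autoencoder_sampler)
    # then pass callbacks=[generator_cb, autoencoder_cb] to model.fit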
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator, normal_latent_sampling((latent_dim,)))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(base_model=bigan,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png", generator_sampler)

    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback("output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
def example_bigan(path, adversarial_optimizer):
    # z \in R^25
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
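As a usage note, example_bigan only needs an output directory and an adversarial optimizer; a hypothetical driver (the output path here is an assumption) could be:

if __name__ == "__main__":
    example_bigan("output/bigan", AdversarialOptimizerSimultaneous())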