Example #1
0
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim, targets=gan_targets, loss="binary_crossentropy"):
    """Train a GAN on CIFAR-10 and write history, sample grids and models to *path*.

    Idempotent experiment runner: if ``path/history.csv`` already exists the
    whole run is skipped.

    Args:
        adversarial_optimizer: keras-adversarial update schedule.
        path: output directory for CSV history, epoch images and .h5 models.
        opt_g, opt_d: optimizers for the generator and discriminator players.
        nb_epoch: number of training epochs.
        generator: keras model mapping z -> x.
        discriminator: factory returning a discriminator model; called with
            0 and 0.5 (presumably a dropout rate -- confirm against the
            discriminator definition).
        latent_dim: dimensionality of the latent vector z.
        targets: callable mapping a sample count to GAN target arrays.
        loss: loss name applied to all GAN outputs.
    """
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return
    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is gaussian generated on GPU
    # can also experiment with uniform_latent_sampling
    # d_g: discriminator copy used inside the generator's player;
    # d_d: the trainable discriminator (only d_d's weights are a player below)
    d_g = discriminator(0)
    d_d = discriminator(0.5)
    generator.summary()
    d_d.summary()
    gan_g = simple_gan(generator, d_g, None)
    gan_d = simple_gan(generator, d_d, None)
    x = gan_g.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    gan_g = Model([x], fix_names(gan_g([z, x]), gan_g.output_names))
    gan_d = Model([x], fix_names(gan_d([z, x]), gan_d.output_names))

    # build adversarial model
    model = AdversarialModel(player_models=[gan_g, gan_d],
                             player_params=[generator.trainable_weights,
                                            d_d.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    # fixed latent grid so successive epoch images are comparable
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        # transpose to channels-last for plotting
        xpred = generator.predict(zsamples)
        xpred = dim_ordering_unfix(xpred.transpose((0, 2, 3, 1)))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(
        os.path.join(path, "epoch-{:03d}.png"),
        generator_sampler, cmap=None
    )

    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(TensorBoard(log_dir=os.path.join(path, "logs"), histogram_freq=0, write_graph=True, write_images=True))

    # train model
    x_train, x_test = cifar10_data()
    y = targets(x_train.shape[0])
    y_test = targets(x_test.shape[0])
    history = model.fit(x=x_train, y=y, validation_data=(x_test, y_test), callbacks=callbacks, epochs=nb_epoch, batch_size=32)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    d_d.save(os.path.join(path, "discriminator.h5"))
Example #2
0
def aae_model(path, adversarial_optimizer, xtrain, encoded_dim=100, img_dim=25, nb_epoch=20):
    """Build and train an adversarial autoencoder (AAE), then save its parts.

    Unlike the classic AAE, the discriminator here operates in x-space: it
    judges real inputs ``x`` against reconstructions ``xpred``.

    Args:
        path: output directory for the history CSV and saved .h5 models.
        adversarial_optimizer: keras-adversarial update schedule.
        xtrain: training data of shape (n, img_dim).
        encoded_dim: latent dimensionality z.
        img_dim: flat input dimensionality x.
        nb_epoch: number of training epochs.
    """
    # z \in R^{encoded_dim}
    latent_dim = encoded_dim
    # x \in R^{img_dim}
    input_shape = (img_dim,)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)), name="autoencoder")
    # discriminator (x-space -> y)
    discriminator = model_discriminator(input_shape)

    # assemble AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)

    yreal = discriminator(x)
    yfake = discriminator(xpred)
    aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))

    # print summary of models
    encoder.summary()
    generator.summary()
    discriminator.summary()

    # build adversarial model: (generator + encoder) vs. discriminator
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(base_model=aae,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])

    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adadelta(), Adadelta()],
                              loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
                                    "xpred": "binary_crossentropy"},
                              player_compile_kwargs=[{"loss_weights": {"yfake": 1e-4, "yreal": 1e-4, "xpred": 1e1}}] * 2)

    # train network
    # targets are one (xpred, yfake, yreal) triple per player
    n = xtrain.shape[0]
    y = [xtrain, np.ones((n, 1)), np.zeros((n, 1)), xtrain, np.zeros((n, 1)), np.ones((n, 1))]
    # BUG FIX: removed `ntest = xtest.shape[0]` -- `xtest` is not defined in
    # this function (it raised NameError) and the value was never used since
    # validation is disabled here.
    history = model.fit(x=xtrain, y=y, epochs=nb_epoch, batch_size=128, shuffle=False)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "aae_history.csv"))
    # save models
    encoder.save(os.path.join(path, "aae_encoder.h5"))
    generator.save(os.path.join(path, "aae_decoder.h5"))
    discriminator.save(os.path.join(path, "aae_discriminator.h5"))
    K.clear_session()
Example #3
0
    def __build_connected_network_train(self, main=True):
        """Assemble the end-to-end training network from the component models.

        Wires the encoder's two embeddings (e1, e2) into the predictor, the
        decoder (with noise injected on the e1 path), two cross-embedding
        disentanglers, and an optional z-discriminator, returning one Keras
        Model with named outputs.

        Args:
            main: when True, disentangler targets are random tensors drawn by
                ``__random_target`` instead of the true embeddings --
                presumably to make each embedding unpredictable from the
                other (TODO confirm against the loss definitions).

        Returns:
            Keras ``Model`` mapping x to the named training outputs.
        """
        x = self.encoder.inputs[0]
        # encoder produces a pair of embeddings
        e1, e2 = self.encoder(x)

        # noise is applied only on the e1 path that feeds the decoder
        noisy_e1 = self.noisy_transformer(e1)

        y = self.predictor(e1)
        x_pred = self.decoder([noisy_e1, e2])

        # embedding widths, needed to size the random targets below
        e1_dim = int(self.encoder.outputs[0].shape[-1])
        e2_dim = int(self.encoder.outputs[1].shape[-1])

        output_vars = [y, x_pred]
        output_names = ['y', 'x_pred']

        e1_target = e1
        e2_target = e2
        # cross-disentanglers: predict each embedding from the other
        e2_pred = self.disentangler1(e1)
        e1_pred = self.disentangler2(e2)

        if main:
            embedding_activation = self.model_config.embedding_activation
            e2_target = self.__random_target(x, e2_dim, embedding_activation)
            e1_target = self.__random_target(x, e1_dim, embedding_activation)

        # concatenate (target, prediction) so a single output carries both
        # halves for the loss to compare
        e1_e1_pred = Concatenate()([e1_target, e1_pred])
        output_vars.append(e1_e1_pred)
        output_names.append('e1pred')

        e2_e2_pred = Concatenate()([e2_target, e2_pred])
        output_vars.append(e2_e2_pred)
        output_names.append('e2pred')

        # optional adversarial head on e1
        if self.z_discriminator is not None:
            z = self.z_discriminator(e1)
            output_vars.append(z)
            output_names.append('z')

        outputs = fix_names(output_vars, output_names)
        network = Model(inputs=[x], outputs=outputs)

        return network
def example_aae(path, adversarial_optimizer):
    """Train a classic adversarial autoencoder (AAE) on MNIST.

    The encoder maps x -> z; the discriminator judges encoded z against
    samples from a standard normal prior, while the generator reconstructs
    x from z. History, image grids and the three sub-models are written
    under *path*.

    Args:
        path: output directory.
        adversarial_optimizer: keras-adversarial update schedule.
    """
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)

    # assemble AAE: yreal scores prior samples, yfake scores encoded z
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim, ))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal],
                             ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model: (generator + encoder) vs. discriminator
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        compile_kwargs={
            "loss_weights": {
                "yfake": 1e-2,
                "yreal": 1e-2,
                "xpred": 1
            }
        })

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        # 10 originals, each followed by 9 reconstructions of the same input
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        samples = np.concatenate((xsamples, xgen), axis=1)
        return samples

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    # generator, discriminator; pred, yfake, yreal
    n = xtrain.shape[0]
    y = [
        xtrain,
        np.ones((n, 1)),
        np.zeros((n, 1)), xtrain,
        np.zeros((n, 1)),
        np.ones((n, 1))
    ]
    ntest = xtest.shape[0]
    ytest = [
        xtest,
        np.ones((ntest, 1)),
        np.zeros((ntest, 1)), xtest,
        np.zeros((ntest, 1)),
        np.ones((ntest, 1))
    ]
    # NOTE(review): `nb_epoch` is the pre-Keras-2 spelling of `epochs`
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Example #5
0
def example_bigan(path, adversarial_optimizer):
    """Train a BiGAN on MNIST and save history, image grids and models.

    The discriminator sees (z, x) pairs; ``simple_bigan`` builds the
    two-player wiring, with separate train/test discriminator copies.

    Args:
        path: output directory.
        adversarial_optimizer: keras-adversarial update schedule.
    """
    # z \in R^25
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y); train copy is the trainable player
    discriminator_train, discriminator_test = model_discriminator(
        latent_dim, input_shape)
    # bigan (z, x - > yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim, ))(x)
    # eliminate z from inputs
    bigan_generator = Model([x],
                            fix_names(bigan_generator([z, x]),
                                      bigan_generator.output_names))
    bigan_discriminator = Model([x],
                                fix_names(bigan_discriminator([z, x]),
                                          bigan_discriminator.output_names))

    # generator and encoder train jointly as one player
    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        player_models=[bigan_generator, bigan_discriminator],
        player_params=[
            generative_params, discriminator_train.trainable_weights
        ],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        # 10 originals, each followed by 9 reconstructions
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    # NOTE(review): `nb_epoch` is the pre-Keras-2 spelling of `epochs`
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
Example #6
0
    def __init__(self, uNum, iNum, dim, weight, pop_percent):
        """Matrix-factorization recommender with adversarial embedding discriminators.

        Builds a dot-product MF model plus two discriminators that score the
        user and item embeddings, then wraps everything in a three-player
        ``AdversarialModel`` (mf vs. disc_u vs. disc_i).

        Args:
            uNum: number of users (user embedding vocabulary size).
            iNum: number of items (item embedding vocabulary size).
            dim: embedding dimensionality.
            weight: adversarial loss weight (stored; not used in the current
                compile call -- TODO confirm intended loss weighting).
            pop_percent: stored popularity fraction (used elsewhere).
        """
        self.uNum = uNum
        self.iNum = iNum
        self.dim = dim
        self.weight = weight
        self.pop_percent = pop_percent

        # user/item index inputs plus separate adversarial inputs
        userInput = Input(shape=(1,), dtype="int32")
        itemInput = Input(shape=(1,), dtype="int32")
        userAdvInput = Input(shape=(1,), dtype="int32")
        itemAdvInput = Input(shape=(1,), dtype="int32")

        # embedding tables are shared between the MF and adversarial paths
        userEmbeddingLayer = Embedding(input_dim=uNum, output_dim=dim)
        itemEmbeddingLayer = Embedding(input_dim=iNum, output_dim=dim)

        uEmb = Flatten()(userEmbeddingLayer(userInput))
        iEmb = Flatten()(itemEmbeddingLayer(itemInput))
        uAdvEmb = Flatten()(userEmbeddingLayer(userAdvInput))
        iAdvEmb = Flatten()(itemEmbeddingLayer(itemAdvInput))

        self.uEncoder = Model(userInput, uEmb)
        self.iEncoder = Model(itemInput, iEmb)

        # discriminators are frozen inside the combined graph; they train
        # as their own players in the AdversarialModel below
        self.discriminator_i = self.generate_discriminator()
        self.discriminator_i.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
        self.discriminator_i.trainable = False
        validity = self.discriminator_i(iAdvEmb)

        self.discriminator_u = self.generate_discriminator()
        self.discriminator_u.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
        self.discriminator_u.trainable = False
        validity_u = self.discriminator_u(uAdvEmb)

        # MF prediction is the dot product of the two embeddings
        pred = dot([uEmb, iEmb], axes=-1)

        self.model = Model([userInput, itemInput], pred)

        self.aae = Model([userInput, itemInput, userAdvInput, itemAdvInput],
                         fix_names([pred, validity_u, validity], ["xpred", "upred", "ipred"]))

        mf_params = self.uEncoder.trainable_weights + self.iEncoder.trainable_weights
        self.advModel = AdversarialModel(base_model=self.aae,
                                         player_params=[mf_params, self.discriminator_u.trainable_weights,
                                                        self.discriminator_i.trainable_weights],
                                         player_names=["mf", "disc_u", "disc_i"])

        # BUG FIX: there are THREE players (mf, disc_u, disc_i) and three
        # optimizers, so the per-player compile kwargs list must have three
        # entries -- the original `* 2` left the third player without kwargs.
        self.advModel.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                                          player_optimizers=[Adam(), Adam(), Adam()],
                                          loss={"upred": "binary_crossentropy", "ipred": "binary_crossentropy",
                                                "xpred": "mean_squared_error"},
                                          player_compile_kwargs=[{"loss_weights": {"upred": 1, "ipred": 1,
                                                                                   "xpred": 1}}] * 3)
Example #7
0
def driver_gan(path, adversarial_optimizer):
    """Train a BiGAN on driver telemetry data (15x6 inputs, 3-d latent).

    Reads train/test sets via ``data_base``, trains the adversarial model,
    and saves history plus the encoder/generator/discriminator under *path*.

    Args:
        path: output directory.
        adversarial_optimizer: keras-adversarial update schedule.
    """
    # z \in R^3
    latent_dim = 3
    # x \in R^{15x6}
    input_shape = (15, 6)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x - > yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    # Merging encoder weights and generator weights
    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-7, decay=1e-7), Adam(1e-6, decay=1e-7)],
                              loss='binary_crossentropy')

    # load driver data; dataset ids 1,2,5 train and 3,4 test
    train_dataset = [1,2,5]
    test_dataset = [3,4]
    train_reader = data_base(train_dataset)
    test_reader = data_base(test_dataset)
    xtrain, xtest = train_reader.read_files(),test_reader.read_files()
    # ---------------------------------------------------------------------------------
    # callback for image grid of generated samples
    # NOTE(review): generator_sampler and autoencoder_sampler are defined but
    # never wrapped in callbacks or passed to fit() -- currently dead code.
    def generator_sampler():
        zsamples = np.random.normal(size=(1 * 1, latent_dim))  #---------------------------------> (10,10)
        return generator.predict(zsamples).reshape((1, 1, 15, 6))# confused ***********************************default (10,10,28,28)


    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10) # the number of testdata set
        xrep = np.repeat(xsamples, 5, axis=0) # the number of train dataset
        xgen = autoencoder.predict(xrep).reshape((1, 1, 15, 6))
        xsamples = xsamples.reshape((1, 1, 15, 6))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x


    # train network (silent: verbose=0, no callbacks)
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        nb_epoch=25, batch_size=10, verbose=0)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
Example #8
0
def example_aae(path, adversarial_optimizer):
    """Train an adversarial autoencoder on 3x32x32 image data (benthoz set).

    Encoder maps images to a 256-d latent; the discriminator judges encoded
    latents against a standard normal prior. History, image grids and the
    three sub-models are written under *path*.

    Args:
        path: output directory.
        adversarial_optimizer: keras-adversarial update schedule.
    """
    # z \in R^256
    latent_dim = 256
    units = 512
    # x \in R^{3x32x32} (backend-appropriate ordering)
    input_shape = dim_ordering_shape((3, 32, 32))

    # generator (z -> x)
    generator = model_generator(latent_dim, units=units)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape, units=units)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim, units=units)

    # build AAE: yreal scores prior samples, yfake scores encoded z
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim, ))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal],
                             ["xpred", "yfake", "yreal"]))

    # print summary of models
    # BUG FIX: the labels previously mismatched the model being summarized
    # (the "autoencoder" label preceded discriminator.summary() and vice versa)
    print("generator (z -> x)")
    generator.summary()
    print("encoder (x ->z)")
    encoder.summary()
    print("discriminator (z -> y)")
    discriminator.summary()
    print("autoencoder (x -> x')")
    autoencoder.summary()

    # build adversarial model: (generator + encoder) vs. discriminator
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(3e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        player_compile_kwargs=[{
            "loss_weights": {
                "yfake": 1e-1,
                "yreal": 1e-1,
                "xpred": 1e2
            }
        }] * 2)

    # load image data
    xtrain, xtest = benthoz_data()
    print("xtrain shapes {}".format(xtrain.shape))
    print("xtrain mean val {}".format(np.mean(xtrain)))

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        # convert to channels-last for plotting
        return dim_ordering_unfix(generator.predict(zsamples)).transpose(
            (0, 2, 3, 1)).reshape((10, 10, 32, 32, 3))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        # 10 originals, each followed by 9 reconstructions
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = dim_ordering_unfix(autoencoder.predict(xrep)).reshape(
            (10, 9, 3, 32, 32))
        xsamples = dim_ordering_unfix(xsamples).reshape((10, 1, 3, 32, 32))
        samples = np.concatenate((xsamples, xgen), axis=1)
        # channels-last for plotting
        samples = samples.transpose((0, 1, 3, 4, 2))
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(
        path, "autoencoded-epoch-{:03d}.png"),
                                       autoencoder_sampler,
                                       cmap=None)

    # train network
    # generator, discriminator; pred, yfake, yreal
    n = xtrain.shape[0]
    print("num train samples {}".format(n))
    y = [
        xtrain,
        np.ones((n, 1)),
        np.zeros((n, 1)), xtrain,
        np.zeros((n, 1)),
        np.ones((n, 1))
    ]
    ntest = xtest.shape[0]
    ytest = [
        xtest,
        np.ones((ntest, 1)),
        np.zeros((ntest, 1)), xtest,
        np.zeros((ntest, 1)),
        np.ones((ntest, 1))
    ]
    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  callbacks=[generator_cb, autoencoder_cb],
                  nb_epoch=100,
                  batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Example #9
0
File: flipencoder.py  Project: yss4/FAAE
def example_faae(path, adversarial_optimizer):
    """Train a "flipped" adversarial autoencoder (FAAE) on CIFAR-10.

    NOTE(review): unlike the classic AAE, here ``generator`` is applied to x
    and ``encoder`` to its output -- the roles appear deliberately flipped
    (the file is named flipencoder.py); confirm against model_generator /
    model_encoder before renaming anything.

    Args:
        path: output directory for history, image grids and saved models.
        adversarial_optimizer: keras-adversarial update schedule.
    """

    latent_dim = 256
    units = 512

    # 3x32x32 input in backend-appropriate ordering
    input_shape = dim_ordering_shape((3, 32, 32))

    # generator (z -> x)
    generator = model_generator(latent_dim, units=units)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape, units=units)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator()

    # build FAAE; zreal enters as an explicit model input here
    zreal = discriminator.inputs[0]
    x = generator.inputs[0]
    z = generator(x)
    xpred = encoder(z)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model([zreal, x],
                fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()

    #encoder.load_weights(os.path.join(path, "encoder.h5"))
    #generator.load_weights(os.path.join(path, "generator.h5"))
    #discriminator.load_weights(os.path.join(path, "discriminator.h5"))

    # build adversarial model: (generator + encoder) vs. discriminator
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(3e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        player_compile_kwargs=[{
            "loss_weights": {
                "yfake": 1,
                "yreal": 1,
                "xpred": 8
            }
        }] * 2)

    xtrain, xtest = cifar10_data()

    # image grid of generated samples, channels-last for plotting
    def generator_sampler():
        zsamples = np.random.randn(10 * 10, latent_dim)
        return dim_ordering_unfix(generator.predict(zsamples)).transpose(
            (0, 2, 3, 1)).reshape((10, 10, 32, 32, 3))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # image grid: 10 originals, each followed by 9 reconstructions
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = dim_ordering_unfix(autoencoder.predict(xrep)).reshape(
            (10, 9, 3, 32, 32))
        xsamples = dim_ordering_unfix(xsamples).reshape((10, 1, 3, 32, 32))
        samples = np.concatenate((xsamples, xgen), axis=1)
        samples = samples.transpose((0, 1, 3, 4, 2))
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(
        path, "autoencoded-epoch-{:03d}.png"),
                                       autoencoder_sampler,
                                       cmap=None)

    # train from generators rather than in-memory arrays
    train_datagen = gen_sample(128, 256, False)
    test_datagen = gen_sample(32, 256, True)
    history = model.fit_generator(train_datagen,
                                  epochs=200,
                                  steps_per_epoch=1000,
                                  validation_data=test_datagen,
                                  validation_steps=100,
                                  callbacks=[generator_cb, autoencoder_cb])

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Example #10
0
def example_aae(path, adversarial_optimizer):
    """Train an autoencoder with an x-space discriminator on 64x64 data.

    NOTE(review): this function depends on names not defined here --
    ``xtrain``, ``y``, ``xval``, ``yval``, ``xtest`` and ``masked_mse`` must
    exist at module level or this raises NameError. Also, the
    ``adversarial_optimizer`` argument is immediately overwritten below.

    Args:
        path: unused here aside from the signature (no files are written).
        adversarial_optimizer: shadowed by AdversarialOptimizerSimultaneous().

    Returns:
        Tuple of (training History, predicted outputs reshaped to (n, 64, 64)).
    """
    # z \in R^100
    latent_dim = 100
    # x \in R^{64x64}
    input_shape = (64, 64)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    # NOTE(review): built from model_encoder, not encoder+generator --
    # presumably intentional here, but confirm against the original design
    autoencoder = model_encoder(latent_dim, input_shape)
    #autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)

    # assemble AAE: discriminator sees x and its reconstruction concatenated
    # on the channel axis; there is no yfake output in this variant
    x = autoencoder.inputs[0]
    #z = encoder(x)
    xpred = autoencoder(x)
    #zreal = normal_latent_sampling((latent_dim,))(x)
    yreal = discriminator(concatenate([x,xpred],axis=3))
    #yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yreal], ["xpred", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # overwrites the function argument (see NOTE in docstring)
    adversarial_optimizer = AdversarialOptimizerSimultaneous()
    model = AdversarialModel(base_model=aae,
                         player_params=[autoencoder.trainable_weights, discriminator.trainable_weights],
                         player_names=["autoencoder", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=['adam', Adam(1e-3, decay=1e-3)],
                              loss={"yreal": "binary_crossentropy",
                              "xpred": masked_mse},
                              player_compile_kwargs=[{"loss_weights": {"yreal": 1e-2, "xpred": 1}}] * 2)




    History=model.fit(x=xtrain, y=y, validation_data=(xval, yval),epochs=100, batch_size=15)

    # first output is xpred; reshape back to images
    Outputs = model.predict(xtest)
    print(Outputs[0].shape)
    Outputs = Outputs[0].reshape(Outputs[0].shape[0],64,64)

    #plt.figure()
    #plt.imshow(xtest[1,:,:,0])
    #plt.colorbar()
    #plt.show()

    #plt.figure()
    #plt.imshow(Outputs[1,:,:])
    #plt.colorbar()
    #plt.show()

    #plt.figure()
    #plt.imshow(ytest[0,1,:,:])
    #plt.colorbar()
    #plt.show()
    return (History, Outputs)