Code example #1
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim, targets=gan_targets, loss="binary_crossentropy"):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return
    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is gaussian generated on GPU
    # can also experiment with uniform_latent_sampling
    d_g = discriminator(0)
    d_d = discriminator(0.5)
    generator.summary()
    d_d.summary()
    gan_g = simple_gan(generator, d_g, None)
    gan_d = simple_gan(generator, d_d, None)
    x = gan_g.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    gan_g = Model([x], fix_names(gan_g([z, x]), gan_g.output_names))
    gan_d = Model([x], fix_names(gan_d([z, x]), gan_d.output_names))

    # build adversarial model
    model = AdversarialModel(player_models=[gan_g, gan_d],
                             player_params=[generator.trainable_weights,
                                            d_d.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = generator.predict(zsamples)
        xpred = dim_ordering_unfix(xpred.transpose((0, 2, 3, 1)))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(
        os.path.join(path, "epoch-{:03d}.png"),
        generator_sampler, cmap=None
    )

    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(TensorBoard(log_dir=os.path.join(path, "logs"), histogram_freq=0, write_graph=True, write_images=True))

    # train model
    x_train, x_test = cifar10_data()
    y = targets(x_train.shape[0])
    y_test = targets(x_test.shape[0])
    history = model.fit(x=x_train, y=y, validation_data=(x_test, y_test), callbacks=callbacks, epochs=nb_epoch, batch_size=32)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    d_d.save(os.path.join(path, "discriminator.h5"))
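
The `targets=gan_targets` default above supplies one label array per player output. As a rough sketch (mirroring what keras-adversarial's `gan_targets` helper does, to the best of our reading), the layout is [generator_fake, generator_real, discriminator_fake, discriminator_real] = [1, 0, 0, 1]:

import numpy as np

def gan_targets_sketch(n):
    # The generator player wants its fakes labelled real; the discriminator player wants the opposite.
    generator_fake = np.ones((n, 1))
    generator_real = np.zeros((n, 1))
    discriminator_fake = np.zeros((n, 1))
    discriminator_real = np.ones((n, 1))
    return [generator_fake, generator_real, discriminator_fake, discriminator_real]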
Code example #2
def example_gan(adversarial_optimizer,
                path,
                opt_g,
                opt_d,
                nb_epoch,
                generator,
                discriminator,
                latent_dim,
                targets=gan_targets,
                loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # train model
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"),
                                     generator_sampler)

    xtrain, xtest = mnist_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb],
                        nb_epoch=nb_epoch,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
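
For context, a hedged driver for the function above; `model_generator()` and `model_discriminator()` stand in for whatever builder functions the surrounding project defines, so treat this only as a calling sketch:

# Hypothetical driver -- the model builders are assumptions, not part of the snippet above.
from keras.optimizers import Adam
from keras_adversarial import AdversarialOptimizerSimultaneous

latent_dim = 100
generator = model_generator(latent_dim, (28, 28))
discriminator = model_discriminator((28, 28))
example_gan(AdversarialOptimizerSimultaneous(), "output/gan",
            opt_g=Adam(1e-4, decay=1e-4),
            opt_d=Adam(1e-3, decay=1e-4),
            nb_epoch=100,
            generator=generator, discriminator=discriminator,
            latent_dim=latent_dim)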
Code example #3
def aae_model(path, adversarial_optimizer, xtrain, encoded_dim=100, img_dim=25, nb_epoch=20):
    # z \in R^{encoded_dim}
    latent_dim = encoded_dim
    # x \in R^{img_dim} (flat input vector)
    input_shape = (img_dim,)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)), name="autoencoder")
    # discriminator (z -> y)
    discriminator = model_discriminator(input_shape)

    # assemble AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)

    yreal = discriminator(x)
    yfake = discriminator(xpred)
    aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))

    # print summary of models
    encoder.summary()
    generator.summary()
    
    discriminator.summary()
    #autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(base_model=aae,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
                             
    #parallel_model = multi_gpu_model(model, gpus=4)
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adadelta(), Adadelta()],
                              loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
                                    "xpred": "binary_crossentropy"},
                              player_compile_kwargs=[{"loss_weights": {"yfake": 1e-4, "yreal": 1e-4, "xpred": 1e1}}] * 2)
    # train network
    n = xtrain.shape[0]
    y = [xtrain, np.ones((n, 1)), np.zeros((n, 1)), xtrain, np.zeros((n, 1)), np.ones((n, 1))]
    # xtest is not defined in this function, so the validation targets stay commented out
    # ntest = xtest.shape[0]
    # ytest = [xtest, np.ones((ntest, 1)), np.zeros((ntest, 1)), xtest, np.zeros((ntest, 1)), np.ones((ntest, 1))]
    history = model.fit(x=xtrain, y=y, epochs=nb_epoch, batch_size=128, shuffle=False)
    
    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "aae_history.csv"))
    # save model
    encoder.save(os.path.join(path, "aae_encoder.h5"))
    generator.save(os.path.join(path, "aae_decoder.h5"))
    discriminator.save(os.path.join(path, "aae_discriminator.h5"))
    K.clear_session()
Code example #4
def example_gan(adversarial_optimizer,
                path,
                X,
                opt_g,
                opt_d,
                nb_epoch,
                generator,
                discriminator,
                latent_dim,
                targets=gan_targets,
                loss='binary_crossentropy',
                params={}):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # train model
    y = targets(X.shape[0])
    history = model.fit(x=X,
                        y=y,
                        nb_epoch=params['epochs'],
                        batch_size=params['batch_size'])

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
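
Note that this variant ignores its `nb_epoch` argument and reads the epoch count and batch size from `params`. A minimal call would therefore pass something like the following (the concrete values are placeholders):

params = {"epochs": 50, "batch_size": 32}  # only these two keys are read by the function above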
Code example #5
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))

    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"), generator_sampler, cmap=None)

    xtrain, xtest = cifar10_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(
            TensorBoard(log_dir=os.path.join(path, 'logs'), histogram_freq=0, write_graph=True, write_images=True))
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=callbacks, nb_epoch=nb_epoch,
                        batch_size=32)

    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code example #6
def main():
    latent_dim = 100
    input_shape = (1, 28, 28)

    generator = model_generator()
    discriminator = model_discriminator(input_shape=input_shape)
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    generator.summary()
    discriminator.summary()
    gan.summary()

    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    generator_cb = ImageGridCallback(
        "output/gan_convolutional/epoch-{:03d}.png",
        generator_sampler(latent_dim, generator))

    xtrain, xtest = mnist_data()
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb],
                        nb_epoch=100,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/gan_convolutional/history.csv")

    generator.save("output/gan_convolutional/generator.h5")
    discriminator.save("output/gan_convolutional/discriminator.h5")
Code example #7
def main():
    # set path
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'MData')
            
    # load data
    train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
    # test = pd.read_csv(os.path.join(data_dir, 'test.csv'))

    temp = []
    for img_name in train.filename:
        image_path = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
        img = imread(image_path, flatten=True)
        img = img.astype('float32')
        temp.append(img)
        
    train_x = np.stack(temp)
    train_x = train_x / 255

    epochs = 1 
    batch_size = 128    

    model_1 = model_generator_cifar()
    model_2 = model_discriminator_cifar()

    # gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
    latent_dim = 100
    gan = simple_gan(model_1, model_2, latent_sampling=normal_latent_sampling((latent_dim,)))

    model = AdversarialModel(base_model=gan,player_params=[model_1.trainable_weights, model_2.trainable_weights])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=['adam', 'adam'], loss='binary_crossentropy')
    
    history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=epochs, batch_size=batch_size)    
    zsamples = np.random.normal(size=(10, 100))
    pred = model_1.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.savefig('out/animals/'+str(i)+'.png')
Code example #8
# discriminator
model_2 = Sequential([
    InputLayer(input_shape=d_input_shape),
    
    Flatten(),
        
    Dense(units=hidden_1_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),

    Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
        
    Dense(units=d_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
])

model_1.summary()
model_2.summary()

from keras_adversarial import AdversarialModel, simple_gan, gan_targets
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling

gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
model = AdversarialModel(base_model=gan,player_params=[model_1.trainable_weights, model_2.trainable_weights])
model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=['adam', 'adam'], loss='binary_crossentropy')

gan.summary()

history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=10, batch_size=batch_size)
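
The `model_1` referenced above is the generator built earlier in the same tutorial (shown in full in code example #19 below); for reference, a sketch of that matching MLP generator, reusing the hyperparameter names this snippet assumes:

# Assumed counterpart to model_2 above; mirrors the generator in code example #19.
model_1 = Sequential([
    Dense(units=hidden_1_num_units, input_dim=g_input_shape,
          activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=g_output_num_units, activation='sigmoid',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Reshape(d_input_shape),
])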




Code example #9
def example_bigan(path, adversarial_optimizer):
    # z \in R^100
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(
        latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim, ))(x)
    # eliminate z from inputs
    bigan_generator = Model([x],
                            fix_names(bigan_generator([z, x]),
                                      bigan_generator.output_names))
    bigan_discriminator = Model([x],
                                fix_names(bigan_discriminator([z, x]),
                                          bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        player_models=[bigan_generator, bigan_discriminator],
        player_params=[
            generative_params, discriminator_train.trainable_weights
        ],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
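
`fix_names` (used above to give the player outputs stable names so losses and metrics can refer to them) is a small helper that wraps each output in a pass-through layer carrying the requested name; a sketch for reference:

from keras.layers import Activation

def fix_names(outputs, names):
    # Wrap every output tensor in a linear Activation so it carries the given name.
    if not isinstance(outputs, list):
        outputs = [outputs]
    if not isinstance(names, list):
        names = [names]
    return [Activation('linear', name=name)(output)
            for output, name in zip(outputs, names)]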
Code example #10
File: base.py  Project: musella/GAN
class MyGAN(object):

    # --------------------------------------------------------------------------------------------------
    def __init__(self,
                 x_shape,
                 z_shape,
                 gBuilder,
                 dBuilder,
                 dmBuilder,
                 amBuilder,
                 gan_targets=gan_targets,
                 c_shape=None):
        self.x_shape = x_shape
        self.z_shape = z_shape
        self.c_shape = c_shape

        self.gBuilder = gBuilder
        self.dBuilder = dBuilder

        self.amBuilder = amBuilder
        self.dmBuilder = dmBuilder

        self.gan_targets = gan_targets
        if isinstance(self.gan_targets, str):
            import keras_adversarial.adversarial_utils
            self.gan_targets = getattr(keras_adversarial.adversarial_utils,
                                       self.gan_targets)

        super(MyGAN, self).__init__()

    # --------------------------------------------------------------------------------------------------
    def get_generator(self):
        return self.gBuilder(self.x_shape, self.z_shape, self.c_shape)

    # --------------------------------------------------------------------------------------------------
    def get_discriminator(self):
        return self.dBuilder(self.x_shape, self.c_shape)

    # --------------------------------------------------------------------------------------------------
    def compile(self):
        self.dm = self.dmBuilder(self.get_discriminator())
        self.am = self.amBuilder(self.get_generator(),
                                 self.get_discriminator())

        return self.am, self.dm

    # --------------------------------------------------------------------------------------------------
    def adversarial_compile(self, loss='binary_crossentropy', schedule=None):
        dm, dmop = self.dmBuilder(self.get_discriminator()[0],
                                  do_compile=False)
        am, amop = self.amBuilder(self.get_generator(),
                                  self.get_discriminator(),
                                  do_compile=False)

        self.am = am
        self.dm = dm
        ## self.gan = Model( inputs=am.inputs + dm.inputs, outputs=am.outputs+dm.outputs )
        self.player_models = (Model(inputs=am[0].inputs + dm.inputs,
                                    outputs=am[0].outputs + dm.outputs),
                              Model(inputs=am[1].inputs + dm.inputs,
                                    outputs=am[1].outputs + dm.outputs))

        self.model = AdversarialModel(
            player_models=self.player_models,
            ## base_model=self.gan,
            player_params=[
                self.get_discriminator()[0].trainable_weights,
                self.get_generator().trainable_weights
            ],
            player_names=["discriminator", "generator"])

        ## optimizer = AdversarialOptimizerSimultaneousWithLoops(nloops=nloops)
        if schedule is not None:
            optimizer = AdversarialOptimizerScheduled(schedule)
        else:
            optimizer = AdversarialOptimizerSimultaneous()

        print(loss)
        self.model.adversarial_compile(adversarial_optimizer=optimizer,
                                       player_optimizers=[amop, dmop],
                                       loss=loss)

    # --------------------------------------------------------------------------------------------------
    def adversarial_fit(self,
                        x_train,
                        z_train,
                        c_x_train=None,
                        c_z_train=None,
                        w_x_train=None,
                        w_z_train=None,
                        x_test=None,
                        z_test=None,
                        c_x_test=None,
                        c_z_test=None,
                        w_x_test=None,
                        w_z_test=None,
                        batch_size=256,
                        n_epochs=50,
                        plot_every=5,
                        monitor_dir="log",
                        checkpoint_every=50,
                        **kwargs):
        if x_test is None:
            x_test = x_train
        if z_test is None:
            z_test = z_train
        if c_x_test is None:
            c_x_test = c_x_train
        if c_z_test is None:
            c_z_test = c_z_train
        if w_x_test is None:
            w_x_test = w_x_train
        if w_z_test is None:
            w_z_test = w_z_train

        if c_z_train is None:
            c_z_train = c_x_train
        if c_z_test is None:
            c_z_test = c_x_test
        if w_z_train is None:
            w_z_train = w_x_train
        if w_z_test is None:
            w_z_test = w_x_test

        has_c = c_x_train is not None
        has_w = w_x_train is not None

        if has_c:
            train_x = [c_z_train, z_train, c_x_train, x_train]
            test_x = [c_z_test, z_test, c_x_test, x_test]
        else:
            train_x = [z_train, x_train]
            test_x = [z_test, x_test]

        train_y = self.gan_targets(train_x[0].shape[0])
        test_y = self.gan_targets(test_x[0].shape[0])
        if has_w:
            train_w = [w_z_train, w_x_train, w_z_train, w_x_train]
            test_w = [w_z_test, w_x_test, w_z_test, w_x_test]
        else:
            train_w = None
            test_w = None

        if not os.path.exists(monitor_dir):
            os.mkdir(monitor_dir)
        plotter = plotting.SlicePlotter(self.get_generator(),
                                        self.get_discriminator()[1],
                                        x_test,
                                        z_test,
                                        c_x_test,
                                        c_z_test,
                                        plot_every=plot_every,
                                        w_x_test=w_x_test,
                                        w_z_test=w_z_test,
                                        do_slices=True,
                                        saveas='%s/sample' % monitor_dir)
        tensorboard = TensorBoard(log_dir='%s/tensorboard' % monitor_dir,
                                  histogram_freq=0)
        csv = CSVLogger("%s/metrics.csv" % monitor_dir)
        ## checkpoint = ModelCheckpoint("%s/model-{epoch:02d}.hdf5" % monitor_dir, monitor='loss',
        ##                              save_best_only=False, save_weights_only=True,
        ##                              period=checkpoint_every)
        checkpoint = MyCheckPoint(self, "%s/" % monitor_dir, checkpoint_every)

        self.model.name = "adversarial_model"
        self.model.fit(train_x,
                       train_y,
                       sample_weight=train_w,
                       nb_epoch=n_epochs,
                       batch_size=batch_size,
                       callbacks=[checkpoint, csv, tensorboard, plotter],
                       **kwargs)

    # --------------------------------------------------------------------------------------------------
    def fit(
        self,
        x_train,
        z_train,
        c_x_train=None,
        c_z_train=None,
        x_test=None,
        z_test=None,
        c_x_test=None,
        c_z_test=None,
        n_disc_steps=1,
        n_gen_steps=1,
        batch_size=256,
        n_epochs=50,
        plot_every=5,
        print_every=1,
        solution=None,
    ):

        if x_test is None:
            x_test = x_train
        if z_test is None:
            z_test = z_train
        if c_x_test is None:
            c_x_test = c_x_train
        if c_z_test is None:
            c_z_test = c_z_train

        if c_z_train is None:
            c_z_train = c_x_train
        if c_z_test is None:
            c_z_test = c_x_test

        has_c = c_x_train is not None

        self.compile()
        n_batches = x_train.shape[0] // batch_size
        generator = self.get_generator()
        discriminator = self.get_discriminator()
        am = self.am
        dm = self.dm
        print_every = n_batches // print_every

        def train_batch(ib):
            x_batch = x_train[ib * batch_size:(ib + 1) * batch_size]
            z_batch = z_train[ib * batch_size:(ib + 1) * batch_size]
            if has_c:
                c_z_batch = c_z_train[ib * batch_size:(ib + 1) * batch_size]
                c_x_batch = c_x_train[ib * batch_size:(ib + 1) * batch_size]
                z_batch = [c_z_batch, z_batch]
                g_batch = generator.predict(z_batch)[1]
            else:
                g_batch = generator.predict(z_batch)

            x_train_b = np.vstack([x_batch, g_batch])
            if has_c:
                c_train_b = np.vstack([c_x_batch, c_z_batch])
                x_train_b = [c_train_b, x_train_b]

            y_train_b = np.ones((2 * batch_size, 1))
            y_train_b[:batch_size, :] = 0

            generator.trainable = False
            for di in range(n_disc_steps):
                d_loss = dm.train_on_batch(x_train_b, y_train_b)
            #d_loss = [0,0]
            generator.trainable = True
            for di in range(n_gen_steps):
                a_loss = am.train_on_batch(z_batch, np.zeros((batch_size, 1)))
            # a_loss = [0,0]

            if ib % print_every == 0:
                msg = "%d: D: [%f %f] A: [%f %f]" % (ib, d_loss[0], d_loss[1],
                                                     a_loss[0], a_loss[1])
                print(msg)

        predictions = []
        for iepoch in range(n_epochs):
            if iepoch % plot_every == 0 or iepoch == n_epochs - 1:
                if has_c:
                    x_predict = generator.predict([c_z_test, z_test])[1]
                else:
                    x_predict = generator.predict(z_test)
                predictions.append(x_predict)
                if has_c:
                    x_discrim = discriminator.predict([c_x_test, x_test])
                    z_discrim = discriminator.predict([c_z_test, x_predict])
                    plotting.plot_summary_cond(x_test, c_x_test, x_predict,
                                               c_z_test, z_test, x_discrim,
                                               z_discrim)  #, solution )
                else:
                    x_discrim = discriminator.predict(x_test)
                    z_discrim = discriminator.predict(x_predict)
                    if x_test.shape[-1] == 1:
                        plotting.plot_summary(x_test.ravel(),
                                              x_predict.ravel(),
                                              z_test.ravel(), x_discrim,
                                              z_discrim, solution)
                    else:
                        plotting.plot_summary_2d(x_test, x_predict, x_discrim,
                                                 z_discrim)
            for ib in range(n_batches):
                train_batch(ib)
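
A heavily hedged usage sketch for the class above; every builder and array below is a hypothetical stand-in for what the musella/GAN project actually supplies. Note that `adversarial_compile` expects `dBuilder` to return a pair of discriminator models and the `dmBuilder`/`amBuilder` factories to accept `do_compile=False` and return `(model(s), optimizer)` pairs:

# Hypothetical builders and data -- placeholders, not part of the class above.
gan = MyGAN(x_shape=(1,), z_shape=(8,),
            gBuilder=build_generator,      # (x_shape, z_shape, c_shape) -> generator model
            dBuilder=build_discriminator,  # (x_shape, c_shape) -> (disc_train, disc_eval)
            dmBuilder=build_dm,            # discriminator-model factory
            amBuilder=build_am,            # adversarial-model factory
            gan_targets="gan_targets")     # strings are resolved via keras_adversarial.adversarial_utils

gan.adversarial_compile(loss="binary_crossentropy")
gan.adversarial_fit(x_train, z_train, batch_size=256, n_epochs=50, monitor_dir="log")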
Code example #11
gerador.add(Reshape((28, 28)))

# Discriminator
discriminador = Sequential()
discriminador.add(InputLayer(input_shape=(28, 28)))
discriminador.add(Flatten())
discriminador.add(
    Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))
discriminador.add(
    Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))
discriminador.add(
    Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))

gan = simple_gan(gerador, discriminador, normal_latent_sampling((100, )))
modelo = AdversarialModel(
    base_model=gan,
    player_params=[gerador.trainable_weights, discriminador.trainable_weights])
modelo.adversarial_compile(
    adversarial_optimizer=AdversarialOptimizerSimultaneous(),
    player_optimizers=['adam', 'adam'],
    loss='binary_crossentropy')
modelo.fit(x=previsores_treinamento,
           y=gan_targets(60000),
           epochs=100,
           batch_size=256)

amostras = np.random.normal(size=(20, 100))
previsao = gerador.predict(amostras)
for i in range(previsao.shape[0]):
    plt.imshow(previsao[i, :], cmap='gray')
    plt.show()
Code example #12
File: m_gan.py  Project: mbc1990/animal-generator
def main():
    data_dir = "goldens_filtered_32x32_gray/"
    out_dir = "m_gan_out/"
    epochs = 1
    batch_size = 64

    # TODO: Research why these values were chosen
    opt_g = Adam(1e-4, decay=1e-5)
    opt_d = Adam(1e-3, decay=1e-5)
    loss = 'binary_crossentropy'
    latent_dim = 100
    adversarial_optimizer = AdversarialOptimizerSimultaneous()

    # My simple models
    # generator = get_generator()
    # discriminator = get_discriminator()

    # CIFAR example convolutional models
    generator = get_generator_cifar()
    discriminator = get_discriminator_cifar()

    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    temp = []
    for img_name in os.listdir(data_dir):
        image_path = data_dir + img_name
        img = imread(image_path)
        img = img.astype('float32')
        temp.append(img)

    train_x = np.stack(temp)
    train_x = train_x / 255

    # Side effects
    model.fit(x=train_x,
              y=gan_targets(train_x.shape[0]),
              epochs=epochs,
              batch_size=batch_size)

    zsamples = np.random.normal(size=(10, latent_dim))
    pred = generator.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :])
        plt.savefig(out_dir + str(i) + '.png')
Code example #13
def example_aae(path, adversarial_optimizer):
    # z \in R^100
    latent_dim = 100
    # x \in R^{64x64}
    input_shape = (64, 64)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = model_encoder(latent_dim, input_shape)
    #autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)

    # assemble AAE
    x = autoencoder.inputs[0]
    #z = encoder(x)
    xpred = autoencoder(x)
    #zreal = normal_latent_sampling((latent_dim,))(x)
    yreal = discriminator(concatenate([x,xpred],axis=3))
    #yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yreal], ["xpred", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    adversarial_optimizer = AdversarialOptimizerSimultaneous()
    model = AdversarialModel(base_model=aae,
                             player_params=[autoencoder.trainable_weights, discriminator.trainable_weights],
                             player_names=["autoencoder", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=['adam', Adam(1e-3, decay=1e-3)],
                              loss={"yreal": "binary_crossentropy",
                                    "xpred": masked_mse},
                              player_compile_kwargs=[{"loss_weights": {"yreal": 1e-2, "xpred": 1}}] * 2)




    History = model.fit(x=xtrain, y=y, validation_data=(xval, yval), epochs=100, batch_size=15)

    Outputs = model.predict(xtest)
    print(Outputs[0].shape)
    Outputs = Outputs[0].reshape(Outputs[0].shape[0],64,64)

    #plt.figure()
    #plt.imshow(xtest[1,:,:,0])
    #plt.colorbar()
    #plt.show()

    #plt.figure()
    #plt.imshow(Outputs[1,:,:])
    #plt.colorbar()
    #plt.show()

    #plt.figure()
    #plt.imshow(ytest[0,1,:,:])
    #plt.colorbar()
    #plt.show()
    return (History, Outputs)
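
`masked_mse` is defined outside this snippet; a plausible sketch of such a masked reconstruction loss is given below. The masking convention (ignore positions where the target is zero) is an assumption for illustration, not the author's definition:

from keras import backend as K

def masked_mse(y_true, y_pred):
    # Assumed convention: only penalize positions where the target is non-zero.
    mask = K.cast(K.not_equal(y_true, 0), K.floatx())
    return K.sum(mask * K.square(y_pred - y_true)) / (K.sum(mask) + K.epsilon())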
Code example #14
class FastAdversarialMF(MatrixFactorization):
    def __init__(self, uNum, iNum, dim, weight, pop_percent):

        self.uNum = uNum
        self.iNum = iNum
        self.dim = dim
        self.weight = weight
        self.pop_percent = pop_percent

        # Define user input -- user index (an integer)
        userInput = Input(shape=(1,), dtype="int32")
        itemInput = Input(shape=(1,), dtype="int32")
        userAdvInput = Input(shape=(1,), dtype="int32")
        itemAdvInput = Input(shape=(1,), dtype="int32")

        userEmbeddingLayer = Embedding(input_dim=uNum, output_dim=dim)
        itemEmbeddingLayer = Embedding(input_dim=iNum, output_dim=dim)

        uEmb = Flatten()(userEmbeddingLayer(userInput))
        iEmb = Flatten()(itemEmbeddingLayer(itemInput))
        uAdvEmb = Flatten()(userEmbeddingLayer(userAdvInput))
        iAdvEmb = Flatten()(itemEmbeddingLayer(itemAdvInput))

        self.uEncoder = Model(userInput, uEmb)
        self.iEncoder = Model(itemInput, iEmb)

        self.discriminator_i = self.generate_discriminator()
        self.discriminator_i.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
        self.discriminator_i.trainable = False
        validity = self.discriminator_i(iAdvEmb)

        self.discriminator_u = self.generate_discriminator()
        self.discriminator_u.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
        self.discriminator_u.trainable = False
        validity_u = self.discriminator_u(uAdvEmb)

        pred = dot([uEmb, iEmb], axes=-1)
        # pred = merge([uEmb, iEmb], mode="concat")

        self.model = Model([userInput, itemInput], pred)
        # self.model.compile(optimizer="adam", loss="mean_squared_error", metrics=['mse'])

        # self.advModel = Model([userInput, itemInput, userAdvInput, itemAdvInput], [pred, validity_u, validity])
        # self.advModel.compile(optimizer="adam",
        #                       loss=["mean_squared_error", "binary_crossentropy", "binary_crossentropy"],
        #                       metrics=['mse', 'acc', 'acc'], loss_weights=[1, self.weight, self.weight])

        self.aae = Model([userInput, itemInput, userAdvInput, itemAdvInput],
                         fix_names([pred, validity_u, validity], ["xpred", "upred", "ipred"]))

        mf_params = self.uEncoder.trainable_weights + self.iEncoder.trainable_weights
        self.advModel = AdversarialModel(base_model=self.aae,
                                         player_params=[mf_params, self.discriminator_u.trainable_weights,
                                                        self.discriminator_i.trainable_weights],
                                         player_names=["mf", "disc_u", "disc_i"])

        self.advModel.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                                          player_optimizers=[Adam(), Adam(), Adam()],
                                          loss={"upred": "binary_crossentropy", "ipred": "binary_crossentropy",
                                                "xpred": "mean_squared_error"},
                                          player_compile_kwargs=[{"loss_weights": {"upred": 1, "ipred": 1,
                                                                                   "xpred": 1}}] * 2)

        # model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
        #                           player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
        #                           loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
        #                                 "xpred": "mean_squared_error"},
        #                           player_compile_kwargs=[{"loss_weights": {"yfake": 1e-2, "yreal": 1e-2,
        #                                                                    "xpred": 1}}] * 2)

    def init(self, users, items):
        self.popular_user_x, self.rare_user_x = self.get_discriminator_train_data(users)
        self.popular_item_x, self.rare_item_x = self.get_discriminator_train_data(items)




    def train(self, x_train, y_train, batch_size):


        # sample batches for User Discriminator

        pop_idx = np.random.randint(0, len(self.popular_user_x), int(len(y_train) / 2))
        rare_idx = np.random.randint(0, len(self.rare_user_x), int(len(y_train) / 2))

        user_x = np.concatenate([self.popular_user_x[pop_idx], self.rare_user_x[rare_idx]])[:len(y_train)]
        user_y = np.concatenate([np.ones(int(len(y_train) / 2)), np.zeros(int(len(y_train) / 2))])[:len(y_train)]
        user_x = self.uEncoder.predict((user_x))

        # sample mini-batches for Item Discriminator

        pop_idx = np.random.randint(0, len(self.popular_item_x), int(len(y_train) / 2))
        rare_idx = np.random.randint(0, len(self.rare_item_x), int(len(y_train) / 2))

        item_x = np.concatenate([self.popular_item_x[pop_idx], self.rare_item_x[rare_idx]])[:len(y_train)]
        item_y = np.concatenate([np.ones(int(len(y_train) / 2)), np.zeros(int(len(y_train) / 2))])[:len(y_train)]
        item_x = self.iEncoder.predict((item_x))

        # Train adversarial model
        x = x_train + [user_x, item_x, user_x, item_x]
        y = [y_train, user_y, item_y, y_train, user_y[::-1], item_y[::-1]]

        history = self.advModel.fit(x=x, y=y, batch_size=batch_size, epochs=1, verbose=1)

        return history

    def generate_discriminator(self):

        itemInput = Input(shape=(self.dim,))
        hidden = Dense(self.dim, activation="relu")
        finalHidden = Dense(1, activation="sigmoid")

        pred = finalHidden(hidden(itemInput))

        return Model(itemInput, pred)

    def get_discriminator_train_data(self, x):

        popularity = {}
        for i in x:
            if i in popularity:
                popularity[i] += 1
            else:
                popularity[i] = 1

        popularity = {k: v for k, v in sorted(popularity.items(), key=lambda x: x[1])[::-1]}
        popularity = np.array(list(popularity.keys()))

        popular_x = popularity[:int(len(popularity) * self.pop_percent)]
        rare_x = popularity[int(len(popularity) * self.pop_percent):]

        return popular_x, rare_x
Code example #15
def example_bigan(path, adversarial_optimizer):
    # z \in R^100
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
Code example #16
def driver_gan(path, adversarial_optimizer):
    # z \in R^3
    latent_dim = 3
    # x \in R^{15x6}
    input_shape = (15, 6)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    # Merging encoder weights and generator weights
    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-7, decay=1e-7), Adam(1e-6, decay=1e-7)],
                              loss='binary_crossentropy')

    # load driver data
    train_dataset = [1,2,5]
    test_dataset = [3,4]
    train_reader = data_base(train_dataset)
    test_reader = data_base(test_dataset)
    xtrain, xtest = train_reader.read_files(),test_reader.read_files()
    # ---------------------------------------------------------------------------------
    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(1 * 1, latent_dim))  # the original MNIST example samples a (10, 10) grid
        return generator.predict(zsamples).reshape((1, 1, 15, 6))  # default in the MNIST example is (10, 10, 28, 28)


    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)  # number of samples drawn from the test set
        xrep = np.repeat(xsamples, 5, axis=0)  # number of repeats per sample
        xgen = autoencoder.predict(xrep).reshape((1, 1, 15, 6))
        xsamples = xsamples.reshape((1, 1, 15, 6))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x


    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        nb_epoch=25, batch_size=10, verbose=0)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
Code example #17
# print(generator.summary())
# print(discriminator.summary())

# Build a GAN
gan = simple_gan(generator=generator,
                 discriminator=discriminator,
                 latent_sampling=normal_latent_sampling((100, )))
model = AdversarialModel(
    base_model=gan,
    player_params=[
        generator.trainable_weights, discriminator.trainable_weights
    ],
)

model.adversarial_compile(
    adversarial_optimizer=AdversarialOptimizerSimultaneous(),
    player_optimizers=['adam', 'adam'],
    loss='binary_crossentropy')
history = model.fit(train_data,
                    gan_targets(train_data.shape[0]),
                    epochs=10,
                    batch_size=batch_size)

sample = np.random.normal(size=(10, 100))
pred = generator.predict(sample)

for i in range(pred.shape[0]):
    plt.imshow(pred[i, :], cmap='gray')
    plt.show()

# print(model.summary())
Code example #18
def example_aae(path, adversarial_optimizer):
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)

    # assemble AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim, ))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal],
                             ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        compile_kwargs={
            "loss_weights": {
                "yfake": 1e-2,
                "yreal": 1e-2,
                "xpred": 1
            }
        })

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        samples = np.concatenate((xsamples, xgen), axis=1)
        return samples

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    # generator, discriminator; pred, yfake, yreal
    n = xtrain.shape[0]
    y = [
        xtrain,
        np.ones((n, 1)),
        np.zeros((n, 1)), xtrain,
        np.zeros((n, 1)),
        np.ones((n, 1))
    ]
    ntest = xtest.shape[0]
    ytest = [
        xtest,
        np.ones((ntest, 1)),
        np.zeros((ntest, 1)), xtest,
        np.zeros((ntest, 1)),
        np.ones((ntest, 1))
    ]
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code example #19
File: tutorial.py  Project: mbc1990/animal-generator
def main():
    # to stop potential randomness
    seed = 128
    rng = np.random.RandomState(seed)

    # set path
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'Data')

    # load data
    train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
    # test = pd.read_csv(os.path.join(data_dir, 'test.csv'))

    temp = []
    for img_name in train.filename:
        image_path = os.path.join(data_dir, 'Train', 'Images', 'train',
                                  img_name)
        img = imread(image_path, flatten=True)
        img = img.astype('float32')
        temp.append(img)

    train_x = np.stack(temp)

    train_x = train_x / 255

    # print image
    img_name = rng.choice(train.filename)
    filepath = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)

    img = imread(filepath, flatten=True)

    # optional pylab preview of a training image
    # pylab.imshow(img, cmap='gray')
    # pylab.axis('off')
    # pylab.show()

    # Levers
    g_input_shape = 100
    d_input_shape = (28, 28)
    hidden_1_num_units = 500
    hidden_2_num_units = 500
    g_output_num_units = 784
    d_output_num_units = 1
    epochs = 25
    batch_size = 128

    # generator
    model_1 = Sequential([
        Dense(units=hidden_1_num_units,
              input_dim=g_input_shape,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=g_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Reshape(d_input_shape),
    ])

    # discriminator
    model_2 = Sequential([
        InputLayer(input_shape=d_input_shape),
        Flatten(),
        Dense(units=hidden_1_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=d_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
    ])
    gan = simple_gan(model_1, model_2, normal_latent_sampling((100, )))
    model = AdversarialModel(
        base_model=gan,
        player_params=[model_1.trainable_weights, model_2.trainable_weights])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=['adam', 'adam'],
        loss='binary_crossentropy')

    history = model.fit(x=train_x,
                        y=gan_targets(train_x.shape[0]),
                        epochs=10,
                        batch_size=batch_size)
    zsamples = np.random.normal(size=(10, 100))
    pred = model_1.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.savefig('out/numbers/' + str(i) + '.png')
Code example #20
])

# Compiling the GAN

gan = simple_gan(model_g, model_d, normal_latent_sampling((100, )))
model = AdversarialModel(
    base_model=gan,
    player_params=[model_g.trainable_weights, model_d.trainable_weights])
model.adversarial_compile(
    adversarial_optimizer=AdversarialOptimizerSimultaneous(),
    player_optimizers=['adam', 'adam'],
    loss='binary_crossentropy')

# Training the GAN
history = model.fit(x=train_X,
                    y=gan_targets(train_X.shape[0]),
                    epochs=10,
                    batch_size=batch_size)

# Visualisation
plt.plot(history.history['player_0_loss'])
plt.plot(history.history['player_1_loss'])
plt.plot(history.history['loss'])

# Generation of the image

samples = np.random.normal(size=(10, 100))
pred = model_g.predict(samples)
for i in range(pred.shape[0]):
    plt.imshow(pred[i, :], cmap='gray')
    plt.show()
Code example #21
    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # train model
    generator_cb = ImageGridCallback("output/gan_convolutional/epoch-{:03d}.png",
                                     generator_sampler(latent_dim, generator))

    xtrain, xtest = mnist_data()
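    # reshape to (N, 1, 28, 28); dim_ordering_fix moves the channel axis to
    # wherever the active backend (Theano or TensorFlow) expects it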
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb], epochs=100,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/gan_convolutional/history.csv")

    generator.save("output/gan_convolutional/generator.h5")
    discriminator.save("output/gan_convolutional/discriminator.h5")
コード例 #22
0
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator, normal_latent_sampling((latent_dim,)))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(base_model=bigan,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png", generator_sampler)

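    # 10x10 grid: the first column holds original test digits, the remaining
    # nine columns their autoencoder reconstructions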
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback("output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        epochs=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
コード例 #23
0
class UnifAI(object):
    
    def __init__(self, config):
        self.config = config
        self.model_config = self.config.model_config  # alias
        
        self.module_builder = UnifAI_ModuleBuilder(
            self.model_config, self.config.remote_weights_path
        )
        
        self.encoder = None
        self.predictor = None
        self.decoder = None
        self.noisy_transformer = None
        self.disentangler1 = None
        self.disentangler2 = None
        self.z_discriminator = None
        
        self.model_inference = None
        self.model_train = None
        
        self.optimizers = self.config.optimizers  # alias
        self.compiled = False
    
    def __random_target(self, x, dim, embedding_activation):
        range_low = -1 if embedding_activation == 'tanh' else 0
        range_high = 1
        tfake = uniform_latent_sampling(
            (dim,), low=range_low, high=range_high
        )(x)
        return tfake
    
    def __build_connected_network_train(self, main=True):
        x = self.encoder.inputs[0]
        e1, e2 = self.encoder(x)
        
        noisy_e1 = self.noisy_transformer(e1)

        y = self.predictor(e1)
        x_pred = self.decoder([noisy_e1, e2])

        e1_dim = int(self.encoder.outputs[0].shape[-1])
        e2_dim = int(self.encoder.outputs[1].shape[-1])

        output_vars = [y, x_pred]
        output_names = ['y', 'x_pred']

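        # disentangler1 tries to predict e2 from e1, disentangler2 predicts e1 from e2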
        e1_target = e1
        e2_target = e2
        e2_pred = self.disentangler1(e1)
        e1_pred = self.disentangler2(e2)

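        # for the main player the disentangler targets are swapped for uniform noise
        # (see __random_target); presumably this pushes the encoder to make e1 and e2
        # carry no information about each other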
        if main:
            embedding_activation = self.model_config.embedding_activation
            e2_target = self.__random_target(x, e2_dim, embedding_activation)
            e1_target = self.__random_target(x, e1_dim, embedding_activation)

        e1_e1_pred = Concatenate()([e1_target, e1_pred])
        output_vars.append(e1_e1_pred)
        output_names.append('e1pred')
        
        e2_e2_pred = Concatenate()([e2_target, e2_pred])
        output_vars.append(e2_e2_pred)
        output_names.append('e2pred')
        
        if self.z_discriminator is not None:
            z = self.z_discriminator(e1)
            output_vars.append(z)
            output_names.append('z')

        outputs = fix_names(output_vars, output_names)
        network = Model(inputs=[x], outputs=outputs)

        return network
    
    def build_model_train(self):
        if self.model_train is None:
            with tf.device('/gpu:0'):
                # Build modules:

                ## Encoder: x -> [e1, e2]
                ## Predictor: e1 -> y
                ## Noisy-transformer: e1 -> noisy_e1
                ## Decoder: [noisy_e1, e2] -> x
                
                self.encoder, self.predictor, self.decoder = \
                    self.module_builder.build_default_modules(
                        ['encoder', 'predictor', 'decoder']
                    )
                
                self.noisy_transformer = self.module_builder.build_module(
                    'noisy_transformer', name='noisy_transformer',
                    build_kwargs={
                        'params': [self.config.dropout_rate]
                    }
                )

                ## Disentanglers:
                self.disentangler1 = self.module_builder.build_module(
                    'disentangler', name='disentangler1',
                    build_kwargs={
                        'input_dim': self.model_config.embedding_dim_1,
                        'output_dim': self.model_config.embedding_dim_2
                    }
                )
                self.disentangler2 = self.module_builder.build_module(
                    'disentangler', name='disentangler2',
                    build_kwargs={
                        'input_dim': self.model_config.embedding_dim_2,
                        'output_dim': self.model_config.embedding_dim_1
                    }
                )
                
                ## z_discriminator:
                if self.config.bias:
                    self.z_discriminator = self.module_builder.build_module(
                        'z_discriminator', name='z_discriminator'
                    )

                # Build 2 copies of the connected network            
                main_model = self.__build_connected_network_train(main=True)
                adv_model = self.__build_connected_network_train(main=False)
                
                models = [main_model, adv_model]
    
            # Parallelize over GPUs
            if self.config.num_gpus > 1:
                for i in range(len(models)):
                    models[i] = \
                        multi_gpu_model(models[i], gpus=self.config.num_gpus)
            
            # Create final model
            
            ## Gather params
            main_params = self.encoder.trainable_weights + \
                self.predictor.trainable_weights + self.decoder.trainable_weights
            adv_params = self.disentangler1.trainable_weights + \
                self.disentangler2.trainable_weights
            if self.config.bias:
                adv_params.extend(self.z_discriminator.trainable_weights)
            
            ## Build keras_adversarial model
            self.model_train = AdversarialModel(
                player_models=models,
                player_params=[main_params, adv_params],
                player_names=['main_model', 'adv_model']
            )
    
    def compile_model(self):
        assert self.model_train is not None, 'run build_model_train()'
        
        optimizers = self.config.optimizers
        losses = self.config.losses
        
        main_loss_weights = [lw for lw in self.config.loss_weights]
        adv_loss_weights = [lw for lw in self.config.loss_weights]
        player_compile_kwargs = [
            {
                'loss_weights': main_loss_weights,
                'metrics': self.config.metrics
            },
            {
                'loss_weights': adv_loss_weights,
                'metrics': self.config.metrics
            }
        ]
        
        adversarial_optimizer = AdversarialOptimizerScheduled(
            [int(p) for p in list(self.config.training_schedule)]
        )
        self.model_train.adversarial_compile(
            adversarial_optimizer=adversarial_optimizer,
            player_optimizers=optimizers,
            loss=losses, player_compile_kwargs=player_compile_kwargs
        )
        
        self.compiled = True
        
    def build_compiled_model(self):
        self.build_model_train()
        self.compile_model()
    
    def fit(self, dtrain, dvalid, streaming_data=False, epochs=None,
            callbacks=None, training_steps=None, validation_steps=None):
        assert self.compiled, 'run compile_model() before training'
        
        if streaming_data:
            train_generator = dtrain
            valid_generator = dvalid
            
            self.model_train.fit_generator(
                train_generator,
                steps_per_epoch=training_steps,
                validation_data=valid_generator,
                validation_steps=validation_steps,
                callbacks=callbacks,
                epochs=epochs
            )
        else:
            xtrain, ytrain = dtrain
            xvalid, yvalid = dvalid

            self.model_train.fit(
                x=xtrain, y=ytrain,
                validation_data=(xvalid, yvalid),
                callbacks=callbacks, epochs=epochs,
                batch_size=(self.config.batch_size * self.config.num_gpus)
            )        
    
    def build_model_inference(self, checkpoint_epoch=None):
        if self.model_inference is None:
            device = '/cpu:0' if self.config.num_gpus > 1 else '/gpu:0'
            with tf.device(device):
                self.encoder = self.module_builder.build_module(
                    'encoder', epoch=checkpoint_epoch
                )
                self.predictor = self.module_builder.build_module(
                    'predictor', epoch=checkpoint_epoch
                )
                
                x = self.encoder.inputs[0]
                e1, _ = self.encoder(x)
                y = self.predictor(e1)
                self.model_inference = Model(x, y)
            
            # Parallelize over GPUs
            if self.config.num_gpus > 1:
                self.model_inference = multi_gpu_model(
                    self.model_inference, gpus=self.config.num_gpus
                )
    
    def predict(self, data, streaming_data=False, prediction_steps=None):
        assert self.model_inference is not None, 'run build_model_inference() first'
        
        if streaming_data:
            return self.model_inference.predict_generator(data, steps=prediction_steps)
        else:
            return self.model_inference.predict(data)
コード例 #24
0
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator,
                         normal_latent_sampling((latent_dim, )))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        base_model=bigan,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png",
                                     generator_sampler)

    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        "output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        epochs=100,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
コード例 #25
0
          bias_initializer='ones',
          bias_constraint=non_neg()))

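# GAN over a 25-dimensional latent space; judging by normalizationVector and
# houseTest below, the training data appears to be normalised tabular housing features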
gan = simple_gan(generator, discriminator, normal_latent_sampling((25, )))
model = AdversarialModel(base_model=gan,
                         player_params=[
                             generator.trainable_weights,
                             discriminator.trainable_weights
                         ])
model.adversarial_compile(
    adversarial_optimizer=AdversarialOptimizerSimultaneous(),
    player_optimizers=['adam', 'adam'],
    loss='binary_crossentropy')
test = model.fit(x=x_train,
                 y=gan_targets(np.array(x_train).shape[0]),
                 epochs=100,
                 batch_size=50,
                 shuffle=True)

discriminator.save('discriminator.h5')
generator.save('generator.h5')

generator = load_model('generator.h5')

houseTest = []
pred = generator.predict(np.random.uniform(-1.0, 1.0, size=(1, 25)))

for i in range(len(pred)):
    houseTest.append(normalizationVector[i] * pred[i])
print(houseTest)
コード例 #26
0
def gan():
    # define variables
    # initialise a few hyper-parameters
    g_input_shape = 100  # number of units in the generator's input layer
    d_input_shape = (28, 28)  # input shape of the discriminator
    hidden_1_num_units = 500
    hidden_2_num_units = 500
    g_output_num_units = 784  # generator output units: 28*28 = 784
    d_output_num_units = 1  # discriminator output: a single unit deciding whether the image is real
    epochs = 100
    batch_size = 128

    # define the generator, which produces images
    model_g = Sequential([
        Dense(units=hidden_1_num_units,
              input_dim=g_input_shape,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Dense(units=g_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Reshape(d_input_shape)
    ])

    # define the discriminator, which judges images
    model_d = Sequential([
        InputLayer(input_shape=d_input_shape),
        Flatten(),
        Dense(units=hidden_1_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Dense(units=d_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1E-5, 1E-5))
    ])
    # model_g.summary()
    # model_d.summary()

    from keras_adversarial import AdversarialModel, simple_gan, gan_targets
    from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
    # start building and training the GAN network
    gan = simple_gan(model_g, model_d, normal_latent_sampling((100, )))
    # gan.summary()
    # with Keras 2.2.x the code below raises an error; with Keras 2.1.2 it does not
    model = AdversarialModel(
        base_model=gan,
        player_params=[model_g.trainable_weights, model_d.trainable_weights])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=['adam', 'adam'],
        loss='binary_crossentropy')
    # train with the training data
    # keras_adversarial was cloned locally and used to replace the pip-installed package,
    # which resolved: AttributeError: 'AdversarialModel' object has no attribute '_feed_output_shapes'
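    # a possible way to do that (an assumption, not part of the original source):
    #   pip install git+https://github.com/bstriner/keras-adversarial.git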
    history = model.fit(x=train_x,
                        y=gan_targets(train_x.shape[0]),
                        epochs=epochs,
                        batch_size=batch_size)
    # save the weights as .h5 files
    model_g.save_weights('gan1_g.h5')
    model_d.save_weights('gan1_d.h5')
    model.save_weights('gan1.h5')

    # plot the training losses
    plt.plot(history.history['player_0_loss'], label='player_0_loss')
    plt.plot(history.history['player_1_loss'], label='player_1_loss')
    plt.plot(history.history['loss'], label='loss')
    plt.show()

    # images generated after the 100 training epochs
    # draw 10 random latent vectors to generate 10 images
    zsample = np.random.normal(size=(10, 100))
    pred = model_g.predict(zsample)
    print(pred.shape)  # (10,28,28)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.show()
コード例 #27
0
def example_aae(path, adversarial_optimizer):
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)

    # assemble the AAE
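    # the discriminator scores encoder codes z (yfake) against samples drawn from
    # the N(0, 1) prior (yreal); the generator decodes z back to xpred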
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim,))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(base_model=aae,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
                                    "xpred": "mean_squared_error"},
                              compile_kwargs={"loss_weights": {"yfake": 1e-2, "yreal": 1e-2, "xpred": 1}})

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        samples = np.concatenate((xsamples, xgen), axis=1)
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler)

    # train network
    # targets per player: the generator gets (xpred=xtrain, yfake=1, yreal=0),
    # the discriminator gets (xpred=xtrain, yfake=0, yreal=1)
    n = xtrain.shape[0]
    y = [xtrain, np.ones((n, 1)), np.zeros((n, 1)), xtrain, np.zeros((n, 1)), np.ones((n, 1))]
    ntest = xtest.shape[0]
    ytest = [xtest, np.ones((ntest, 1)), np.zeros((ntest, 1)), xtest, np.zeros((ntest, 1)), np.ones((ntest, 1))]
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        epochs=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
コード例 #28
0
    xtrain = x[:-10]
    xtest = x[-10:]

    mini = np.min(xtrain.ravel())
    maxi = np.max(xtrain.ravel())
    xtrain = (xtrain - mini) / (maxi - mini)
    xtest = (xtest - mini) / (maxi - mini)

    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 92, 92)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 92, 92)))

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb],
                        epochs=100,
                        batch_size=10)
    df = pd.DataFrame(history.history)
    df.to_csv("output/gan_convolutional/history.csv")

    generator.save("output/gan_convolutional/generator.h5")
    discriminator.save("output/gan_convolutional/discriminator.h5")

    #  print(xtrain[0])
    """
    print(xtest[0])
    """

    yy = generator_sampler(latent_dim, generator)
コード例 #29
0
def generator_sampler():
	xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
	return xpred.reshape((10, 10) + xpred.shape[1:])
generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"),
                                 generator_sampler, cmap=None)

# train model
xtrain, xtest = cifar10_data()
y = targets(xtrain.shape[0])
ytest = targets(xtest.shape[0])
callbacks = [generator_cb]
if K.backend() == "tensorflow":
	callbacks.append(TensorBoard(log_dir=os.path.join(path, 'logs'),
		histogram_freq=0, write_graph=True, write_images=True))
history = model.fit(x=dim_ordering_fix(xtrain), y=y,
                    validation_data=(dim_ordering_fix(xtest), ytest),
                    callbacks=callbacks, epochs=nb_epoch,
                    batch_size=32)
# save history to CSV
df = pd.DataFrame(history.history)
df.to_csv(csvpath)
# save models
generator.save(os.path.join(path, "generator.h5"))
d_d.save(os.path.join(path, "discriminator.h5"))


def main():
	# z in R^100
	latent_dim = 100
	# x in R^{28x28}
	# generator (z -> x)
	generator = model_generator()