Code Example #1
    def ali_model(self):
        self.encoder = self.encoder_model()
        self.decoder = self.decoder_model()
        disc_train_enc_dec, disc_train_disc = self.discriminator_model()

        bigan_train_enc_dec = simple_bigan(self.decoder, self.encoder,
                                           disc_train_enc_dec)
        bigan_train_disc = simple_bigan(self.decoder, self.encoder,
                                        disc_train_disc)

        x = bigan_train_enc_dec.inputs[1]
        z = normal_latent_sampling(self.embedding_size)(x)

        # re-wrap so x is the only input; z is sampled internally from the latent prior
        bigan_train_enc_dec = Model(x, bigan_train_enc_dec([z, x]))
        bigan_train_disc = Model(x, bigan_train_disc([z, x]))

        # encoder.summary()
        # decoder.summary()
        # disc_train_enc_dec.summary()
        # disc_train_disc.summary()
        # bigan_train_enc_dec.summary()
        # bigan_train_disc.summary()

        model = AdversarialModel(player_models=[bigan_train_enc_dec, bigan_train_disc],
                                 player_params=[self.encoder.trainable_weights + self.decoder.trainable_weights,
                                                disc_train_disc.trainable_weights],
                                 player_names=['encoder_decoder', 'discriminator'])
        model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerScheduled(self.train_schedule),
                                  player_optimizers=[Adam(lr=5e-5, beta_1=0.5, beta_2=1e-3), \
                                                     Adam(lr=5e-5, beta_1=0.5, beta_2=1e-3)],
                                  loss=['mean_squared_error', 'mean_squared_error'])
        return model
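A small, self-contained illustration of the normal_latent_sampling pattern used above (and in most of the examples below): applied to an input tensor x, it yields Gaussian latent samples whose batch dimension follows x, so z never has to be fed explicitly. The shapes here are made up for the demo.

import numpy as np
from keras.layers import Input
from keras.models import Model
from keras_adversarial import normal_latent_sampling

x = Input(shape=(5,))
z = normal_latent_sampling((3,))(x)             # z ~ N(0, 1), batch size taken from x
sampler = Model(x, z)
print(sampler.predict(np.zeros((4, 5))).shape)  # expected: (4, 3)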
Code Example #2
def gan_model_test():
    latent_dim = 10
    input_dim = 5
    generator = model_generator(input_dim=input_dim, latent_dim=latent_dim)
    discriminator = model_discriminator(input_dim=input_dim)
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    adversarial_optimizer = AdversarialOptimizerSimultaneous()
    opt_g = Adam(1e-4)
    opt_d = Adam(1e-3)
    loss = 'binary_crossentropy'
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # train model
    batch_size = 32
    n = batch_size * 8
    x = np.random.random((n, input_dim))
    y = gan_targets(n)
    fit(model, x, y, nb_epoch=3, batch_size=batch_size)
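The test above assumes model_generator and model_discriminator factories that are not shown; gan_targets(n) supplies the standard per-player fake/real target arrays used as y. A minimal sketch of what the two factories might look like (hypothetical layer sizes, loosely mirroring the dense models in Code Example #13):

from keras.layers import Dense
from keras.models import Sequential
from keras.regularizers import L1L2

def model_generator(input_dim=5, latent_dim=10):
    # z (latent_dim,) -> x (input_dim,)
    return Sequential([
        Dense(16, input_dim=latent_dim, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(input_dim, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
    ])

def model_discriminator(input_dim=5):
    # x (input_dim,) -> y, the probability that x is real
    return Sequential([
        Dense(16, input_dim=input_dim, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
    ])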
Code Example #3
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim, targets=gan_targets, loss="binary_crossentropy"):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return
    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is gaussian generated on GPU
    # can also experiment with uniform_latent_sampling
    d_g = discriminator(0)
    d_d = discriminator(0.5)
    generator.summary()
    d_d.summary()
    gan_g = simple_gan(generator, d_g, None)
    gan_d = simple_gan(generator, d_d, None)
    x = gan_g.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    gan_g = Model([x], fix_names(gan_g([z, x]), gan_g.output_names))
    gan_d = Model([x], fix_names(gan_d([z, x]), gan_d.output_names))

    # build adversarial model
    model = AdversarialModel(player_models=[gan_g, gan_d],
                             player_params=[generator.trainable_weights,
                                            d_d.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = generator.predict(zsamples)
        xpred = dim_ordering_unfix(xpred).transpose((0, 2, 3, 1))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(
        os.path.join(path, "epoch-{:03d}.png"),
        generator_sampler, cmap=None
    )

    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(TensorBoard(log_dir=os.path.join(path, "logs"), histogram_freq=0, write_graph=True, write_images=True))

    # train model
    x_train, x_test = cifar10_data()
    y = targets(x_train.shape[0])
    y_test = targets(x_test.shape[0])
    history = model.fit(x=x_train, y=y, validation_data=(x_test, y_test), callbacks=callbacks, epochs=nb_epoch, batch_size=32)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    d_d.save(os.path.join(path, "discriminator.h5"))
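A hedged sketch of how a function like this might be driven; the output path, optimizer settings, and the generator/discriminator factories are assumptions, not part of the original. Note that discriminator here is a factory taking an argument, since it is called as discriminator(0) and discriminator(0.5) above.

from keras.optimizers import Adam
from keras_adversarial import AdversarialOptimizerSimultaneous

# hypothetical driver
example_gan(AdversarialOptimizerSimultaneous(), "output/gan-cifar10",
            opt_g=Adam(1e-4, decay=1e-5),
            opt_d=Adam(1e-3, decay=1e-5),
            nb_epoch=100,
            generator=model_generator(),        # assumed factory returning a Keras model
            discriminator=model_discriminator,  # assumed factory; called with a rate inside
            latent_dim=100)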
Code Example #4
def example_gan(adversarial_optimizer,
                path,
                opt_g,
                opt_d,
                nb_epoch,
                generator,
                discriminator,
                latent_dim,
                targets=gan_targets,
                loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # train model
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"),
                                     generator_sampler)

    xtrain, xtest = mnist_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb],
                        nb_epoch=nb_epoch,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code Example #5
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is gaussian generated on GPU
    # can also experiment with uniform_latent_sampling
    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"), generator_sampler, cmap=None)

    # train model
    xtrain, xtest = cifar10_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    K.set_image_dim_ordering('tf')
    if K.backend() == "tensorflow":
        os.makedirs(os.path.join(path, 'logs'), exist_ok=True)
        callbacks.append(
            TensorBoard(log_dir=os.path.join(path, 'logs'), histogram_freq=0, write_graph=True, write_images=True))

    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
                  callbacks=callbacks, nb_epoch=nb_epoch,
                  batch_size=32)


    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code Example #6
File: main.py  Project: DrRedwanNewaz/D-GAN
def train_gan(adversarial_optimizer,
              path,
              opt_g,
              opt_d,
              nb_epoch,
              generator,
              discriminator,
              latent_dim,
              targets=gan_targets,
              loss='mse'):
    csvpath = os.path.join(path, "history.csv")
    id = 0
    while os.path.exists(csvpath):
        name = "history_%d" % id + ".csv"
        csvpath = os.path.join(path, name)
        print("Already exists: {}".format(csvpath))
        id += 1

    print("Training: {}".format(csvpath))
    print(os.path.join(path, 'logs'))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    xtrain, xtest = db.get_dataset(5, 'train')
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])

    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  nb_epoch=nb_epoch,
                  batch_size=1,
                  verbose=2)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)
    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
    return generator
Code Example #7
def example_gan(adversarial_optimizer,
                path,
                X,
                opt_g,
                opt_d,
                nb_epoch,
                generator,
                discriminator,
                latent_dim,
                targets=gan_targets,
                loss='binary_crossentropy',
                params={}):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # train model
    y = targets(X.shape[0])
    history = model.fit(x=X,
                        y=y,
                        nb_epoch=params['epochs'],
                        batch_size=params['batch_size'])

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code Example #8
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is gaussian generated on GPU
    # can also experiment with uniform_latent_sampling
    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"), generator_sampler, cmap=None)

    # train model
    xtrain, xtest = cifar10_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(
            TensorBoard(log_dir=os.path.join(path, 'logs'), histogram_freq=0, write_graph=True, write_images=True))
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
                  callbacks=callbacks, nb_epoch=nb_epoch,
                  batch_size=32)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code Example #9
def example_gan(adversarial_optimizer,
                path,
                opt_g,
                opt_d,
                nb_epoch,
                generator,
                discriminator,
                latent_dim,
                targets=gan_targets,
                loss='binary_crossentropy'):

    # gan (x -> yfake, yreal)
    # z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])

    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"),
                                     generator_sampler)

    # train model
    xtrain, xtest = mnist_data()

    # targets = gan_targets -> assigns the 0/1 labels to the data
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  callbacks=callbacks,
                  nb_epoch=nb_epoch,
                  batch_size=32)
Code Example #10
def main():
    latent_dim = 100
    input_shape = (1, 28, 28)

    generator = model_generator()
    discriminator = model_discriminator(input_shape=input_shape)
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    generator.summary()
    discriminator.summary()
    gan.summary()

    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    generator_cb = ImageGridCallback(
        "output/gan_convolutional/epoch-{:03d}.png",
        generator_sampler(latent_dim, generator))

    xtrain, xtest = mnist_data()
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb],
                        nb_epoch=100,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/gan_convolutional/history.csv")

    generator.save("output/gan_convolutional/generator.h5")
    discriminator.save("output/gan_convolutional/discriminator.h5")
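dim_ordering_fix / dim_ordering_unfix above shuffle image batches between the channels-first (N, C, H, W) layout and whatever layout the Keras backend expects. A rough local sketch of what such helpers do, written here as an assumption rather than as the utilities' actual implementation:

import numpy as np
import keras.backend as K

def dim_ordering_fix(x):
    # assumption: (N, C, H, W) -> backend layout
    if K.image_data_format() == 'channels_first':
        return x
    return np.transpose(x, (0, 2, 3, 1))

def dim_ordering_unfix(x):
    # assumption: backend layout -> (N, C, H, W)
    if K.image_data_format() == 'channels_first':
        return x
    return np.transpose(x, (0, 3, 1, 2))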
Code Example #11
def run_gan(exp_dir, adversarial_optimizer, opt_g, opt_d, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    # print models
    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan, player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer, player_optimizers=[opt_g, opt_d], loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))
    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        xpred = scale_value(xpred, [0.0, 1.0])
        return xpred.reshape((10, 10) + xpred.shape[1:])

    save_image_cb = ImageGridCallback('./dcgan-v2-images/' + exp_dir + '/epoch-{:03d}.png', generator_sampler, cmap=None)
    save_model_cb = SaveModelWeights(generator, './dcgan-v2-model-weights/' + exp_dir)

    # train model
    xtrain, xtest = svhn_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [save_image_cb, save_model_cb]

    # train model
    epoch_start = 0
    epoch_count = 100
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=callbacks, nb_epoch=epoch_start + epoch_count,
                  batch_size=32, initial_epoch = epoch_start, shuffle=True)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv('./dcgan-v2-images/' + exp_dir + '/history.csv')

    # save final models
    generator.save('./dcgan-v2-model-weights/' + exp_dir + '/generator.h5')
    discriminator.save('./dcgan-v2-model-weights/' + exp_dir + '/discriminator.h5')
Code Example #12
def main():
    # set path
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'MData')
            
    # load data
    train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
    # test = pd.read_csv(os.path.join(data_dir, 'test.csv'))

    temp = []
    for img_name in train.filename:
        image_path = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
        img = imread(image_path, flatten=True)
        img = img.astype('float32')
        temp.append(img)
        
    train_x = np.stack(temp)
    train_x = train_x / 255

    epochs = 1 
    batch_size = 128    

    model_1 = model_generator_cifar()
    model_2 = model_discriminator_cifar()

    # gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
    latent_dim = 100
    gan = simple_gan(model_1, model_2, latent_sampling=normal_latent_sampling((latent_dim,)))

    model = AdversarialModel(base_model=gan,
                             player_params=[model_1.trainable_weights, model_2.trainable_weights])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=['adam', 'adam'],
                              loss='binary_crossentropy')
    
    history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=epochs, batch_size=batch_size)    
    zsamples = np.random.normal(size=(10, 100))
    pred = model_1.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.savefig('out/animals/'+str(i)+'.png')
Code Example #13
File: tutorial.py  Project: mbc1990/animal-generator
def main():
    # to stop potential randomness
    seed = 128
    rng = np.random.RandomState(seed)

    # set path
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'Data')

    # load data
    train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
    # test = pd.read_csv(os.path.join(data_dir, 'test.csv'))

    temp = []
    for img_name in train.filename:
        image_path = os.path.join(data_dir, 'Train', 'Images', 'train',
                                  img_name)
        img = imread(image_path, flatten=True)
        img = img.astype('float32')
        temp.append(img)

    train_x = np.stack(temp)

    train_x = train_x / 255

    # print image
    img_name = rng.choice(train.filename)
    filepath = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)

    img = imread(filepath, flatten=True)

    # optionally preview the image with pylab:
    # pylab.imshow(img, cmap='gray')
    # pylab.axis('off')
    # pylab.show()

    # Levers
    g_input_shape = 100
    d_input_shape = (28, 28)
    hidden_1_num_units = 500
    hidden_2_num_units = 500
    g_output_num_units = 784
    d_output_num_units = 1
    epochs = 25
    batch_size = 128

    # generator
    model_1 = Sequential([
        Dense(units=hidden_1_num_units,
              input_dim=g_input_shape,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=g_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Reshape(d_input_shape),
    ])

    # discriminator
    model_2 = Sequential([
        InputLayer(input_shape=d_input_shape),
        Flatten(),
        Dense(units=hidden_1_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=d_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
    ])
    gan = simple_gan(model_1, model_2, normal_latent_sampling((100, )))
    model = AdversarialModel(
        base_model=gan,
        player_params=[model_1.trainable_weights, model_2.trainable_weights])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=['adam', 'adam'],
        loss='binary_crossentropy')

    history = model.fit(x=train_x,
                        y=gan_targets(train_x.shape[0]),
                        epochs=epochs,
                        batch_size=batch_size)
    zsamples = np.random.normal(size=(10, 100))
    pred = model_1.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.savefig('out/numbers/' + str(i) + '.png')
Code Example #14
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator,
                         normal_latent_sampling((latent_dim, )))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        base_model=bigan,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png",
                                     generator_sampler)

    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        "output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
Code Example #15
def example_bigan(path, adversarial_optimizer):
    # z \in R^25
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(
        latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim, ))(x)
    # eliminate z from inputs
    bigan_generator = Model([x],
                            fix_names(bigan_generator([z, x]),
                                      bigan_generator.output_names))
    bigan_discriminator = Model([x],
                                fix_names(bigan_discriminator([z, x]),
                                          bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(
        player_models=[bigan_generator, bigan_discriminator],
        player_params=[
            generative_params, discriminator_train.trainable_weights
        ],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
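A possible driver for example_bigan; the output directory and optimizer choice below are assumptions, not part of the original.

import os
from keras_adversarial import AdversarialOptimizerSimultaneous

if __name__ == "__main__":
    path = "output/bigan"
    os.makedirs(path, exist_ok=True)
    example_bigan(path, AdversarialOptimizerSimultaneous())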
Code Example #16
# discriminator
model_2 = Sequential([
    InputLayer(input_shape=d_input_shape),
    
    Flatten(),
        
    Dense(units=hidden_1_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),

    Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
        
    Dense(units=d_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
])

print(model_1.summary())
print(model_2.summary())

from keras_adversarial import AdversarialModel, simple_gan, gan_targets
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling

gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
model = AdversarialModel(base_model=gan,player_params=[model_1.trainable_weights, model_2.trainable_weights])
model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=['adam', 'adam'], loss='binary_crossentropy')

print(gan.summary())

history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=10, batch_size=batch_size)
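This excerpt shows only the discriminator; the generator it references as model_1 is not included. A sketch of a matching generator, mirroring the one defined in Code Example #13 (the sizes are taken from that example and are otherwise an assumption):

from keras.layers import Dense, Reshape
from keras.models import Sequential
from keras.regularizers import L1L2

g_input_shape = 100           # latent dimension fed to the generator
d_input_shape = (28, 28)      # image shape consumed by the discriminator
hidden_1_num_units = 500
hidden_2_num_units = 500
g_output_num_units = 784

# generator: z (100,) -> x (28, 28)
model_1 = Sequential([
    Dense(units=hidden_1_num_units, input_dim=g_input_shape, activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=g_output_num_units, activation='sigmoid',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Reshape(d_input_shape),
])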




Code Example #17
def example_aae(path, adversarial_optimizer):
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)

    # assemble the AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim, ))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal],
                             ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(1e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        compile_kwargs={
            "loss_weights": {
                "yfake": 1e-2,
                "yreal": 1e-2,
                "xpred": 1
            }
        })

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        samples = np.concatenate((xsamples, xgen), axis=1)
        return samples

    autoencoder_cb = ImageGridCallback(
        os.path.join(path, "autoencoded-epoch-{:03d}.png"),
        autoencoder_sampler)

    # train network
    # generator, discriminator; pred, yfake, yreal
    n = xtrain.shape[0]
    y = [
        xtrain,
        np.ones((n, 1)),
        np.zeros((n, 1)), xtrain,
        np.zeros((n, 1)),
        np.ones((n, 1))
    ]
    ntest = xtest.shape[0]
    ytest = [
        xtest,
        np.ones((ntest, 1)),
        np.zeros((ntest, 1)), xtest,
        np.zeros((ntest, 1)),
        np.ones((ntest, 1))
    ]
    history = model.fit(x=xtrain,
                        y=y,
                        validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100,
                        batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
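The six target arrays built above follow the output order of the two players (generator: xpred, yfake, yreal; discriminator: xpred, yfake, yreal). A small helper that builds the same list, included as a convenience sketch (aae_targets is not part of keras_adversarial):

import numpy as np

def aae_targets(x):
    # per-player targets: reconstruction target x, then fake/real labels
    n = x.shape[0]
    ones, zeros = np.ones((n, 1)), np.zeros((n, 1))
    return [x, ones, zeros, x, zeros, ones]

# usage: y = aae_targets(xtrain); ytest = aae_targets(xtest)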
Code Example #18
def example_aae(path, adversarial_optimizer):
    # z \in R^256
    latent_dim = 256
    units = 512
    # x \in R^{3x32x32}
    ##input_shape = dim_ordering_shape((3, 128, 170))
    input_shape = dim_ordering_shape((3, 32, 32))
    #input_shape = (3,32,32)
    ###input_shape = (48,48,3)

    # generator (z -> x)
    generator = model_generator(latent_dim, units=units)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape, units=units)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim, units=units)

    # build AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim, ))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal],
                             ["xpred", "yfake", "yreal"]))

    # print summary of models
    print("generator (z -> x)")
    generator.summary()
    print("encoder (x -> z)")
    encoder.summary()
    print("discriminator (z -> y)")
    discriminator.summary()
    print("autoencoder (x -> x')")
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(3e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        player_compile_kwargs=[{
            "loss_weights": {
                "yfake": 1e-1,
                "yreal": 1e-1,
                "xpred": 1e2
            }
        }] * 2)

    # load benthoz data
    xtrain, xtest = benthoz_data()
    print("xtrain shapes {}".format(xtrain.shape))
    print("xtrain mean val {}".format(np.mean(xtrain)))

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return dim_ordering_unfix(generator.predict(zsamples)).transpose(
            (0, 2, 3, 1)).reshape((10, 10, 32, 32, 3))
        #return generator.predict(zsamples).reshape((10, 10, 32, 32, 3))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = dim_ordering_unfix(autoencoder.predict(xrep)).reshape(
            (10, 9, 3, 32, 32))
        xsamples = dim_ordering_unfix(xsamples).reshape((10, 1, 3, 32, 32))
        #xgen = autoencoder.predict(xrep).reshape((10, 9, 32, 32, 3))
        #xsamples = xsamples.reshape((10, 1, 32, 32,3))

        samples = np.concatenate((xsamples, xgen), axis=1)
        samples = samples.transpose((0, 1, 3, 4, 2))
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(
        path, "autoencoded-epoch-{:03d}.png"),
                                       autoencoder_sampler,
                                       cmap=None)

    # train network
    # generator, discriminator; pred, yfake, yreal
    n = xtrain.shape[0]
    print("num train samples {}".format(n))
    y = [
        xtrain,
        np.ones((n, 1)),
        np.zeros((n, 1)), xtrain,
        np.zeros((n, 1)),
        np.ones((n, 1))
    ]
    ntest = xtest.shape[0]
    ytest = [
        xtest,
        np.ones((ntest, 1)),
        np.zeros((ntest, 1)), xtest,
        np.zeros((ntest, 1)),
        np.ones((ntest, 1))
    ]
    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  callbacks=[generator_cb, autoencoder_cb],
                  nb_epoch=100,
                  batch_size=32)

    #history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Code Example #19

def process_mnist(x):
    x = x.astype(np.float32) / 255.
    return x


def mnist_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    return process_mnist(x_train), process_mnist(x_test)


if __name__ == '__main__':
    # z in R^100
    latent_dim = 100

    # x in R^{28 x 28}
    input_shape = (1, 28, 28)

    # generator: (z -> x)
    gen_mod = generator()

    # discriminator: (x -> y)
    disc_mod = discriminator(input_shape)

    # GAN (x -> y_fake, y_real); z ideally generated on GPU
    gan = simple_gan(gen_mod, disc_mod, normal_latent_sampling((latent_dim, )))
    gen_mod.summary()
    disc_mod.summary()
    gan.summary()
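    # The excerpt ends after the summaries. A hedged continuation mirroring the
    # compile/fit steps of Code Example #10; the optimizer settings and the epoch
    # count below are assumptions, not part of the original.
    from keras.optimizers import Adam
    from keras_adversarial import (AdversarialModel,
                                   AdversarialOptimizerSimultaneous, gan_targets)

    model = AdversarialModel(base_model=gan,
                             player_params=[gen_mod.trainable_weights,
                                            disc_mod.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4),
                                                 Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    x_train, x_test = mnist_data()
    x_train = x_train.reshape((-1, 1, 28, 28))  # match input_shape = (1, 28, 28)
    x_test = x_test.reshape((-1, 1, 28, 28))
    model.fit(x=x_train, y=gan_targets(x_train.shape[0]),
              validation_data=(x_test, gan_targets(x_test.shape[0])),
              nb_epoch=50, batch_size=32)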
Code Example #20
File: GANs - 2.py  Project: saumyarshah9697/Scrap
        image_path = os.path.join(path, img_name)
        img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        # cv2.imshow("Window", img)
        # cv2.waitKey(2)
        img = cv2.resize(img, (100, 100))
        img = np.asarray(img)
        temp.append(img)


train_x = np.stack(temp)


cv2.imshow("Window",temp[5][:])
cv2.waitKey(0)

gan = simple_gan(model_G, model_D, normal_latent_sampling((5000,)))

model = AdversarialModel(base_model=gan, player_params=[model_G.trainable_weights, model_D.trainable_weights])

model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), 
                          player_optimizers=['adam', 'adam'], 
                          loss='binary_crossentropy')

history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]), epochs=100, batch_size=batch_s)



plt.plot(history.history['player_0_loss'])
plt.plot(history.history['player_1_loss'])
plt.plot(history.history['loss'])
Code Example #21
def driver_gan(path, adversarial_optimizer):
    # z \in R^3
    latent_dim = 3
    # x \in R^{15x6}
    input_shape = (15, 6)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    # Merging encoder weights and generator weights
    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-7, decay=1e-7), Adam(1e-6, decay=1e-7)],
                              loss='binary_crossentropy')

    # load driver data
    train_dataset = [1,2,5]
    test_dataset = [3,4]
    train_reader = data_base(train_dataset)
    test_reader = data_base(test_dataset)
    xtrain, xtest = train_reader.read_files(),test_reader.read_files()
    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(1 * 1, latent_dim))  # 1x1 grid (the MNIST examples use 10x10)
        return generator.predict(zsamples).reshape((1, 1, 15, 6))  # the MNIST default is (10, 10, 28, 28)


    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)  # pick 10 test samples
        xrep = np.repeat(xsamples, 5, axis=0)  # repeat each sample 5 times
        xgen = autoencoder.predict(xrep).reshape((1, 1, 15, 6))
        xsamples = xsamples.reshape((1, 1, 15, 6))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x


    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        nb_epoch=25, batch_size=10, verbose=0)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
Code Example #22
    # X_test is 10000 rows of 28x28 values.
    X_test = X_test.astype('float32') / 255
    return X_train.reshape(-1, 28, 28, 1), X_test.reshape(-1, 28, 28, 1)


if __name__ == "__main__":
    # z in R^100
    latent_dim = 100
    # generator (z -> x)
    generator = model_generator()
    # discriminator (x -> y)
    discriminator = model_discriminator(input_shape=(28, 28, 1))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = keras_adversarial.simple_gan(
        generator, discriminator,
        keras_adversarial.normal_latent_sampling((latent_dim, )))

    # build adversarial model
    model = keras_adversarial.AdversarialModel(
        base_model=gan,
        player_params=[
            generator.trainable_weights, discriminator.trainable_weights
        ],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[
            keras.optimizers.Adam(1e-4, decay=1e-4),
            keras.optimizers.Adam(1e-3, decay=1e-4)
        ],
        loss='binary_crossentropy')
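    # Training is cut off here; one possible continuation, reusing the channels-last
    # arrays prepared by the truncated loader above (its name, the epoch count and
    # the batch size are assumptions).
    xtrain, xtest = mnist_data()
    y = keras_adversarial.gan_targets(xtrain.shape[0])
    ytest = keras_adversarial.gan_targets(xtest.shape[0])
    model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
              nb_epoch=100, batch_size=32)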
Code Example #23
def example_bigan(path, adversarial_optimizer):
    # z \in R^25
    latent_dim = 25
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
    # bigan (z, x -> yfake, yreal)
    bigan_generator = simple_bigan(generator, encoder, discriminator_test)
    bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
    # z generated on GPU based on batch dimension of x
    x = bigan_generator.inputs[1]
    z = normal_latent_sampling((latent_dim,))(x)
    # eliminate z from inputs
    bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
    bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator_train.summary()
    bigan_discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
                             player_params=[generative_params, discriminator_train.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # load mnist data
    xtrain, xtest = mnist_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler)

    # train network
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator_train.save(os.path.join(path, "discriminator.h5"))
Code Example #24
File: run_model.py  Project: nthistle/em-data-gan
def train_em_gan(adversarial_optimizer,
                 generator,
                 discriminator,
                 gen_opt,
                 disc_opt,
                 latent_dim,
                 h5_filename,
                 h5_dataset_path,
                 sample_shape,
                 output_directory,
                 verbose=1,
                 loss='mean_squared_error',
                 epochs=10,
                 per_epoch=100,
                 r_id="em-gan",
                 is_large_model=False):

    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    if verbose >= 1:
        util.print_model_summaries(generator, discriminator, gan)

    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])

    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[gen_opt, disc_opt],
                              loss=loss)

    zsamples = np.random.normal(size=(5, latent_dim))

    sample_generator = util.h5_block_generator(h5_filename, h5_dataset_path,
                                               sample_shape, [1, 0, 0, 1])

    def generator_sampler():
        return generator.predict(zsamples)

    sampler = util.SampleEM(output_directory, generator_sampler,
                            is_large_model)
    gen_saver = util.SaveModel(generator,
                               os.path.join(output_directory, "generator"))
    disc_saver = util.SaveModel(
        discriminator, os.path.join(output_directory, "discriminator"))

    history = model.fit_generator(sample_generator,
                                  per_epoch,
                                  epochs=epochs,
                                  verbose=verbose,
                                  callbacks=[sampler, gen_saver, disc_saver],
                                  validation_data=sample_generator,
                                  validation_steps=(per_epoch // 5))

    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(output_directory, "history.csv"))

    discriminator.save(
        os.path.join(
            output_directory, "gan_disc_" + str(epochs) + "_" +
            str(per_epoch) + "_" + r_id + ".h5"))
    generator.save(
        os.path.join(
            output_directory, "gan_gen_" + str(epochs) + "_" + str(per_epoch) +
            "_" + r_id + ".h5"))

    del model
    del discriminator
    del generator
Code Example #25
    return fun


if __name__ == "__main__":
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (1, 28, 28)

    # generator (z -> x)
    generator = model_generator()
    # discriminator (x -> y)
    discriminator = model_discriminator(input_shape=input_shape)
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,)))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # train model
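    # (truncated) a hedged continuation mirroring Code Example #10; it assumes the
    # helpers used elsewhere in these examples (mnist_data, dim_ordering_fix,
    # gan_targets, pandas as pd) are in scope, and the output path and epoch count
    # are assumptions.
    xtrain, xtest = mnist_data()
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        nb_epoch=100, batch_size=32)
    pd.DataFrame(history.history).to_csv("output/gan/history.csv")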
Code Example #26
    return Model(d_input, d_V)


def mnist_process(x):
    x = x.astype(np.float32) / 255.0
    return x


def mnist_data():
    (xtrain, ytrain), (xtest, ytest) = mnist.load_data()
    return mnist_process(xtrain), mnist_process(xtest)


if __name__ == "__main__":
    latent_dim = 100
    input_shape = (1, 28, 28)
    generator = model_generator()
    discriminator = model_discriminator(input_shape=input_shape)
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,)))
    generator.summary()
    discriminator.summary()
    gan.summary()

    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        gen = dim_ordering_unfix(generator.predict(zsamples))
        return gen.reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/gan_convolutional/epoch-{:03d}.png", generator_sampler)
    xtrain, xtest = mnist_data()
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
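    # (truncated) the remaining steps, mirroring Code Example #10, which is the same
    # script: prepare the test split, build targets, fit, and save. The epoch count,
    # gan_targets, and the pandas import are assumed to come from the truncated
    # header of the script.
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=[generator_cb], nb_epoch=100, batch_size=32)
    pd.DataFrame(history.history).to_csv("output/gan_convolutional/history.csv")
    generator.save("output/gan_convolutional/generator.h5")
    discriminator.save("output/gan_convolutional/discriminator.h5")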
Code Example #27
def example_aae(path, adversarial_optimizer):
    # z \in R^256
    latent_dim = 256
    units = 512
    # x \in R^{3x32x32}
    input_shape = dim_ordering_shape((3, 32, 32))

    # generator (z -> x)
    generator = model_generator(latent_dim, units=units)
    # encoder (x ->z)
    encoder = model_encoder(latent_dim, input_shape, units=units)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim, units=units)

    # build AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim,))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(base_model=aae,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(3e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
                                    "xpred": "mean_squared_error"},
                              compile_kwargs={"loss_weights": {"yfake": 1e-1, "yreal": 1e-1, "xpred": 1e2}})

    # load CIFAR-10 data
    xtrain, xtest = cifar10_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1)).reshape((10, 10, 32, 32, 3))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = dim_ordering_unfix(autoencoder.predict(xrep)).reshape((10, 9, 3, 32, 32))
        xsamples = dim_ordering_unfix(xsamples).reshape((10, 1, 3, 32, 32))
        samples = np.concatenate((xsamples, xgen), axis=1)
        samples = samples.transpose((0, 1, 3, 4, 2))
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler,
                                       cmap=None)

    # train network
    # targets per player (generator, discriminator) for the outputs xpred, yfake, yreal
    n = xtrain.shape[0]
    y = [xtrain, np.ones((n, 1)), np.zeros((n, 1)), xtrain, np.zeros((n, 1)), np.ones((n, 1))]
    ntest = xtest.shape[0]
    ytest = [xtest, np.ones((ntest, 1)), np.zeros((ntest, 1)), xtest, np.zeros((ntest, 1)), np.ones((ntest, 1))]
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
                  callbacks=[generator_cb, autoencoder_cb],
                  nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
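As a follow-up to the files saved above, the generator can later be reloaded for sampling without rebuilding the adversarial model; a minimal sketch, assuming the saved model loads cleanly with keras.models.load_model and using the latent_dim of 256 from above (the path is a placeholder):

from keras.models import load_model
import numpy as np

generator = load_model("path/to/generator.h5")  # placeholder path
zsamples = np.random.normal(size=(16, 256))     # latent_dim = 256, as above
samples = generator.predict(zsamples)           # generated CIFAR-10-sized images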
Code example #28
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_units,
          activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=output_dis_size,
          activation='sigmoid',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
])

# print(generator.summary())
# print(discriminator.summary())

# Build a GAN
gan = simple_gan(generator=generator,
                 discriminator=discriminator,
                 latent_sampling=normal_latent_sampling((100, )))
model = AdversarialModel(
    base_model=gan,
    player_params=[
        generator.trainable_weights, discriminator.trainable_weights
    ],
)

model.adversarial_compile(
    adversarial_optimizer=AdversarialOptimizerSimultaneous(),
    player_optimizers=['adam', 'adam'],
    loss='binary_crossentropy')
history = model.fit(train_data,
                    gan_targets(train_data.shape[0]),
                    epochs=10,
                    batch_size=batch_size)
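For reference, gan_targets(n) simply builds the label arrays for simple_gan's four outputs (yfake and yreal for each of the two players), so there is no need to construct the targets by hand; it behaves roughly like this sketch:

import numpy as np

def gan_targets_sketch(n):
    # the generator player wants fakes labelled real (1) and real data labelled fake (0);
    # the discriminator player wants the opposite
    generator_fake = np.ones((n, 1))
    generator_real = np.zeros((n, 1))
    discriminator_fake = np.zeros((n, 1))
    discriminator_real = np.ones((n, 1))
    return [generator_fake, generator_real, discriminator_fake, discriminator_real]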
Code example #29
    Dense(units=784, activation='sigmoid', kernel_regularizer=L1L2(1e-5,
                                                                   1e-5)))
gerador.add(Reshape((28, 28)))

# Discriminator
discriminador = Sequential()
discriminador.add(InputLayer(input_shape=(28, 28)))
discriminador.add(Flatten())
discriminador.add(
    Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))
discriminador.add(
    Dense(units=500, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)))
discriminador.add(
    Dense(units=1, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)))

gan = simple_gan(gerador, discriminador, normal_latent_sampling((100, )))
modelo = AdversarialModel(
    base_model=gan,
    player_params=[gerador.trainable_weights, discriminador.trainable_weights])
modelo.adversarial_compile(
    adversarial_optimizer=AdversarialOptimizerSimultaneous(),
    player_optimizers=['adam', 'adam'],
    loss='binary_crossentropy')
modelo.fit(x=previsores_treinamento,
           y=gan_targets(60000),
           epochs=100,
           batch_size=256)

amostras = np.random.normal(size=(20, 100))
previsao = gerador.predict(amostras)
for i in range(previsao.shape[0]):
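    # assumed completion -- the original snippet is truncated here; following the pattern
    # of the other examples (and assuming matplotlib.pyplot is imported as plt), the loop
    # body presumably displays each generated sample:
    plt.imshow(previsao[i], cmap='gray')
    plt.show()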
Code example #30
def gan():
    # define variables
    # initialize some hyperparameters
    g_input_shape = 100  # number of units in the generator input layer
    d_input_shape = (28, 28)  # shape of the discriminator input
    hidden_1_num_units = 500
    hidden_2_num_units = 500
    g_output_num_units = 784  # generator output units, 28*28
    d_output_num_units = 1  # single discriminator output: real or fake
    epochs = 100
    batch_size = 128

    # define the generator, which produces images
    model_g = Sequential([
        Dense(units=hidden_1_num_units,
              input_dim=g_input_shape,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=g_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Reshape(d_input_shape)
    ])

    # define the discriminator, which tells real images from generated ones
    model_d = Sequential([
        InputLayer(input_shape=d_input_shape),
        Flatten(),
        Dense(units=hidden_1_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units,
              activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=d_output_num_units,
              activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5))
    ])
    # model_g.summary()
    # model_d.summary()

    from keras_adversarial import AdversarialModel, simple_gan, gan_targets
    from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
    # build and train the GAN
    gan = simple_gan(model_g, model_d, normal_latent_sampling((100, )))
    # gan.summary()
    # In Keras 2.2.x the code below raises an error; in Keras 2.1.2 it does not
    model = AdversarialModel(
        base_model=gan,
        player_params=[model_g.trainable_weights, model_d.trainable_weights])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=['adam', 'adam'],
        loss='binary_crossentropy')
    # train on the training data
    # keras_adversarial was cloned locally and used to replace the pip-installed copy,
    # which fixed: AttributeError: 'AdversarialModel' object has no attribute '_feed_output_shapes'
    history = model.fit(x=train_x,
                        y=gan_targets(train_x.shape[0]),
                        epochs=epochs,
                        batch_size=batch_size)
    # save the weights as .h5 files
    model_g.save_weights('gan1_g.h5')
    model_d.save_weights('gan1_d.h5')
    model.save_weights('gan1.h5')

    # plot the training losses
    plt.plot(history.history['player_0_loss'], label='player_0_loss')
    plt.plot(history.history['player_1_loss'], label='player_1_loss')
    plt.plot(history.history['loss'], label='loss')
    plt.show()

    # images generated after 100 epochs of training
    # sample 10 random latent vectors and generate 10 images
    zsample = np.random.normal(size=(10, 100))
    pred = model_g.predict(zsample)
    print(pred.shape)  # (10,28,28)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.show()
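One caveat on the loss plot above: the player_0_loss / player_1_loss keys appear because no player_names were passed to AdversarialModel in this snippet. When names are supplied, as in the earlier examples, the history keys are presumably prefixed with those names instead; a hedged sketch of the equivalent plot in that case:

plt.plot(history.history['generator_loss'], label='generator_loss')
plt.plot(history.history['discriminator_loss'], label='discriminator_loss')
plt.legend()
plt.show()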
Code example #31
File: m_gan.py Project: mbc1990/animal-generator
def main():
    data_dir = "goldens_filtered_32x32_gray/"
    out_dir = "m_gan_out/"
    epochs = 1
    batch_size = 64

    # TODO: Research why these values were chosen
    opt_g = Adam(1e-4, decay=1e-5)
    opt_d = Adam(1e-3, decay=1e-5)
    loss = 'binary_crossentropy'
    latent_dim = 100
    adversarial_optimizer = AdversarialOptimizerSimultaneous()

    # My simple models
    # generator = get_generator()
    # discriminator = get_discriminator()

    # CIFAR example convolutional models
    generator = get_generator_cifar()
    discriminator = get_discriminator_cifar()

    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    temp = []
    for img_name in os.listdir(data_dir):
        image_path = data_dir + img_name
        img = imread(image_path)
        img = img.astype('float32')
        temp.append(img)

    train_x = np.stack(temp)
    train_x = train_x / 255

    # Training is run for its side effects on the model weights
    model.fit(x=train_x,
              y=gan_targets(train_x.shape[0]),
              epochs=epochs,
              batch_size=batch_size)

    zsamples = np.random.normal(size=(10, latent_dim))
    pred = generator.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :])
        plt.savefig(out_dir + str(i) + '.png')
Code example #32
    return gen.reshape((10, 10, 92, 92))


if __name__ == "__main__":
    # z \in R^400
    latent_dim = 400
    # x \in R^{92x92}
    input_shape = (1, 92, 92)

    # generator (z -> x)
    generator = model_generator()
    # discriminator (x -> y)
    discriminator = model_discriminator(input_shape=input_shape)
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # load previously saved weights:
    #generator.load_weights('./output/gan_convolutional/generator.h5')
    #discriminator.load_weights('./output/gan_convolutional/discriminator.h5')

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
Code example #33
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)

    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z, x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal), z generated on GPU
    bigan = simple_bigan(generator, encoder, discriminator, normal_latent_sampling((latent_dim,)))

    generative_params = generator.trainable_weights + encoder.trainable_weights

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()

    # build adversarial model
    model = AdversarialModel(base_model=bigan,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png", generator_sampler)

    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback("output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)

    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")

    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")