Example #1
def gan_model_test():
    latent_dim = 10
    input_dim = 5
    generator = model_generator(input_dim=input_dim, latent_dim=latent_dim)
    discriminator = model_discriminator(input_dim=input_dim)
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    adversarial_optimizer = AdversarialOptimizerSimultaneous()
    opt_g = Adam(1e-4)
    opt_d = Adam(1e-3)
    loss = 'binary_crossentropy'
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # train model
    batch_size = 32
    n = batch_size * 8
    x = np.random.random((n, input_dim))
    y = gan_targets(n)
    fit(model, x, y, nb_epoch=3, batch_size=batch_size)
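
Example #1 assumes model_generator and model_discriminator builders that the test itself does not define. A minimal sketch, assuming plain fully-connected stacks (the hidden size is illustrative, not the project's actual architecture):

from keras.models import Sequential
from keras.layers import Dense

def model_generator(input_dim, latent_dim, hidden_dim=16):
    # latent vector z (latent_dim) -> fake sample x (input_dim)
    return Sequential([
        Dense(hidden_dim, activation='relu', input_dim=latent_dim),
        Dense(input_dim, activation='sigmoid')])

def model_discriminator(input_dim, hidden_dim=16):
    # sample x (input_dim) -> probability that x is real
    return Sequential([
        Dense(hidden_dim, activation='relu', input_dim=input_dim),
        Dense(1, activation='sigmoid')])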
Example #2
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is Gaussian, generated on the GPU
    # can also experiment with uniform_latent_sampling
    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"), generator_sampler, cmap=None)

    # train model
    xtrain, xtest = cifar10_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    K.set_image_dim_ordering('tf')
    if K.backend() == "tensorflow":
        os.makedirs(path + '/logs', exist_ok=True)
        callbacks.append(
            TensorBoard(log_dir=os.path.join(path, 'logs'), histogram_freq=0, write_graph=True, write_images=True))

    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
                  callbacks=callbacks, nb_epoch=nb_epoch,
                  batch_size=32)


    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
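
A typical way to call this function, mirroring the setup in Example #1; the optimizers, learning rates, and output path below are illustrative assumptions, and model_generator()/model_discriminator() stand in for whatever CIFAR-10 builders the surrounding project provides:

from keras.optimizers import Adam
from keras_adversarial import AdversarialOptimizerSimultaneous

latent_dim = 100
example_gan(AdversarialOptimizerSimultaneous(), "output/gan-cifar10",
            opt_g=Adam(1e-4, decay=1e-5),
            opt_d=Adam(1e-3, decay=1e-5),
            nb_epoch=100,
            generator=model_generator(),          # assumed CIFAR-10 generator builder
            discriminator=model_discriminator(),  # assumed CIFAR-10 discriminator builder
            latent_dim=latent_dim)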
Example #3
File: main.py  Project: DrRedwanNewaz/D-GAN
def train_gan(adversarial_optimizer,
              path,
              opt_g,
              opt_d,
              nb_epoch,
              generator,
              discriminator,
              latent_dim,
              targets=gan_targets,
              loss='mse'):
    csvpath = os.path.join(path, "history.csv")
    run_id = 0
    while os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        csvpath = os.path.join(path, "history_%d.csv" % run_id)
        run_id += 1

    print("Training: {}".format(csvpath))
    print(os.path.join(path, 'logs'))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    xtrain, xtest = db.get_dataset(5, 'train')
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])

    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  nb_epoch=nb_epoch,
                  batch_size=1,
                  verbose=2)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)
    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
    return generator
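
Because train_gan returns the trained generator, the caller can sample from it directly; a small sketch, assuming generator, discriminator, and latent_dim are already defined, with placeholder optimizers, output path, and epoch count:

import numpy as np
from keras.optimizers import Adam
from keras_adversarial import AdversarialOptimizerSimultaneous

trained_generator = train_gan(AdversarialOptimizerSimultaneous(), "output/dgan",
                              opt_g=Adam(1e-4), opt_d=Adam(1e-3), nb_epoch=50,
                              generator=generator, discriminator=discriminator,
                              latent_dim=latent_dim)
z = np.random.normal(size=(16, latent_dim))
samples = trained_generator.predict(z)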
Example #4
def main():
    # Uncomment this for debugging
    # sess = K.get_session()
    # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    # K.set_session(sess)

    xtrain = imageloader.preprocess(
        imageloader.filter_paintings(imageloader.load_training_data()))
    y = gan_targets(xtrain.shape[0])
    y[-1] -= 0.1  # 1-sided label smoothing "hack"
    z = np.random.normal(size=(xtrain.shape[0], latent_dim))

    catalog_file = catalog.load_catalog()
    numerical_categories = catalog.transform_categorical_to_numerical(
        catalog.types(catalog_file))
    one_hots = transform_to_one_hot_vectors(numerical_categories)

    current_epoch, discriminator, generator = load_models()

    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=None)

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss={
                                  "yfake": "binary_crossentropy",
                                  "yreal": "binary_crossentropy",
                                  "yreal_label": "categorical_crossentropy",
                                  "yfake_label": "categorical_crossentropy"
                              })

    callbacks = initialize_callbacks(path, generator, discriminator,
                                     latent_dim)

    y = y[:2] + [one_hots] * 2 + y[2:] + [one_hots] * 2
    history = fit(
        model,
        x=[z, numerical_categories[imageloader.painting_filter()], xtrain],
        y=y,
        callbacks=callbacks,
        nb_epoch=nb_epoch,
        initial_epoch=current_epoch,
        batch_size=32)
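
main() relies on module-level configuration that this listing does not show (path, latent_dim, nb_epoch, optimizer, opt_g, opt_d). One plausible set of globals, assuming Adam optimizers and simultaneous updates (values and path are illustrative):

from keras.optimizers import Adam
from keras_adversarial import AdversarialOptimizerSimultaneous

path = "output/painting-gan"   # placeholder output directory
latent_dim = 100
nb_epoch = 100
optimizer = AdversarialOptimizerSimultaneous()
opt_g = Adam(1e-4, decay=1e-5)
opt_d = Adam(1e-3, decay=1e-5)

Example #5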
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z is Gaussian, generated on the GPU
    # can also experiment with uniform_latent_sampling
    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        return xpred.reshape((10, 10) + xpred.shape[1:])

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"), generator_sampler, cmap=None)

    # train model
    xtrain, xtest = cifar10_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(
            TensorBoard(log_dir=os.path.join(path, 'logs'), histogram_freq=0, write_graph=True, write_images=True))
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
                  callbacks=callbacks, nb_epoch=nb_epoch,
                  batch_size=32)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
Example #6
def example_gan(adversarial_optimizer,
                path,
                opt_g,
                opt_d,
                nb_epoch,
                generator,
                discriminator,
                latent_dim,
                targets=gan_targets,
                loss='binary_crossentropy'):

    # gan (x -> yfake, yreal)
    # z generated on GPU
    gan = simple_gan(generator, discriminator,
                     normal_latent_sampling((latent_dim, )))

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])

    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"),
                                     generator_sampler)

    # train model
    xtrain, xtest = mnist_data()

    # targets = gan_targets -> assigns the 0/1 labels to the data
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  callbacks=callbacks,
                  nb_epoch=nb_epoch,
                  batch_size=32)
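
gan_targets builds the per-player 0/1 labels that these examples pass as y: the generator player is trained with yfake=1, yreal=0, and the discriminator player with the opposite. It is roughly equivalent to the following sketch:

import numpy as np

def gan_targets(n):
    # [generator_fake, generator_real, discriminator_fake, discriminator_real]
    generator_fake = np.ones((n, 1))
    generator_real = np.zeros((n, 1))
    discriminator_fake = np.zeros((n, 1))
    discriminator_real = np.ones((n, 1))
    return [generator_fake, generator_real, discriminator_fake, discriminator_real]

Example #7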
def example_gan(adversarial_optimizer, path, opt_g, opt_d, nb_epoch, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    csvpath = os.path.join(path, "history.csv")
    if os.path.exists(csvpath):
        print("Already exists: {}".format(csvpath))
        return

    print("Training: {}".format(csvpath))
    # gan (x -> yfake, yreal), z generated on GPU
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,)))

    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()

    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))

    def generator_sampler():
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback(os.path.join(path, "epoch-{:03d}.png"), generator_sampler)

    # train model
    xtrain, xtest = mnist_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [generator_cb]
    if K.backend() == "tensorflow":
        callbacks.append(
            TensorBoard(log_dir=os.path.join(path, 'logs'), histogram_freq=0, write_graph=True, write_images=True))
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=callbacks, nb_epoch=nb_epoch,
                  batch_size=32)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv(csvpath)

    # save models
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
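
After training, the saved .h5 files can be reloaded for sampling, assuming the generator uses only standard Keras layers; the path and latent_dim below are placeholders:

import numpy as np
from keras.models import load_model

latent_dim = 100  # must match the value used during training
generator = load_model("output/gan/generator.h5")
z = np.random.normal(size=(16, latent_dim))
digits = generator.predict(z).reshape((-1, 28, 28))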
Example #8
def run_gan(exp_dir, adversarial_optimizer, opt_g, opt_d, generator, discriminator, latent_dim,
                targets=gan_targets, loss='binary_crossentropy'):
    # print models
    generator.summary()
    discriminator.summary()
    gan = simple_gan(generator=generator,
                     discriminator=discriminator,
                     latent_sampling=normal_latent_sampling((latent_dim,)))

    # build adversarial model
    model = AdversarialModel(base_model=gan, player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer, player_optimizers=[opt_g, opt_d], loss=loss)

    # create callback to generate images
    zsamples = np.random.normal(size=(10 * 10, latent_dim))
    def generator_sampler():
        xpred = dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1))
        xpred = scale_value(xpred, [0.0, 1.0])
        return xpred.reshape((10, 10) + xpred.shape[1:])

    save_image_cb = ImageGridCallback('./dcgan-v2-images/' + exp_dir + '/epoch-{:03d}.png', generator_sampler, cmap=None)
    save_model_cb = SaveModelWeights(generator, './dcgan-v2-model-weights/' + exp_dir)

    # train model
    xtrain, xtest = svhn_data()
    y = targets(xtrain.shape[0])
    ytest = targets(xtest.shape[0])
    callbacks = [save_image_cb, save_model_cb]

    # train model
    epoch_start = 0
    epoch_count = 100
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=callbacks, nb_epoch=epoch_start + epoch_count,
                  batch_size=32, initial_epoch = epoch_start, shuffle=True)

    # save history to CSV
    df = pd.DataFrame(history.history)
    df.to_csv('./dcgan-v2-images/' + exp_dir + '/history.csv')

    # save final models
    generator.save('./dcgan-v2-model-weights/' + exp_dir + '/generator.h5')
    discriminator.save('./dcgan-v2-model-weights/' + exp_dir + '/discriminator.h5')
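
scale_value is not shown in this listing; a plausible helper that linearly rescales an array into a target range (a sketch, not necessarily the project's implementation):

import numpy as np

def scale_value(x, value_range):
    # linearly map x from [x.min(), x.max()] onto [value_range[0], value_range[1]]
    lo, hi = value_range
    x = np.asarray(x, dtype='float32')
    return lo + (x - x.min()) * (hi - lo) / (x.max() - x.min() + 1e-8)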
Example #9
def example_aae(path, adversarial_optimizer):
    # z \in R^256
    latent_dim = 256
    units = 512
    # x \in R^{3x32x32}
    input_shape = dim_ordering_shape((3, 32, 32))

    # generator (z -> x)
    generator = model_generator(latent_dim, units=units)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape, units=units)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim, units=units)

    # build AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim, ))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal],
                             ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(
        base_model=aae,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=adversarial_optimizer,
        player_optimizers=[Adam(3e-4, decay=1e-4),
                           Adam(1e-3, decay=1e-4)],
        loss={
            "yfake": "binary_crossentropy",
            "yreal": "binary_crossentropy",
            "xpred": "mean_squared_error"
        },
        player_compile_kwargs=[{
            "loss_weights": {
                "yfake": 1e-1,
                "yreal": 1e-1,
                "xpred": 1e2
            }
        }] * 2)

    # load cifar10 data
    xtrain, xtest = cifar10_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return dim_ordering_unfix(generator.predict(zsamples)).transpose(
            (0, 2, 3, 1)).reshape((10, 10, 32, 32, 3))

    generator_cb = ImageGridCallback(
        os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = dim_ordering_unfix(autoencoder.predict(xrep)).reshape(
            (10, 9, 3, 32, 32))
        xsamples = dim_ordering_unfix(xsamples).reshape((10, 1, 3, 32, 32))
        samples = np.concatenate((xsamples, xgen), axis=1)
        samples = samples.transpose((0, 1, 3, 4, 2))
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(
        path, "autoencoded-epoch-{:03d}.png"),
                                       autoencoder_sampler,
                                       cmap=None)

    # train network
    # targets per player: generator (xpred, yfake, yreal), then discriminator (xpred, yfake, yreal)
    n = xtrain.shape[0]
    y = [
        xtrain,
        np.ones((n, 1)),
        np.zeros((n, 1)), xtrain,
        np.zeros((n, 1)),
        np.ones((n, 1))
    ]
    ntest = xtest.shape[0]
    ytest = [
        xtest,
        np.ones((ntest, 1)),
        np.zeros((ntest, 1)), xtest,
        np.zeros((ntest, 1)),
        np.ones((ntest, 1))
    ]
    history = fit(model,
                  x=xtrain,
                  y=y,
                  validation_data=(xtest, ytest),
                  callbacks=[generator_cb, autoencoder_cb],
                  nb_epoch=100,
                  batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
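
A typical call, mirroring the GAN examples above (the output directory is a placeholder):

from keras_adversarial import AdversarialOptimizerSimultaneous

example_aae("output/aae-cifar10", AdversarialOptimizerSimultaneous())

Example #10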
def example_aae(path, adversarial_optimizer):
    # z \in R^256
    latent_dim = 256
    units = 512
    # x \in R^{3x32x32}
    input_shape = dim_ordering_shape((3, 32, 32))

    # generator (z -> x)
    generator = model_generator(latent_dim, units=units)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape, units=units)
    # autoencoder (x -> x')
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim, units=units)

    # build AAE
    x = encoder.inputs[0]
    z = encoder(x)
    xpred = generator(z)
    zreal = normal_latent_sampling((latent_dim,))(x)
    yreal = discriminator(zreal)
    yfake = discriminator(z)
    aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))

    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()

    # build adversarial model
    generative_params = generator.trainable_weights + encoder.trainable_weights
    model = AdversarialModel(base_model=aae,
                             player_params=[generative_params, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[Adam(3e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
                                    "xpred": "mean_squared_error"},
                              compile_kwargs={"loss_weights": {"yfake": 1e-1, "yreal": 1e-1, "xpred": 1e2}})

    # load cifar10 data
    xtrain, xtest = cifar10_data()

    # callback for image grid of generated samples
    def generator_sampler():
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return dim_ordering_unfix(generator.predict(zsamples)).transpose((0, 2, 3, 1)).reshape((10, 10, 32, 32, 3))

    generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)

    # callback for image grid of autoencoded samples
    def autoencoder_sampler():
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = dim_ordering_unfix(autoencoder.predict(xrep)).reshape((10, 9, 3, 32, 32))
        xsamples = dim_ordering_unfix(xsamples).reshape((10, 1, 3, 32, 32))
        samples = np.concatenate((xsamples, xgen), axis=1)
        samples = samples.transpose((0, 1, 3, 4, 2))
        return samples

    autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler,
                                       cmap=None)

    # train network
    # targets per player: generator (xpred, yfake, yreal), then discriminator (xpred, yfake, yreal)
    n = xtrain.shape[0]
    y = [xtrain, np.ones((n, 1)), np.zeros((n, 1)), xtrain, np.zeros((n, 1)), np.ones((n, 1))]
    ntest = xtest.shape[0]
    ytest = [xtest, np.ones((ntest, 1)), np.zeros((ntest, 1)), xtest, np.zeros((ntest, 1)), np.ones((ntest, 1))]
    history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
                  callbacks=[generator_cb, autoencoder_cb],
                  nb_epoch=100, batch_size=32)

    # save history
    df = pd.DataFrame(history.history)
    df.to_csv(os.path.join(path, "history.csv"))

    # save model
    encoder.save(os.path.join(path, "encoder.h5"))
    generator.save(os.path.join(path, "generator.h5"))
    discriminator.save(os.path.join(path, "discriminator.h5"))
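
n_choice, used by the autoencoder sampler above, is not shown in the listing; a plausible implementation that picks n random rows without replacement:

import numpy as np

def n_choice(x, n):
    # return n randomly chosen rows of x
    return x[np.random.choice(x.shape[0], size=n, replace=False)]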