Example #1
0
from GAN.utils import vis_grid
from GAN.utils.data import transform_skeleton, inverse_transform_skeleton
from GAN.utils.init import InitNormal

from keras.optimizers import Adam, SGD, RMSprop

if __name__ == '__main__':
    # Train a GAN on 8-channel skeleton/parsing maps at 128x64 resolution.
    nbatch = 128             # minibatch size
    nmax = nbatch * 100      # total number of training samples to consume
    npxw, npxh = 64, 128     # image width / height in pixels

    from load import people, load_all
    # va_data: held-out validation data; tr_stream: training batch stream.
    va_data, tr_stream, _ = people(pathfile='protocol/PPPS.txt', size=(npxw, npxh), batch_size=nbatch)

    g = Generator(g_size=(8, npxh, npxw), g_nb_filters=128, g_nb_coding=500, g_scales=4, g_init=InitNormal(scale=0.002))
    d = Discriminator(d_size=g.g_size, d_nb_filters=128, d_scales=4, d_init=InitNormal(scale=0.002))
    gan = GAN(g, d)

    # NOTE: the optimizers are already imported at the top of the file, so
    # the redundant in-block `from keras.optimizers import ...` was removed.
    gan.fit(tr_stream,
            save_dir='./samples/parsing_skeleton/',
            k=1,                 # discriminator updates per generator update
            nbatch=nbatch,
            nmax=nmax,
            opt=Adam(lr=0.0002, beta_1=0.5, decay=1e-5),
            transform=transform_skeleton,
            inverse_transform=inverse_transform_skeleton)

Example #2
0
if __name__ == '__main__':
    # InfoGAN on MNIST with a single 10-way categorical latent code.
    batch_size = 128
    x, y, stream = get_mnist(batch_size)

    # Latent structure: 100 pure-noise dims plus one categorical info code.
    # (Three extra UniformDist(-1, 1) codes were tried and left disabled.)
    info_dists = [CategoryDist(n=10, lmbd=1e-3)]

    gen = InfoGenerator(
        g_size=(1, 28, 28),
        g_nb_filters=64,
        g_nb_noise=100,   # noise dims only; info codes are specified separately
        g_scales=2,
        g_FC=[1024],
        g_info=info_dists,
        g_init=InitNormal(scale=0.02),
    )
    disc = Discriminator(
        d_size=gen.g_size,
        d_nb_filters=64,
        d_scales=2,
        d_FC=[1024],
        d_init=InitNormal(scale=0.02),
    )

    # Q-network: maps the discriminator's penultimate-layer features to the
    # parameters of the latent-code posterior (dimension g_info.Qdim).
    q_net = Sequential([
        Dense(200, batch_input_shape=disc.layers[-2].output_shape),
        BN(),
        Activation('relu'),
        Dense(gen.g_info.Qdim),
    ])

    gan = InfoGAN(generator=gen, discriminator=disc, Qdist=q_net)
    from keras.optimizers import Adam, SGD, RMSprop
    gan.fit(stream,
            save_dir='./samples/mnist_info',
            k=2,
            nmax=batch_size * 100,
            nbatch=batch_size,
            opt=Adam(lr=0.0001))  # RMSprop(lr=0.0005) was also tried
Example #3
0
if __name__ == '__main__':
    # Plain GAN on 128x64 RGB person crops (CUHK01 training split).
    batch = 128
    total = batch * 100      # total training samples
    width, height = 64, 128  # crop width / height in pixels

    from load import people
    va_data, tr_stream, _ = people(
        pathfile='protocol/cuhk01-train.txt',
        size=(width, height),
        batch_size=batch)

    gen = Generator(g_size=(3, height, width),
                    g_nb_filters=128,
                    g_nb_coding=200,
                    g_scales=4,
                    g_init=InitNormal(scale=0.002))
    disc = Discriminator(d_size=gen.g_size,
                         d_nb_filters=128,
                         d_scales=4,
                         d_init=InitNormal(scale=0.002))
    gan = GAN(gen, disc)

    from keras.optimizers import Adam, SGD, RMSprop
    gan.fit(tr_stream,
            save_dir='/home/shaofan/Projects/JSTL/transfer/gan/',
            k=1,     # discriminator updates per generator update
            nbatch=batch,
            nmax=total,
            opt=Adam(lr=0.0003, beta_1=0.5, decay=1e-5))
    # RMSprop(lr=0.01) was also tried as the optimizer.
Example #4
0
                                     size=nbatch)].transpose(0, 2, 3, 1)

    return x, y, random_stream


if __name__ == '__main__':
    # init with ae and then run gan
    # Warm-start: load generator/discriminator weights from a pre-trained
    # autoencoder checkpoint, then continue adversarial training via AEGAN.
    # NOTE(review): the gan.fit(...) call is truncated in this excerpt.
    nbatch = 128
    x, y, stream = get_mnist(nbatch)

    g = Generator(g_size=(1, 28, 28),
                  g_nb_filters=64,
                  g_nb_coding=200,
                  g_scales=2,
                  g_FC=[1024],
                  g_init=InitNormal(scale=0.05))
    d = Discriminator(d_size=g.g_size,
                      d_nb_filters=64,
                      d_scales=2,
                      d_FC=[1024],
                      d_init=InitNormal(scale=0.05))

    # Restore autoencoder-pretrained weights; the printed weight sums are a
    # quick sanity check that loading replaced the random initialization.
    # (Python 2 print statements.)
    g.load_weights('models/mnist_ae_g.h5')
    print g.get_weights()[0].sum()
    d.load_weights('models/mnist_ae_d.h5')
    print d.get_weights()[0].sum()

    #    gan = GAN(g, d)
    gan = AEGAN(g, d)
    from keras.optimizers import Adam, SGD, RMSprop
    gan.fit(stream,
Example #5
0
if __name__ == '__main__':
    # Re-identification AEGAN setup: build G/D and warm-start them from a
    # previously trained autoencoder checkpoint.
    batch = 128
    total = batch * 100      # total training samples
    width, height = 64, 128  # crop width / height in pixels

    from load import people, load_all
    va_data, tr_stream, _ = people(
        pathfile='protocol/cuhk01-train.txt',
        size=(width, height),
        batch_size=batch)
    # The entire training set, transformed into model input space.
    allx = transform(load_all('protocol/cuhk01-train.txt', (width, height)))

    gen = Generator(g_size=(3, height, width),
                    g_nb_filters=128,
                    g_nb_coding=5000,   # large coding dimension
                    g_scales=4,
                    g_init=InitNormal(scale=0.002))
    disc = Discriminator(d_size=gen.g_size,
                         d_nb_filters=128,
                         d_scales=4,
                         d_init=InitNormal(scale=0.002))

    # Autoencoder pre-training was run once (disabled call kept for
    # reference); here we only restore the resulting checkpoint.
    ae = Autoencoder(gen, disc)
    # ae.fit(tr_stream,
    #        save_dir='./samples/reid_aegan_5000/ae/',
    #        nbatch=batch,
    #        opt=Adam(lr=0.002),
    #        niter=1001)
    ae.autoencoder.load_weights(
        './samples/reid_aegan_5000/ae/1000_ae_params.h5')
Example #6
0
    def random_stream():
        # Infinite generator: each iteration yields a random minibatch of
        # `nbatch` samples drawn without replacement from `x` (both closure
        # variables), with axis 1 moved last — presumably NCHW -> NHWC;
        # TODO confirm the layout of x.
        while 1:
            yield x[np.random.choice(x.shape[0], replace=False,
                                     size=nbatch)].transpose(0, 2, 3, 1)

    return x, y, random_stream


from models_WGAN import generator_upsampling, generator_deconv
from models_WGAN import discriminator

if __name__ == '__main__':
    # WGAN on MNIST (excerpt): builds the convolutional generator; the
    # critic construction and training call continue past this excerpt.
    nbatch = 64
    x, y, stream = get_mnist(nbatch)

    init = InitNormal(scale=0.02)
    # Alternative MLP generator, tried and disabled:
    #   g = MLP(g_size=(1, 28, 28),
    #               g_nb_filters=128,
    #               g_nb_coding=50,
    #               g_init=init)
    g = Generator(g_size=(1, 28, 28),
                  g_nb_filters=32,
                  g_FC=[1024],
                  g_nb_coding=100,
                  g_scales=2,
                  g_init=init)

    # Alternative fully-connected critic, tried and disabled:
    #   d = Sequential([
    #           Flatten(input_shape=g.output_shape[1:]),
    #           Dense(128, init=init),
    #           Activation('relu'),
Example #7
0
import numpy as np
from sklearn.datasets import fetch_mldata

from GAN.models import Generator, Critic, WGAN, MLP
from GAN.utils import vis_grid
from GAN.utils.data import transform, inverse_transform
from GAN.utils.init import InitNormal
from keras.models import Sequential
from keras.layers import Flatten, Dense, Activation

if __name__ == '__main__':
    # WGAN on 128x64 RGB person crops (CUHK01 training split).
    batch = 64
    width, height = 64, 128  # crop width / height in pixels

    from load import people
    va_data, stream, _ = people(pathfile='protocol/cuhk01-train.txt',
                                size=(width, height),
                                batch_size=batch)

    gen = Generator(g_size=(3, height, width),
                    g_nb_filters=128,
                    g_nb_coding=50,
                    g_scales=4,
                    g_init=InitNormal(scale=0.001))
    critic = Critic(d_size=gen.g_size,
                    d_nb_filters=128,
                    d_scales=4,
                    d_init=InitNormal(scale=0.001))

    wgan = WGAN(gen, critic)

    from keras.optimizers import Adam, SGD, RMSprop
    # Two optimizers are passed — presumably one per network; verify
    # against WGAN.fit.  NOTE(review): `clipvalue` clips gradients, not
    # weights; WGAN calls for weight clipping — confirm WGAN.fit applies
    # the weight clamp itself.
    wgan.fit(stream, save_dir='./labs/WGAN_reid',
        opts = [RMSprop(lr=0.00005, clipvalue=0.01),
                RMSprop(lr=0.00005)],
        niter=100000,
    )