Example #1
def get_encoder_decoder(vocab):
    """ Given the arguments, returns the correct combination of CNN/RNN/GAN encoders and decoders. """
    if args.pretrain_rnn:
        encoder = EncoderRNN(len(vocab),
                             args.embed_size,
                             args.encoder_rnn_hidden_size,
                             num_layers=args.num_layers).to(device)
    elif args.gan_embedding:
        gan = torch.load('DCGAN_embed_2.tch').to(device)
        encoder = gan.discriminator
    elif args.progan_embedding:
        pro_gan = pg.ProGAN(depth=7,
                            latent_size=256,
                            device=torch.device('cuda'))
        pro_gan.dis.load_state_dict(torch.load('progan_weights/GAN_DIS_6.pth'))
        # pro_gan.dis_optim.load_state_dict(torch.load('progan_weights/GAN_DIS_OPTIM_6.pth'))
        pro_gan.gen.load_state_dict(torch.load('progan_weights/GAN_GEN_6.pth'))
        # pro_gan.gen_optim.load_state_dict(torch.load('progan_weights/GAN_GEN_OPTIM_6.pth'))
        pro_gan.gen_shadow.load_state_dict(
            torch.load('progan_weights/GAN_GEN_SHADOW_6.pth'))
        print("Loaded proGAN weights.", flush=True)
        encoder = pro_gan.dis.to(device)
    else:
        encoder = EncoderCNN(args.embed_size).to(device)

    decoder = DecoderRNNOld(args.embed_size,
                            args.decoder_rnn_hidden_size,
                            len(vocab),
                            args.num_layers,
                            vocab,
                            device=device).to(device)
    return encoder, decoder
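
# Usage sketch (assumed context): `args`, `device`, `vocab`, and the encoder /
# decoder classes are module-level names in this project. The attribute names
# below are the ones the function reads; the values are illustrative only.
from argparse import Namespace

args = Namespace(pretrain_rnn=False, gan_embedding=False, progan_embedding=False,
                 embed_size=256, encoder_rnn_hidden_size=512,
                 decoder_rnn_hidden_size=512, num_layers=1)
encoder, decoder = get_encoder_decoder(vocab)  # `vocab` is supplied by the caller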
Example #2
def train_model(device_to_run):
    # Data
    dataset = LandscapeImages()

    # Hyperparameters (each per-depth list needs one entry per resolution stage,
    # i.e. `depth` entries)
    depth = 7
    batch_sizes = [5, 5, 5, 5, 5, 5, 5]
    num_epochs = [10, 15, 20, 20, 20, 20, 20]
    fade_ins = [50, 50, 50, 50, 50, 50, 50]
    latent_size = 512

    gan = pg.ProGAN(device=device_to_run, latent_size=latent_size, depth=depth)

    gan.train(dataset=dataset,
              epochs=num_epochs,
              batch_sizes=batch_sizes,
              fade_in_percentage=fade_ins,
              num_workers=3)
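
# Usage sketch (assumptions: `torch` is importable here and `pg` above refers to
# pro_gan_pytorch's PRO_GAN module, as in the other snippets):
if __name__ == '__main__':
    import torch
    train_model(torch.device("cuda" if torch.cuda.is_available() else "cpu"))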
Example #3
def generateNewImages():

    print("Generate image")
    device = th.device("cuda" if th.cuda.is_available() else "cpu")

    gen = th.nn.DataParallel(pg.Generator(depth=6, latent_size=128))

    gen.load_state_dict(th.load("GAN_GEN_5.pth", map_location=str(device)))
    #     noise = th.randn(1, 128).to(device)
    #     sample_image = gen(noise, depth=5, alpha=1).detach()
    #     vutils.save_image(sample_image[0, :, :, :], 'portrait_' + str(1) + '.png'.format(3))
    noise = th.randn(32, 128).to(device)
    sample_image = gen(noise, depth=5, alpha=1).detach()

    for x in range(18):
        vutils.save_image(sample_image[x, :, :, :],
                          '/home/ubuntu/GanZoo/static/img/display/portrait_' +
                          str(x) + '.png',
                          normalize=True)
    return render_template('index.html')
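
# Context sketch for the handler above: the snippet assumes a Flask app with
# `th`, `vutils`, `pg`, and `render_template` already available at module level,
# plus a pre-trained generator checkpoint on disk. The imports and route below
# are an assumed wiring, not code from the original project.
import torch as th
import torchvision.utils as vutils
import pro_gan_pytorch.PRO_GAN as pg
from flask import Flask, render_template

app = Flask(__name__)


@app.route("/")
def index():
    return generateNewImages()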
Example #4
import torch as th
from matplotlib.animation import FuncAnimation
from pro_gan_pytorch import PRO_GAN as pg

# ==========================================================================
# Tweakable parameters
# ==========================================================================
depth = 8
num_points = 12
transition_points = 30
# ==========================================================================

# create the device for running the demo:
device = th.device("cuda" if th.cuda.is_available() else "cpu")

# load the model for the demo
gen = th.nn.DataParallel(pg.Generator(depth=9))
gen.load_state_dict(th.load("GAN_GEN_SHADOW_8.pth", map_location=str(device)))


# function to generate an image given a latent_point
def get_image(point):
    img = gen(point, depth=depth, alpha=1).detach().squeeze(0).permute(1, 2, 0)
    img = (img - img.min()) / (img.max() - img.min())
    return img.cpu().numpy()


# generate the set of points:
fixed_points = th.randn(num_points, 512).to(device)
fixed_points = (fixed_points / fixed_points.norm(dim=1, keepdim=True)) * (512 ** 0.5)
points = []  # start with an empty list
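
# Sketch of how these pieces can be combined (this interpolation loop and the
# FuncAnimation call are an assumed continuation of the snippet, not original
# code): walk between consecutive fixed points and animate the decoded frames.
import matplotlib.pyplot as plt

for i in range(num_points):
    start, end = fixed_points[i], fixed_points[(i + 1) % num_points]
    for t in range(transition_points):
        blend = t / transition_points
        points.append(((1 - blend) * start + blend * end).unsqueeze(0))

fig, ax = plt.subplots()
frame = ax.imshow(get_image(points[0]))


def update(idx):
    frame.set_data(get_image(points[idx]))
    return (frame,)


animation = FuncAnimation(fig, update, frames=len(points), interval=50)
plt.show()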
Example #5
if __name__ == '__main__':

    # some parameters:
    depth = 4
    # hyper-parameters per depth (resolution)
    num_epochs = [10, 20, 20, 20]
    fade_ins = [50, 50, 50, 50]
    batch_sizes = [128, 128, 128, 128]
    latent_size = 128

    # get the data. Ignore the test data and their classes
    _, dataset, _ = setup_data()

    # ======================================================================
    # This line creates the PRO-GAN
    # ======================================================================
    pro_gan = pg.ConditionalProGAN(num_classes=5,
                                   depth=depth,
                                   latent_size=latent_size,
                                   device=device)
    # ======================================================================

    # ======================================================================
    # This line trains the PRO-GAN
    # ======================================================================
    pro_gan.train(dataset=dataset,
                  epochs=num_epochs,
                  fade_in_percentage=fade_ins,
                  batch_sizes=batch_sizes)
    # ======================================================================
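
    # Follow-up sketch (assumes `torch` is imported as `th` in this module; the
    # file name is illustrative): persist the trained generator so it can be
    # reloaded with load_state_dict, as in the other snippets.
    th.save(pro_gan.gen.state_dict(), "COND_GAN_GEN_{}.pth".format(depth - 1))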
Example #6
    current_depth = state['current_depth']
    epoch = state['epoch']
    print('Loaded {}'.format(basename))
    return current_depth, epoch
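
# A hypothetical reconstruction of load_checkpoint, whose head is missing from
# the fragment above. It is consistent with how the function is called below;
# the checkpoint keys other than 'current_depth' and 'epoch', and the use of
# `os`/`th`, are assumptions.
def load_checkpoint(pro_gan, ckpt_dir, basename=None):
    if basename is None:
        # fall back to the lexicographically last checkpoint in the directory
        basename = sorted(os.listdir(ckpt_dir))[-1]
    state = th.load(os.path.join(ckpt_dir, basename), map_location='cpu')
    pro_gan.gen.load_state_dict(state['gen'])
    pro_gan.dis.load_state_dict(state['dis'])
    current_depth = state['current_depth']
    epoch = state['epoch']
    print('Loaded {}'.format(basename))
    return current_depth, epoch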


if __name__ == '__main__':

    # some parameters:
    depth = 6
    latent_size = 512

    # ======================================================================
    # This line creates the PRO-GAN
    # ======================================================================
    pro_gan = pg.ProGAN(depth=depth, latent_size=latent_size, device=device)
    # gen = pg.Generator(depth=depth, latent_size=latent_size, use_eql=False).to(device)
    # ======================================================================

    current_depth, epoch = load_checkpoint(pro_gan, ckpt_dir)
    # if epoch != 10:
    # current_depth -= 1

    # OUT_DIR = out_dir
    # for current_depth in range(depth):
    current_depth -= 1
    if True:
        load_checkpoint(pro_gan,
                        ckpt_dir,
                        basename='checkpoint-{}-10.ckpt'.format(current_depth))
        # out_dir = '{}-{}'.format(OUT_DIR, current_depth)
Example #7
    depth = 6
    # hyper-parameters per depth (resolution); the default epoch schedule is
    # 10,10,20,20,20,20,20 and can be raised for more detail. The batch size
    # sets how many images appear per sample grid, i.e. the total number of
    # images produced (default 32). Each per-depth list below must have exactly
    # `depth` entries; see the schedule printout sketched after these values.
    batch = args.batch
    epoch = args.epochs
    num_epochs = [epoch, epoch, epoch, epoch, epoch, epoch]
    fade_ins = [50, 50, 50, 50, 50, 50]
    batch_sizes = [batch, batch, batch, batch, batch, batch]
    latent_size = 128
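
    # Sketch of the schedule implied by the lists above (ProGAN starts at 4x4
    # and doubles the resolution at each depth stage); handy for checking that
    # every per-depth list really has `depth` entries:
    for d in range(depth):
        print("depth {}: {}x{}  epochs={}  batch={}  fade-in={}%".format(
            d, 4 * 2 ** d, 4 * 2 ** d, num_epochs[d], batch_sizes[d], fade_ins[d]))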

    # get the data. Ignore the test data and their classes
    _, dataset, _ = setup_data(download=True)

    # ======================================================================
    # This line creates the PRO-GAN
    # ======================================================================
    pro_gan = pg.ConditionalProGAN(num_classes=len(dataset.classes), depth=depth, 
                                   latent_size=latent_size, device=device)
    # ======================================================================

    # ======================================================================
    # This line trains the PRO-GAN
    # ======================================================================
    pro_gan.train(
        dataset=dataset,
        epochs=num_epochs,
        fade_in_percentage=fade_ins,
        batch_sizes=batch_sizes
    )
    # ====================================================================== 
Example #8
import torch as th
import pro_gan_pytorch.PRO_GAN as pg
import matplotlib.pyplot as plt

device = th.device("cuda" if th.cuda.is_available() else "cpu")
gen = pg.Generator(depth=4, latent_size=128, use_eql=False).to(device)
gen.load_state_dict(
    th.load("training_runs/haiti/saved_models/GAN_GEN_3.pth",
            map_location=str(device)))
noise = th.randn(1, 128).to(device)
sample_image = gen(noise, depth=3, alpha=1).detach()
plt.imshow(sample_image[0].cpu().permute(1, 2, 0) / 2 + 0.5)
plt.show()
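
# Follow-up sketch: the same generator can render a grid of samples in one call
# via torchvision's save_image (the batch size and file name are illustrative):
import torchvision.utils as vutils

noise_batch = th.randn(16, 128).to(device)
samples = gen(noise_batch, depth=3, alpha=1).detach()
vutils.save_image(samples, "haiti_samples.png", nrow=4, normalize=True)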