def build_model(sess, embedding_dim, batch_size):
    """Build the two-stage conditional GAN sampling graph and restore weights.

    Args:
        sess: ``tf.Session`` used to restore the pretrained checkpoint.
        embedding_dim: dimensionality of the conditioning text embeddings.
        batch_size: number of images generated per batch.

    Returns:
        Tuple ``(embeddings, fake_images, hr_fake_images)`` — the embedding
        placeholder, the stage-I generator output, and the stage-II
        (high-resolution) generator output.
    """
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE / cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')

    # Stage-I generator: conditioning-augmented embedding concatenated
    # with random noise z.
    with tf.variable_scope("g_net"):
        c = sample_encoded_context(embeddings, model)
        z = tf.random_normal([batch_size, cfg.Z_DIM])
        fake_images = model.get_generator(tf.concat([c, z], 1), False)
    # Stage-II generator: refines stage-I output, re-conditioned on text.
    with tf.variable_scope("hr_g_net"):
        hr_c = sample_encoded_context(embeddings, model)
        hr_fake_images = model.hr_get_generator(fake_images, hr_c, False)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if '.ckpt' in ckt_path:
        print("Reading model parameters from %s" % ckt_path)
        # tf.all_variables() was deprecated in TF 0.12 and later removed;
        # tf.global_variables() returns the same collection.
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, ckt_path)
    else:
        # Best-effort: warn but still return the (unrestored) graph,
        # preserving the original control flow.
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images
Ejemplo n.º 2
0
# Demo: load a pre-trained conditional DCGAN and display a grid of samples.
import torch
import matplotlib.pyplot as plt
from torchvision import utils
from model import CondGAN
from utils import create_demo, create_c_demo

# Fixed typo: 'pretraiend' -> 'pretrained_path'.
pretrained_path = '../pre_trained/CondDCGAN_epoch300.pth.tar'
checkpoints = torch.load(pretrained_path)
net = CondGAN(use_sigmoid=True)
net.load_state_dict(checkpoints['state_dict'])
# Put BatchNorm/Dropout into inference mode; sampling with the network in
# training mode would use per-batch statistics and distort the demo images.
net.eval()

z = torch.randn(4, 100, 1, 1)    # 4 latent noise vectors, dim 100
hair, eyes = range(4), range(4)  # one sample per hair/eye class combination
c = torch.from_numpy(create_c_demo(hair, eyes)).float()

img = create_demo(net.G, z, c, use_cuda=True, cond=True)
img = utils.make_grid(img.data, normalize=True)
img = img.cpu().numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
plt.imshow(img)
plt.show()
Ejemplo n.º 3
0
    # Timestamp makes each training run's log directory unique.
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')

    datadir = 'Data/%s' % cfg.DATASET_NAME
    # NOTE(review): third positional arg is presumably the hr/lr image-size
    # ratio (1 = stage-I, no upscaling) — confirm against TextDataset.
    dataset = TextDataset(datadir, cfg.EMBEDDING_TYPE, 1)
    # Test split is always loaded; the train split only when training.
    filename_test = '%s/test' % (datadir)
    dataset.test = dataset.get_data(filename_test)
    if cfg.TRAIN.FLAG:
        filename_train = '%s/train' % (datadir)
        dataset.train = dataset.get_data(filename_train)

        # Fresh checkpoint/log directory per run:
        # ckt_logs/<dataset>/<config>_<timestamp>
        ckt_logs_dir = "ckt_logs/%s/%s_%s" % (cfg.DATASET_NAME,
                                              cfg.CONFIG_NAME, timestamp)
        mkdir_p(ckt_logs_dir)
    else:
        # Evaluation mode: reuse the directory that holds the pretrained
        # checkpoint (path up to the '.ckpt' suffix).
        s_tmp = cfg.TRAIN.PRETRAINED_MODEL
        ckt_logs_dir = s_tmp[:s_tmp.find('.ckpt')]

    model = CondGAN(image_shape=dataset.image_shape)

    algo = CondGANTrainer(model=model,
                          dataset=dataset,
                          ckt_logs_dir=ckt_logs_dir)

    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        ''' For every input text embedding/sentence in the
        training and test datasets, generate cfg.TRAIN.NUM_COPY
        images with randomness from noise z and conditioning augmentation.'''
        algo.evaluate()
Ejemplo n.º 4
0
    'gan': gan,
    # a/b/c are the discriminator/generator target labels in the
    # least-squares-GAN formulation (a = fake, b = real, c = generator goal).
    'a': 0,
    'b': 1,
    'c': 1,
    # Weight-clipping bound (used by WGAN-style training).
    'clip': 0.01,
    'valid': True,
    'val_step': 200,
    'save_dir': 'results/1_3_2/' + gan,
    'save_freq': 50,
    'save_grad': False,
    'visdom': True,
    'visdom_iter': True
}

# Expose the config dict entries as attributes (config.gan, config.a, ...).
config = Namespace(**config)
model = CondGAN(name='Cond' + config.gan, use_sigmoid=config.gan == 'DCGAN')
model.apply(weights_init_normal)

# Per-architecture optimizer and loss selection.
if config.gan == 'DCGAN':
    optimizer = {
        'D': optim.Adam(model.D.parameters(), lr=0.0002, betas=(0.5, 0.999)),
        'G': optim.Adam(model.G.parameters(), lr=0.0002, betas=(0.5, 0.999))
    }
    criterion = bce_loss
elif config.gan == 'LSGAN':
    # NOTE(review): this reassignment matches the defaults above (0, 1, 1),
    # so it is a no-op kept for explicitness — confirm intended values.
    config.a, config.b, config.c = 0, 1, 1
    optimizer = {
        'D': optim.Adam(model.D.parameters(), lr=0.0001, betas=(0.5, 0.999)),
        'G': optim.Adam(model.G.parameters(), lr=0.0001, betas=(0.5, 0.999))
    }
    criterion = ls_loss
Ejemplo n.º 5
0
    datadir = 'Data/%s' % cfg.DATASET_NAME
    # NOTE(review): third positional arg is presumably the hr/lr image-size
    # ratio (4 = stage-II, 4x upscaling) — confirm against TextDataset.
    dataset = TextDataset(datadir, cfg.EMBEDDING_TYPE, 4)
    # Test split is always loaded; the train split only when training.
    filename_test = '%s/test' % (datadir)
    dataset.test = dataset.get_data(filename_test)
    if cfg.TRAIN.FLAG:
        filename_train = '%s/train' % (datadir)
        dataset.train = dataset.get_data(filename_train)
        # Fresh checkpoint/log directory per run:
        # ckt_logs/<dataset>/<config>_<timestamp>
        ckt_logs_dir = "ckt_logs/%s/%s_%s" % \
            (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
        mkdir_p(ckt_logs_dir)
    else:
        # Evaluation mode: reuse the directory that holds the pretrained
        # checkpoint (path up to the '.ckpt' suffix).
        s_tmp = cfg.TRAIN.PRETRAINED_MODEL
        ckt_logs_dir = s_tmp[:s_tmp.find('.ckpt')]

    # Low-res size is derived from the dataset's high-res shape and ratio.
    model = CondGAN(lr_imsize=int(dataset.image_shape[0] /
                                  dataset.hr_lr_ratio),
                    hr_lr_ratio=dataset.hr_lr_ratio)

    algo = CondGANTrainer(model=model,
                          dataset=dataset,
                          ckt_logs_dir=ckt_logs_dir)

    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        ''' For every input text embedding/sentence in the
        training and test datasets, generate cfg.TRAIN.NUM_COPY
        images with randomness from noise z and conditioning augmentation.'''
        algo.evaluate()
Ejemplo n.º 6
0
from trainer import CondGANTrainer
from misc.get_configs import parse_args
from misc.utils import mkdir_p

if __name__ == "__main__":
    # Parse CLI options and timestamp this run.
    args = parse_args()
    print(args)
    now = datetime.datetime.now()
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')

    dataset = TextDataset(datadir='datasets/' + args.dataset + '/')

    print("Dataset created!")
    dataset.train = dataset.get_data()

    model = CondGAN(args, image_shape=dataset.image_shape)
    print("model created!")

    # Output directories: one for checkpoints/logs, one for retrieval results.
    # if args.for_training:
    ckt_logs_dir = "ckt_logs/%s" % \
        ("{}_logs".format(args.dataset))
    res_dir = "retrieved_res/%s" % \
        ("{}_res".format(args.dataset))
    mkdir_p(ckt_logs_dir)
    mkdir_p(res_dir)
    # Persist the parsed arguments next to the checkpoints for reproducibility.
    with open(ckt_logs_dir + '/args.txt', 'w') as fid:
        fid.write(str(args) + '\n')

    # NOTE(review): the trainer construction continues past this chunk.
    algo = CondGANTrainer(args,
                          model=model,
                          dataset=dataset,