Example #1
def noisy_meas(batch_size):
    ''' data '''
    def preprocess_fn(img):
        crop_size = 108
        re_size = 64
        img = tf.image.crop_to_bounding_box(img, (218 - crop_size) // 2,
                                            (178 - crop_size) // 2, crop_size,
                                            crop_size)
        img = tf.to_float(
            tf.image.resize_images(
                img, [re_size, re_size],
                method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
        return img

    img_paths = glob.glob('./data/test/{}.jpg'.format(PARAMS.img_no))
    noisefile_path = './meas/noise3d_var{}_seed{}.npy'.format(
        noise_var, seed_no)

    if not os.path.exists(noisefile_path):
        print('**** noise file does not exist... creating new one ****')
        noise_mat3d = np.reshape(
            np.random.multivariate_normal(mean=np.zeros((dim_like)),
                                          cov=np.eye(dim_like, dim_like) *
                                          noise_var,
                                          size=1), (64, 64, 3))
        np.save(noisefile_path, noise_mat3d)
    noise_mat3d = np.load(noisefile_path)
    data_pool = utils.DiskImageData(img_paths,
                                    batch_size,
                                    shape=[218, 178, 3],
                                    preprocess_fn=preprocess_fn)
    # add the cached noise to one clean image, then tile it across the batch
    clean_img3d = data_pool.batch()[0, :, :, :]
    noisy_meas3d = clean_img3d + noise_mat3d
    noisy_mat4d = np.tile(noisy_meas3d,
                          (batch_size, 1, 1, 1)).astype(np.float32)
    return clean_img3d, noisy_mat4d
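Note that noisy_meas depends on module-level names that do not appear in the snippet: PARAMS.img_no (which test image to load), noise_var, seed_no and dim_like. A minimal, hypothetical driver is sketched below, assuming dim_like equals 64 * 64 * 3 so the reshape to (64, 64, 3) is valid; none of these values come from the original code.

noise_var = 0.1          # illustrative value; variance of the additive Gaussian noise
seed_no = 0              # illustrative value; only used in the cached noise file name
dim_like = 64 * 64 * 3   # must match the (64, 64, 3) reshape inside noisy_meas

clean_img, noisy_batch = noisy_meas(batch_size=16)
print(clean_img.shape, noisy_batch.shape)  # expected: (64, 64, 3) and (16, 64, 64, 3)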
Example #2
def preprocess_fn(img):
    crop_size = 108
    re_size = 64
    img = tf.image.crop_to_bounding_box(img, (218 - crop_size) // 2,
                                        (178 - crop_size) // 2, crop_size,
                                        crop_size)
    img = tf.to_float(
        tf.image.resize_images(
            img, [re_size, re_size],
            method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
    return img
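The same normalization recurs across these examples: dividing by 127.5 and subtracting 1 maps uint8 pixel values from [0, 255] into [-1, 1], the range typically produced by a tanh generator output. A quick NumPy check of the mapping:

import numpy as np

pixels = np.array([0.0, 127.5, 255.0])
print(pixels / 127.5 - 1)  # [-1.  0.  1.]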


img_paths = glob.glob('./data/img_align_celeba/*.jpg')
data_pool = utils.DiskImageData(img_paths,
                                batch_size,
                                shape=[218, 178, 3],
                                preprocess_fn=preprocess_fn)
""" graphs """
with tf.device('/gpu:%d' % gpu_id):
    ''' models '''
    generator = models.generator
    discriminator = models.discriminator
    ''' graph '''
    # inputs
    real = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])

    # generate
    fake = generator(z, reuse=False)

    # discriminate
Example #3
import models_128x128 as models
""" param """
epoch = 25
batch_size = 64
lr = 0.0002
z_dim = 100
gpu_id = 0
''' data '''

# def preprocess_fn(img):
#     re_size = 128
#     img = tf.to_float(tf.image.resize_images(img, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
#     return img

img_paths = glob.glob('./data/Omni/*.jpg')
data_pool = utils.DiskImageData(img_paths, batch_size, shape=[128, 128, 3])
""" graphs """
with tf.device('/gpu:%d' % 0):
    ''' models '''
    generator = models.generator
    discriminator = models.discriminator
    ''' graph '''
    # inputs
    real = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])

    # generate
    fake = generator(z, reuse=False)

    # discriminate
    r_logit = discriminator(real, reuse=False)
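Examples #2 and #3 are both cut off right after the discriminator is applied to the real batch. The lines below are not part of the original snippets; they only sketch one common DCGAN-style continuation, assuming the discriminator accepts a reuse flag as shown above (the original repositories may use WGAN or other losses instead).

    f_logit = discriminator(fake, reuse=True)

    # vanilla GAN losses (one possible choice, not necessarily the original's)
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logit, labels=tf.ones_like(r_logit)) +
        tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logit, labels=tf.zeros_like(f_logit)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logit, labels=tf.ones_like(f_logit)))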
Example #4
def train():
    alpha_span = 800000
    batch_size = 32
    ckpt_dir = './checkpoints/wgp'
    n_gen = 1
    n_critic = 1
    it_start = 0
    #epoch = 20*(alpha_span * 2 // (2*4936)) # 4936 is number of images
    
    def preprocess_fn(img):
        img = tf.image.resize_images(img, [target_size, target_size], method=tf.image.ResizeMethod.AREA) / 127.5 - 1
        return img

    def preprocess_fn_dummy(img):
        img = tf.image.resize_images(img, [final_size, final_size], method=tf.image.ResizeMethod.AREA) / 127.5 - 1
        return img
    
    # dataset
    img_paths = glob.glob('./imgs/faces/*.png')
    data_pool = utils.DiskImageData(5, img_paths, batch_size//2, shape=[640, 640, 3], preprocess_fn=preprocess_fn)
    data_pool_dummy = utils.DiskImageData(7, img_paths, 1, shape=[640, 640, 3], preprocess_fn=preprocess_fn_dummy)    
    batch_epoch = len(data_pool) // (batch_size * 1)  # n_critic

    # build graph
    print('Building a graph ...')
    nodes = build(batch_size)
    # session
    sess = utils.session()
    saver = tf.train.Saver()
    # summary
    summary_writer = tf.summary.FileWriter('./summaries/wgp/', sess.graph)
    utils.mkdir(ckpt_dir + '/')

    print('Initializing all variables ...')
    sess.run(tf.global_variables_initializer())
    
    # run final size session for storing all variables to be used into the optimizer
    print('Running final size dummy session ...')
    #if target_size == initial_size and len(sys.argv) <= 3:
    #    _ = sess.run([nodes['dummy']['d']], feed_dict=get_ipt(2, final_size, 1.0, data_pool_dummy ,z_dim, nodes['dummy']['input'] ))
    #    _ = sess.run([nodes['dummy']['g']], feed_dict=get_ipt(2, final_size, 1.0, data_pool_dummy ,z_dim, nodes['dummy']['input'] ))
        
    # load checkpoint
    if len(sys.argv) > 3 and sys.argv[2] == 'resume':
        print('Loading the checkpoint ...')
        saver.restore(sess, ckpt_dir + '/model.ckpt')
        it_start = 1 + int(sys.argv[3])
    last_saved_iter = it_start - 1

    ''' train '''
    for it in range(it_start, 9999999999):
        # fade-in alpha for progressive growing
        alpha_ipt = it / (alpha_span / batch_size)
        if alpha_ipt > 1 or target_size == initial_size:
            alpha_ipt = 1.0
        print('Alpha : %f' % alpha_ipt)
        alpha_ipt = 1.0  # note: this overrides the fade-in above, so alpha is effectively fixed at 1.0
        
        # train D
        for i in range(n_critic):
            d_summary_opt, _ = sess.run([nodes['summaries']['d'], nodes['product']['d']],\
                feed_dict=get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, nodes['product']['input']))
        summary_writer.add_summary(d_summary_opt, it)

        # train G
        for i in range(n_gen):
            g_summary_opt, _ = sess.run([nodes['summaries']['g'], nodes['product']['g']],\
                feed_dict=get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, nodes['product']['input']))
        summary_writer.add_summary(g_summary_opt, it)
        
        # display
        epoch = it // batch_epoch
        it_epoch = it % batch_epoch + 1
        if it % 1 == 0:
            print("iter : %8d, epoch : (%3d) (%5d/%5d) _ resume point : %d" % (it, epoch, it_epoch, batch_epoch,last_saved_iter))

        # sample
        if (it + 1) % batch_epoch == 0:
            f_sample_opt = sess.run(nodes['sample'], feed_dict=get_ipt_for_sample(batch_size, z_dim, nodes['product']['input']))
            f_sample_opt = np.clip(f_sample_opt, -1, 1)
            save_dir = './sample_images_while_training/wgp/'
            utils.mkdir(save_dir + '/')
            osz = int(math.sqrt(batch_size))+1
            utils.imwrite(utils.immerge(f_sample_opt, osz, osz), '%s/iter_(%d).png' % (save_dir, it))
            
        # save
        if (it + 1) % batch_epoch == 0:
            last_saved_iter = it
            save_path = saver.save(sess, '%s/model.ckpt' % (ckpt_dir))
            print('Model saved in file: %s' % save_path)
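Example #4 calls two helpers, get_ipt and get_ipt_for_sample, that are not shown in the snippet. The sketch below is only a guess at their shape, assuming nodes['product']['input'] is a dict of placeholders keyed 'real', 'z' and 'alpha'; the actual structure in the original code may differ.

import numpy as np

def get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, inputs):
    # hypothetical feed-dict builder; the real images are already resized to
    # target_size by preprocess_fn, so target_size is not used again here
    real_ipt = data_pool.batch()
    z_ipt = np.random.normal(size=[batch_size, z_dim]).astype(np.float32)
    return {inputs['real']: real_ipt, inputs['z']: z_ipt, inputs['alpha']: alpha_ipt}

def get_ipt_for_sample(batch_size, z_dim, inputs):
    # latent-only feed dict for the periodic sample dumps
    z_ipt = np.random.normal(size=[batch_size, z_dim]).astype(np.float32)
    return {inputs['z']: z_ipt, inputs['alpha']: 1.0}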
Example #5
def main(args):
    #
    save_dir = os.path.join(args.save_dir, args.model_type)
    img_dir = os.path.join(args.img_dir, args.model_type)
    log_dir = os.path.join(args.log_dir, args.model_type)
    train_dir = args.train_dir

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)

    summary_writer = tf.summary.FileWriter(log_dir)
    config_proto = utils.get_config_proto()

    sess = tf.Session(config=config_proto)
    model = VQVAE(args, sess, name="vqvae")

    img_paths = glob.glob('data/img_align_celeba/*.jpg')
    train_paths, test_paths = train_test_split(img_paths,
                                               test_size=0.1,
                                               random_state=args.random_seed)
    celeba = utils.DiskImageData(sess,
                                 train_paths,
                                 args.batch_size,
                                 shape=[218, 178, 3])
    total_batch = celeba.num_examples // args.batch_size

    for epoch in range(1, args.nb_epoch + 1):
        print "Epoch %d start with learning rate %f" % (
            epoch, model.learning_rate.eval(sess))
        print "- " * 50
        epoch_start_time = time.time()
        step_start_time = epoch_start_time
        for i in range(1, total_batch + 1):
            global_step = sess.run(model.global_step)
            x_batch = celeba.next_batch()

            _, loss, rec_loss, vq, commit, global_step, summaries = model.train(
                x_batch)
            summary_writer.add_summary(summaries, global_step)

            if i % args.print_step == 0:
                print "epoch %d, step %d, loss %f, rec_loss %f, vq_loss %f, commit_loss %f, time %.2fs" \
                    % (epoch, global_step, loss, rec_loss, vq, commit, time.time()-step_start_time)
                step_start_time = time.time()

        if args.anneal and epoch >= args.anneal_start:
            sess.run(model.lr_decay_op)

        if epoch % args.save_epoch == 0:
            x_batch = celeba.next_batch()
            x_recon = model.reconstruct(x_batch)
            utils.save_images(x_batch, [10, 10],
                              os.path.join(img_dir, "rawImage%s.jpg" % epoch))
            utils.save_images(
                x_recon, [10, 10],
                os.path.join(img_dir, "reconstruct%s.jpg" % epoch))

    model.saver.save(sess, os.path.join(save_dir, "model.ckpt"))
    print "Model stored...."
Example #6
gpu_id = 3

''' data '''
# you should prepare your own data in ./data/img_align_celeba
# celeba original size is [218, 178, 3]


def preprocess_fn(img):
    crop_size = 108
    re_size = 64
    img = tf.image.crop_to_bounding_box(img, (218 - crop_size) // 2, (178 - crop_size) // 2, crop_size, crop_size)
    img = tf.to_float(tf.image.resize_images(img, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
    return img

img_paths = glob.glob('./data/img_align_celeba/img_align_celeba/*.jpg')
data_pool = utils.DiskImageData(img_paths, batch_size, shape=[218, 178, 3], preprocess_fn=preprocess_fn)


""" graphs """
with tf.device('/gpu:%d' % gpu_id):
    ''' models '''
    generator = models.generator
    discriminator = models.discriminator

    ''' graph '''
    # inputs
    real = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])

    # generate
    fake = generator(z, reuse=False)
log_path = './logs/' + dataset_name + '_' + GAN_type + '_' + index
ckpt_path = './checkpoints/' + dataset_name + '_' + GAN_type + '_' + index
utils.mkdir(sample_path + '/')
utils.mkdir(ckpt_path + '/')


def preprocess_fn(img):
    # re_size = 64
    # img = tf.to_float(tf.image.resize_images(img, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
    img = tf.to_float(img) / 127.5 - 1

    return img


data_pool = utils.DiskImageData(img_paths,
                                batch_size,
                                shape=input_image_shape,
                                preprocess_fn=preprocess_fn)

with tf.device('/gpu:%d' % 0):
    generator = models.generator
    discriminator = models.discriminator

    # inputs
    real = tf.placeholder(tf.float32,
                          shape=[
                              None, input_image_shape[0], input_image_shape[1],
                              input_image_shape[2]
                          ])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])

    # generate
    fake = generator(z, reuse=False)


def preprocess_fn(img):
    crop_size = 200  # use the center part
    re_size = 64
    img = tf.image.crop_to_bounding_box(img, (origin_height - crop_size) // 2,
                                        (origin_with - crop_size) // 2,
                                        crop_size, crop_size)
    img = tf.to_float(
        tf.image.resize_images(
            img, [re_size, re_size],
            method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
    return img


img_paths = glob.glob('/media/gt/_dde_data/Datasets/CASIA-maxpy-clean/*/*.jpg')
#img_paths = glob.glob('../../datasets/CASIA-maxpy-clean/*/*.jpg')
data_pool = utils.DiskImageData(img_paths,
                                batch_size,
                                shape=[origin_height, origin_with, 3],
                                preprocess_fn=preprocess_fn)
""" graphs """
with tf.device('/gpu:%d' % gpu_id):
    """ models """
    generator = models.generator
    discriminator = models.discriminator
    """ graph """
    # inputs
    real = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])

    # generate
    fake = generator(z, reuse=False, with_bn=with_bn)

    # discriminate