Example #1
# convert loss to bits/dim
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             max_ratio=1.0)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
test_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                            obs_shape[1],
                                            max_ratio=1.0)
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 16. / 32)

# sample from the model
# def sample_from_model(sess, data=None):
#     if data is not None and type(data) is not tuple:
#         x = data
#     x = np.cast[np.float32]((x - 127.5) / 127.5)
#     x = np.split(x, args.nr_gpu)
#     h = [x[i].copy() for i in range(args.nr_gpu)]
#     for i in range(args.nr_gpu):
#         h[i] = uf.mask_inputs(h[i], test_mgen)
#     feed_dict = {shs[i]: h[i] for i in range(args.nr_gpu)}
#     #x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
#     x_gen = [h[i][:,:,:,:3].copy() for i in range(args.nr_gpu)]
#     m_gen = [h[i][:,:,:,-1].copy() for i in range(args.nr_gpu)]
#     #assert m_gen[0]==m_gen[-1], "we currently assume all masks are the same during sampling"
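
The bits/dim conversion above rescales a summed negative log-likelihood (in nats, accumulated over args.nr_gpu towers of args.batch_size images each) into bits per pixel dimension. A minimal NumPy sketch of the same arithmetic, with placeholder numbers standing in for the graph tensors:

import numpy as np

# Placeholder values standing in for the tensors/args above (hypothetical numbers).
total_nll_nats = 120000.0          # summed negative log-likelihood over all towers, in nats
nr_gpu, batch_size = 4, 16         # number of GPU towers and per-tower batch size
obs_shape = (32, 32, 3)            # height x width x channels of one observation

# nats -> bits (divide by ln 2), then normalise per image and per dimension,
# exactly as in the bits_per_dim expression above.
bits_per_dim = total_nll_nats / (nr_gpu * batch_size * np.prod(obs_shape) * np.log(2.))
print(bits_per_dim)                # ~0.88 bits/dim for these made-up numbers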
Example #2
    optimizer = tf.group(
        nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95,
                        mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             max_ratio=1.0)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.5)
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 24. / 64)


def sample_from_model(sess, data=None, **params):
    if type(data) is tuple:
        x, y = data
    else:
        x = data
        y = None
    x = np.cast[np.float32]((x - 127.5) / 127.5)  ## preprocessing

    if args.use_coordinates:
        g = grid.generate_grid((x.shape[1], x.shape[2]), batch_size=x.shape[0])
        xg = np.concatenate([x, g], axis=-1)
        xg, _ = uf.random_crop_images(xg,
Example #3
# convert loss to bits/dim
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             max_ratio=1.0)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
test_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                            obs_shape[1],
                                            max_ratio=1.0)
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.875)


def sample_from_model(sess, data=None, **params):
    if type(data) is tuple:
        x, y = data
    else:
        x = data
        y = None
    x = np.cast[np.float32]((x - 127.5) / 127.5)  ## preprocessing

    if 'use_coordinates' in params and params['use_coordinates']:
        g = grid.generate_grid((x.shape[1], x.shape[2]), batch_size=x.shape[0])
        if 'x_hats' in params:
            x_hats = params['x_hats']
            x_hats = (x_hats * 2.) - 1.
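
grid.generate_grid is used in these examples to append per-pixel coordinate channels before cropping; its exact signature is repository-specific, so the helper below is only an illustrative stand-in built with np.meshgrid that yields normalised (row, col) channels in [-1, 1]:

import numpy as np

def make_coordinate_channels(height, width, batch_size):
    """Return a (batch, height, width, 2) array of normalised pixel coordinates."""
    ys = np.linspace(-1.0, 1.0, height, dtype=np.float32)
    xs = np.linspace(-1.0, 1.0, width, dtype=np.float32)
    yy, xx = np.meshgrid(ys, xs, indexing='ij')           # each (H, W)
    g = np.stack([yy, xx], axis=-1)                        # (H, W, 2)
    return np.tile(g[None, ...], (batch_size, 1, 1, 1))    # (B, H, W, 2)

# Usage mirroring the snippet: concatenate coordinates onto the image channels.
x = np.zeros((8, 64, 64, 3), dtype=np.float32)
g = make_coordinate_channels(x.shape[1], x.shape[2], batch_size=x.shape[0])
xg = np.concatenate([x, g], axis=-1)                       # (8, 64, 64, 5)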
Example #4
                                   size=64)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    # init
    sess.run(initializer)

    if FLAGS.load_params:
        ckpt_file = FLAGS.save_dir + '/params_' + 'celeba' + '.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(sess, ckpt_file)

    train_mgen = m.RandomRectangleMaskGenerator(64, 64, max_ratio=0.75)
    test_mgen = m.CenterMaskGenerator(64, 64, 0.5)

    max_num_epoch = 100
    for epoch in range(max_num_epoch):
        tt = time.time()
        ls, mses, klds = [], [], []
        for data in train_data:
            feed_dict = make_feed_dict(data, train_mgen)
            l, mse, kld, _ = sess.run([loss, MSE, KLD, train_step],
                                      feed_dict=feed_dict)
            ls.append(l)
            mses.append(mse)
            klds.append(kld)
        train_loss, train_mse, train_kld = np.mean(ls), np.mean(mses), np.mean(
            klds)
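
RandomRectangleMaskGenerator and CenterMaskGenerator come from the repository's mask utilities, whose interface is not shown in these snippets. A minimal sketch of the centre-mask idea, assuming a generator that returns binary masks with 0 over the hidden central square (the method name gen is hypothetical):

import numpy as np

class SimpleCenterMaskGenerator(object):
    """Illustrative stand-in: 1 = observed pixel, 0 = masked-out centre square."""

    def __init__(self, height, width, ratio=0.5):
        self.height, self.width, self.ratio = height, width, ratio

    def gen(self, n):
        mask = np.ones((n, self.height, self.width), dtype=np.float32)
        h, w = int(self.height * self.ratio), int(self.width * self.ratio)
        top, left = (self.height - h) // 2, (self.width - w) // 2
        mask[:, top:top + h, left:left + w] = 0.
        return mask

# e.g. masks for a batch of 16 images at 64x64, hiding the central 32x32 block
masks = SimpleCenterMaskGenerator(64, 64, 0.5).gen(16)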
Example #5
test_data = celeba_data.DataLoader(FLAGS.data_dir, 'valid', FLAGS.batch_size*FLAGS.nr_gpu, shuffle=False, size=128)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    # init
    sess.run(initializer)

    if FLAGS.load_params:
        ckpt_file = FLAGS.save_dir + '/params_' + 'celeba' + '.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(sess, ckpt_file)

    train_mgen = m.RandomRectangleMaskGenerator(128, 128, max_ratio=0.75)
    test_mgen = m.CenterMaskGenerator(128, 128, 0.5)

    max_num_epoch = 1000
    for epoch in range(max_num_epoch):
        tt = time.time()
        ls, mses, klds = [], [], []
        for data in train_data:
            feed_dict = make_feed_dict(data, train_mgen)
            l, mse, kld, _ = sess.run([loss, MSE, KLD, train_step], feed_dict=feed_dict)
            ls.append(l)
            mses.append(mse)
            klds.append(kld)
        train_loss, train_mse, train_kld = np.mean(ls), np.mean(mses), np.mean(klds)

        ls, mses, klds = [], [], []
        for data in test_data:
Example #6
with tf.device('/gpu:0'):
    for i in range(1,args.nr_gpu):
        loss_gen[0] += loss_gen[i]
        loss_gen_test[0] += loss_gen_test[i]
        for j in range(len(grads[0])):
            grads[0][j] += grads[i][j]
    # training op
    optimizer = tf.group(nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
bits_per_dim = loss_gen[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_test = loss_gen_test[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)

# mask generator
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0], obs_shape[1])
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], ratio=24./64)
#test_mgen = um.CenterEllipseMaskGenerator(obs_shape[0], obs_shape[1])
#test_mgen = um.RectangleMaskGenerator(obs_shape[0], obs_shape[1], (28, 62, 38, 2))
#test_mgen = um.RectangleMaskGenerator(obs_shape[0], obs_shape[1], (54, 52, 64, 12))

# sample from the model
def sample_from_model(sess, data=None):
    if type(data) is tuple:
        x = data[0]  # (images, labels) tuple: keep only the images
    else:
        x = data
    x = np.cast[np.float32]((x - 127.5) / 127.5)  # scale uint8 pixels to [-1, 1]
    x = np.split(x, args.nr_gpu)
    h = [x[i].copy() for i in range(args.nr_gpu)]
    for i in range(args.nr_gpu):
        h[i] = uf.mask_inputs(h[i], test_mgen)
    feed_dict = {shs[i]: h[i] for i in range(args.nr_gpu)}
    #x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
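
The loop at the top of this example reduces per-tower losses and gradients into index 0 before building a single Adam update. The same reduction pattern, restated with plain NumPy arrays in place of the TensorFlow tensors (values are hypothetical):

import numpy as np

# Hypothetical per-GPU results: one loss scalar and one gradient list per tower.
nr_gpu = 4
loss_gen = [np.float32(1.0 + i) for i in range(nr_gpu)]
grads = [[np.ones((3, 3)) * (i + 1), np.ones((5,)) * (i + 1)] for i in range(nr_gpu)]

# Sum everything into tower 0, exactly as the loop in the snippet does.
for i in range(1, nr_gpu):
    loss_gen[0] += loss_gen[i]
    for j in range(len(grads[0])):
        grads[0][j] += grads[i][j]

# grads[0] now holds the summed gradients that would feed the single optimizer update.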
Example #7
from utils import plotting
import utils.mfunc as uf

test_data = celeba_data.DataLoader(v.FLAGS.data_dir,
                                   'valid',
                                   v.FLAGS.batch_size * v.FLAGS.nr_gpu,
                                   shuffle=False,
                                   size=128)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    v.load_vae(sess, v.saver)

    test_mgen = m.CenterMaskGenerator(128, 128, 100. / 128.)

    data = next(test_data)
    data = uf.mask_inputs(data, test_mgen)[:, :, :, :3]

    img_tile = plotting.img_tile(data[:25],
                                 aspect_ratio=1.0,
                                 border_color=1.0,
                                 stretch=True)
    img = plotting.plot_img(img_tile, title=v.FLAGS.data_set + ' samples')
    plotting.plt.savefig(
        os.path.join("plots", '%s_vae_original.png' % (v.FLAGS.data_set)))

    feed_dict = v.make_feed_dict(data, test_mgen)
    sample_x = sess.run(v.x_hats, feed_dict=feed_dict)
    sample_x = np.concatenate(sample_x, axis=0)
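
plotting.img_tile arranges a batch of images into one grid before saving; its options (aspect_ratio, border_color, stretch) are repository-specific. A simplified stand-in that tiles a square number of images with NumPy:

import numpy as np

def tile_images(images):
    """Tile (N, H, W, C) images into one (rows*H, cols*W, C) grid; N must be square."""
    n, h, w, c = images.shape
    side = int(np.sqrt(n))
    assert side * side == n, "this simple sketch expects a square number of images"
    grid = images[:side * side].reshape(side, side, h, w, c)
    grid = grid.transpose(0, 2, 1, 3, 4).reshape(side * h, side * w, c)
    return grid

# e.g. the 25 masked CelebA crops shown above would become a 5x5 tile
tile = tile_images(np.random.rand(25, 128, 128, 3).astype(np.float32))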
Example #8
    optimizer = tf.group(
        nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95,
                        mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             max_ratio=1.0)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.75)
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.75)


def sample_from_model(sess, data=None, **params):
    if type(data) is tuple:
        x, y = data
    else:
        x = data
        y = None
    x = np.cast[np.float32]((x - 127.5) / 127.5)  ## preprocessing

    if args.use_coordinates:
        g = grid.generate_grid((x.shape[1], x.shape[2]), batch_size=x.shape[0])
        xg = np.concatenate([x, g], axis=-1)
        xg, _ = uf.random_crop_images(xg,
Example #9
with tf.device('/gpu:0'):
    for i in range(1,args.nr_gpu):
        loss_gen[0] += loss_gen[i]
        loss_gen_test[0] += loss_gen_test[i]
        for j in range(len(grads[0])):
            grads[0][j] += grads[i][j]
    # training op
    optimizer = tf.group(nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
bits_per_dim = loss_gen[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_test = loss_gen_test[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)

# mask generator
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0], obs_shape[1])
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])

# sample from the model
def sample_from_model(sess, data=None):
    if type(data) is tuple:
        x = data[0]  # (images, labels) tuple: keep only the images
    else:
        x = data
    x = np.cast[np.float32]((x - 127.5) / 127.5)  # scale uint8 pixels to [-1, 1]
    x = np.split(x, args.nr_gpu)
    h = [x[i].copy() for i in range(args.nr_gpu)]
    for i in range(args.nr_gpu):
        h[i] = uf.mask_inputs(h[i], test_mgen)
    feed_dict = {shs[i]: h[i] for i in range(args.nr_gpu)}
    #x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
    x_gen = [h[i][:,:,:,:3].copy() for i in range(args.nr_gpu)]
    m_gen = [h[i][:,:,:,-1].copy() for i in range(args.nr_gpu)]
    #assert m_gen[0]==m_gen[-1], "we currently assume all masks are the same during sampling"
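
uf.mask_inputs is not defined in these snippets, but the slicing that follows it ([:, :, :, :3] for pixels, [:, :, :, -1] for the mask) suggests it hides masked pixels and appends the binary mask as an extra channel. A hedged sketch of that convention (function name and mask polarity are assumptions):

import numpy as np

def mask_inputs_sketch(x, mask):
    """Zero the masked pixels of x (B, H, W, 3) and append mask (B, H, W) as a 4th channel."""
    x_masked = x * mask[..., None]                                  # hide pixels where mask == 0
    return np.concatenate([x_masked, mask[..., None]], axis=-1)     # (B, H, W, 4)

# Usage with a simple centre mask, matching the slicing in the snippet above:
x = np.random.uniform(-1., 1., size=(8, 64, 64, 3)).astype(np.float32)
mask = np.ones((8, 64, 64), dtype=np.float32)
mask[:, 16:48, 16:48] = 0.
h = mask_inputs_sketch(x, mask)
x_gen, m_gen = h[:, :, :, :3].copy(), h[:, :, :, -1].copy()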