Example #1
0
        for j in range(len(grads[0])):
            grads[0][j] += grads[i][j]
    # training op
    optimizer = tf.group(
        nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95,
                        mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
# Divide the accumulated negative log-likelihood by log(2) (nats -> bits)
# and by the total dimension count per step (nr_gpu * prod(obs_shape) *
# batch_size) so the metric is comparable across image sizes and batches.
# NOTE(review): assumes loss_gen[0] / loss_gen_test[0] already hold the
# loss summed across all GPUs -- confirm against the preceding reduction.
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
# Training masks: random rectangles up to the full image (max_ratio=1.0).
# obs_shape[0] / obs_shape[1] are presumably (height, width) -- confirm
# against the data loader.
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             max_ratio=1.0)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
# Evaluation mask: fixed centered region at ratio 0.5 of the image side.
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.5)
# Sampling mask: fixed centered region at ratio 24/64 of the image side.
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 24. / 64)


def sample_from_model(sess, data=None, **params):
    if type(data) is tuple:
        x, y = data
    else:
        x = data
        y = None
    x = np.cast[np.float32]((x - 127.5) / 127.5)  ## preprocessing

    if args.use_coordinates:
Example #2
0
        for j in range(len(grads[0])):
            grads[0][j] += grads[i][j]
    # training op
    optimizer = tf.group(
        nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95,
                        mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
# Divide the accumulated negative log-likelihood by log(2) (nats -> bits)
# and by the total dimension count per step (nr_gpu * prod(obs_shape) *
# batch_size) so the metric is comparable across image sizes and batches.
# NOTE(review): assumes loss_gen[0] / loss_gen_test[0] already hold the
# loss summed across all GPUs -- confirm against the preceding reduction.
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
# Training masks: random rectangles up to the full image (max_ratio=1.0).
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             max_ratio=1.0)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
# Evaluation here also uses random rectangles (same settings as training),
# unlike the other variants in this file which use a fixed center mask.
test_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                            obs_shape[1],
                                            max_ratio=1.0)
# Sampling mask: fixed centered region at ratio 16/32 (= 0.5) of the side.
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 16. / 32)

# sample from the model
# def sample_from_model(sess, data=None):
#     if data is not None and type(data) is not tuple:
#         x = data
#     x = np.cast[np.float32]((x - 127.5) / 127.5)
#     x = np.split(x, args.nr_gpu)
#     h = [x[i].copy() for i in range(args.nr_gpu)]
#     for i in range(args.nr_gpu):
Example #3
0
                                   shuffle=False,
                                   size=64)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    # init
    sess.run(initializer)

    if FLAGS.load_params:
        ckpt_file = FLAGS.save_dir + '/params_' + 'celeba' + '.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(sess, ckpt_file)

    train_mgen = m.RandomRectangleMaskGenerator(64, 64, max_ratio=0.75)
    test_mgen = m.CenterMaskGenerator(64, 64, 0.5)

    max_num_epoch = 100
    for epoch in range(max_num_epoch):
        tt = time.time()
        ls, mses, klds = [], [], []
        for data in train_data:
            feed_dict = make_feed_dict(data, train_mgen)
            l, mse, kld, _ = sess.run([loss, MSE, KLD, train_step],
                                      feed_dict=feed_dict)
            ls.append(l)
            mses.append(mse)
            klds.append(kld)
        train_loss, train_mse, train_kld = np.mean(ls), np.mean(mses), np.mean(
            klds)
Example #4
0
# Scalar learning-rate placeholder, fed at each training step.
tf_lr = tf.placeholder(tf.float32, shape=[])
with tf.device('/gpu:0'):
    # Accumulate the per-GPU losses and gradients into slot 0 so a single
    # optimizer update can be built from the combined gradients.
    for i in range(1,args.nr_gpu):
        loss_gen[0] += loss_gen[i]
        loss_gen_test[0] += loss_gen_test[i]
        for j in range(len(grads[0])):
            grads[0][j] += grads[i][j]
    # training op
    # tf.group ties the Adam parameter update and the parameter-averaging
    # op together so running `optimizer` executes both.
    optimizer = tf.group(nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
# Divide the GPU-summed negative log-likelihood by log(2) (nats -> bits)
# and by the total dimension count per step (nr_gpu * prod(obs_shape) *
# batch_size).
bits_per_dim = loss_gen[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_test = loss_gen_test[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)

# mask generator
# Training masks: random rectangles with the generator's default ratios.
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0], obs_shape[1])
# Evaluation mask: fixed centered region at ratio 24/64 of the image side.
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], ratio=24./64)
#test_mgen = um.CenterEllipseMaskGenerator(obs_shape[0], obs_shape[1])
#test_mgen = um.RectangleMaskGenerator(obs_shape[0], obs_shape[1], (28, 62, 38, 2))
#test_mgen = um.RectangleMaskGenerator(obs_shape[0], obs_shape[1], (54, 52, 64, 12))

# sample from the model
def sample_from_model(sess, data=None):
    if data is not None and type(data) is not tuple:
        x = data
    x = np.cast[np.float32]((x - 127.5) / 127.5)
    x = np.split(x, args.nr_gpu)
    h = [x[i].copy() for i in range(args.nr_gpu)]
    for i in range(args.nr_gpu):
        h[i] = uf.mask_inputs(h[i], test_mgen)
    feed_dict = {shs[i]: h[i] for i in range(args.nr_gpu)}
Example #5
0
import utils.mfunc as uf

# CelebA test split, batched to feed all GPUs in one step
# (batch_size * nr_gpu samples per iteration), unshuffled so results are
# reproducible; images loaded at 64x64.
test_data = celeba_data.DataLoader(v.FLAGS.data_dir,
                                   'test',
                                   v.FLAGS.batch_size * v.FLAGS.nr_gpu,
                                   shuffle=False,
                                   size=64)

# Allow TensorFlow to grow GPU memory on demand instead of pre-allocating
# the whole device.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    v.load_vae(sess, v.saver)

    test_mgen = m.RandomRectangleMaskGenerator(64,
                                               64,
                                               min_ratio=0.25,
                                               max_ratio=0.5)

    data = next(test_data)
    data = next(test_data)

    # img_tile = plotting.img_tile(data[:25], aspect_ratio=1.0, border_color=1.0, stretch=True)
    # img = plotting.plot_img(img_tile, title=v.FLAGS.data_set + ' samples')
    # plotting.plt.savefig(os.path.join("plots",'%s_vae_original.png' % (v.FLAGS.data_set)))

    feed_dict = v.make_feed_dict(data, test_mgen)
    ret = sess.run([v.mxs] + v.x_hats, feed_dict=feed_dict)
    mx, x_hat = ret[0], ret[1:]
    mx, x_hat = np.concatenate(mx, axis=0), np.concatenate(x_hat, axis=0)
    mx = np.rint(mx * 255.)
    x_hat = np.rint(x_hat * 255.)
            grads[0][j] += grads[i][j]
    # training op
    optimizer = tf.group(
        nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95,
                        mom2=0.9995), maintain_averages_op)

# convert loss to bits/dim
# Divide the accumulated negative log-likelihood by log(2) (nats -> bits)
# and by the total dimension count per step (nr_gpu * prod(obs_shape) *
# batch_size) so the metric is comparable across image sizes and batches.
# NOTE(review): assumes loss_gen[0] / loss_gen_test[0] already hold the
# loss summed across all GPUs -- confirm against the preceding reduction.
bits_per_dim = loss_gen[0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) *
                              args.batch_size)
bits_per_dim_test = loss_gen_test[0] / (args.nr_gpu * np.log(2.) *
                                        np.prod(obs_shape) * args.batch_size)

# mask generator
# Training masks: random rectangles covering between half and the full
# image; batch_same=True presumably applies one shared mask to the whole
# batch -- confirm in the mask-generator implementation.
train_mgen = um.RandomRectangleMaskGenerator(obs_shape[0],
                                             obs_shape[1],
                                             min_ratio=0.5,
                                             max_ratio=1.0,
                                             batch_same=True)
#train_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1])
# Evaluation and sampling use the same fixed centered mask at ratio 0.75.
test_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.75)
sample_mgen = um.CenterMaskGenerator(obs_shape[0], obs_shape[1], 0.75)


def sample_from_model(sess, data=None, **params):
    if type(data) is tuple:
        x, y = data
    else:
        x = data
        y = None
    x = np.cast[np.float32]((x - 127.5) / 127.5)  ## preprocessing