예제 #1
0
def sample_from_model(sess, data, fill_region=None, mgen=None):
    """Autoregressively sample images from the model, one pixel at a time.

    Args:
        sess: TensorFlow session containing the built model graph.
        data: image batch of shape (N, H, W, C); rescaled here to [-1, 1].
        fill_region: optional (H, W) array; positions where it is 0 are
            regenerated by the model, non-zero positions keep the values
            from `data`. None means regenerate every pixel.
        mgen: optional mask generator; defaults to a 'random rec' generator.

    Returns:
        np.ndarray: generated images from all GPUs, concatenated on axis 0.
    """
    if mgen is None:
        # FIX: was 'random_rec'; every other call site in this file spells
        # this mask type 'random rec' (see make_feed_dict and the
        # args.mask_type checks), so the underscore form looks like a typo.
        mgen = get_generator('random rec', args.img_size)
    # Rescale pixel values from [0, 255] to [-1, 1] as float32.
    # (astype replaces np.cast, which was removed in NumPy 2.0.)
    data = ((data - 127.5) / 127.5).astype(np.float32)
    ds = np.split(data, args.nr_gpu)
    # Inference-mode feed dict: training flags off, dropout disabled.
    feed_dict = {is_trainings[i]: False for i in range(args.nr_gpu)}
    feed_dict.update({dropout_ps[i]: 0. for i in range(args.nr_gpu)})
    feed_dict.update({xs[i]: ds[i] for i in range(args.nr_gpu)})
    masks_np = [mgen.gen(args.batch_size) for _ in range(args.nr_gpu)]
    if "output" in args.use_mask_for:
        if args.phase == 'pvae':
            # PVAE phase feeds all-zero output masks of the same shape.
            feed_dict.update({masks[i]: np.zeros_like(masks_np[i]) for i in range(args.nr_gpu)})
        elif args.phase == 'ce':
            feed_dict.update({masks[i]: masks_np[i] for i in range(args.nr_gpu)})
    if "input" in args.use_mask_for:
        feed_dict.update({input_masks[i]: masks_np[i] for i in range(args.nr_gpu)})

    x_gen = [ds[i].copy() for i in range(args.nr_gpu)]
    # Raster-scan generation: for each pixel not protected by fill_region,
    # feed the current partial images and copy the model's prediction back in.
    for yi in range(args.img_size):
        for xi in range(args.img_size):
            if fill_region is None or fill_region[yi, xi] == 0:
                feed_dict.update({x_bars[i]: x_gen[i] for i in range(args.nr_gpu)})
                x_hats = sess.run([pvaes[i].x_hat for i in range(args.nr_gpu)], feed_dict=feed_dict)
                for i in range(args.nr_gpu):
                    x_gen[i][:, yi, xi, :] = x_hats[i][:, yi, xi, :]
    return np.concatenate(x_gen, axis=0)
예제 #2
0
def make_feed_dict(data, is_training=True, dropout_p=0.5, mgen=None):
    """Assemble the per-GPU feed dict for a single train/eval step.

    Args:
        data: image batch covering all GPUs; rescaled to [-1, 1] and split
            into args.nr_gpu shards.
        is_training: value fed to every per-GPU training-mode placeholder.
        dropout_p: dropout probability fed to every per-GPU placeholder.
        mgen: optional mask generator; defaults to a 'random rec' generator.

    Returns:
        dict mapping graph placeholders to numpy values.
    """
    if mgen is None:
        mgen = get_generator('random rec', args.img_size)
    data = np.cast[np.float32]((data - 127.5) / 127.5)
    shards = np.split(data, args.nr_gpu)

    # Per-GPU inputs: mode flags, dropout, data (both xs and x_bars get the
    # same shard), and fresh random latent indices.
    feed_dict = {}
    for g in range(args.nr_gpu):
        feed_dict[is_trainings[g]] = is_training
        feed_dict[dropout_ps[g]] = dropout_p
        feed_dict[xs[g]] = shards[g]
        feed_dict[x_bars[g]] = shards[g]
        feed_dict[random_indices[g]] = generate_random_indices(args.batch_size, args.z_dim)

    # One freshly generated mask batch per GPU.
    mask_batches = [mgen.gen(args.batch_size) for _ in range(args.nr_gpu)]
    if "output" in args.use_mask_for:
        if args.phase == 'pvae':
            # PVAE phase: output masks are zeroed out.
            for g in range(args.nr_gpu):
                feed_dict[masks[g]] = np.zeros_like(mask_batches[g])
        elif args.phase == 'ce':
            for g in range(args.nr_gpu):
                feed_dict[masks[g]] = mask_batches[g]
    if "input" in args.use_mask_for:
        for g in range(args.nr_gpu):
            feed_dict[input_masks[g]] = mask_batches[g]
    return feed_dict
# Metrics recorded during training / evaluation of the ControlledConvPixelVAE.
eval_keys = ['total loss', 'nll loss', 'reg loss', 'bits per dim', 'mi']
learner.construct_models(model_cls=ControlledConvPixelVAE, model_opt=model_opt, learning_rate=args.learning_rate, trainable_params=trainable_params, eval_keys=eval_keys)

# TF1 graph-mode setup: create the variable-init op before opening the session.
initializer = tf.global_variables_initializer()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
with tf.Session(config=config) as sess:

    sess.run(initializer)
    learner.set_session(sess)
    if args.mode == 'train':
        kwargs = {
            "train_mgen": get_generator(args.mask_type, size=args.img_size), ###
            "sample_mgen": get_generator(args.mask_type, size=args.img_size), ###
            "max_num_epoch": 200,
            "save_interval": args.save_interval,
            "restore": args.load_params,
        }
        if args.phase == 'ce':
            if not args.one_stage:
                # Two-stage training: warm-start the CE phase from a
                # pretrained PVAE checkpoint directory.
                learner.preload(from_dir=args.pvae_dir, var_list=get_trainable_variables(["forward_pixel_cnn", "conv_encoder", "conv_decoder"]))
        learner.train(**kwargs)
    elif args.mode == 'test':
        learner.eval(which_set='test', mgen=get_generator('bottom half', size=args.img_size), generate_samples=True)
    elif args.mode == 'inpainting':
        # NOTE(review): this branch only prepares settings and never calls
        # learner.eval in this chunk — possibly dead or truncated; confirm.
        layout = (10, 10)
        same_inputs = False
        use_mask_at = "{0}_{1}.npz".format(args.mask_type, args.data_set)
    # NOTE(review): a second mode dispatch follows; its 'train' branch is a
    # no-op because training was already handled above.
    if args.mode == 'train':
        pass
    elif args.mode == 'test':
        # Map the human-readable mask-type name to the checkpoint-dir suffix.
        if args.mask_type == 'random blobs':
            mtype = 'blob'
        elif args.mask_type == 'random rec':
            mtype = 'rec'
        elif args.mask_type == 'pepper':
            mtype = 'pepper'

        # Load dataset-specific pretrained conv-VAE weights before evaluating.
        if 'mnist' in args.data_set:
            learner.preload(
                from_dir="/data/ziz/jxu/save_dirs/model_mnist_conv_vae_{0}".
                format(mtype),
                var_list=None)
        elif 'celeba' in args.data_set:
            learner.preload(
                from_dir="/data/ziz/jxu/save_dirs/model_celeba_conv_vae_{0}".
                format(mtype),
                var_list=None)
        elif 'church_outdoor' in args.data_set:
            learner.preload(
                from_dir=
                "/data/ziz/jxu/save_dirs/model_church_outdoor_conv_vae_{0}".
                format(mtype),
                var_list=None)
        learner.eval(which_set='test',
                     mgen=get_generator(args.mask_type, size=args.img_size),
                     generate_samples=False,
                     restore=False)
예제 #5
0
    model_cls=ConvVAE,
    model_opt=model_opt,
    learning_rate=args.learning_rate,
    eval_keys=['total loss', 'nll loss', 'reg loss', 'bits per dim'])
# TF1 session setup for the ConvVAE experiment.
initializer = tf.global_variables_initializer()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
with tf.Session(config=config) as sess:

    sess.run(initializer)
    learner.set_session(sess)
    if args.mode == 'train':
        kwargs = {
            "train_mgen": get_generator(args.mask_type,
                                        size=args.img_size),  ###
            "sample_mgen": get_generator(args.mask_type,
                                         size=args.img_size),  ###
            "max_num_epoch": 200,
            "save_interval": args.save_interval,
            "restore": args.load_params,
        }
        learner.train(**kwargs)
    elif args.mode == 'test':
        learner.eval(which_set='test',
                     mgen=get_generator(args.mask_type, size=args.img_size),
                     generate_samples=False)
    elif args.mode == 'inpainting':
        # NOTE(review): settings are prepared but learner.eval is not called
        # in this visible chunk — likely truncated; verify against full file.
        layout = (10, 10)
        same_inputs = False
        use_mask_at = "{0}_{1}.npz".format(args.mask_type, args.data_set)
예제 #6
0
    # Optionally resume all model parameters from this run's save_dir.
    if args.load_params:
        ckpt_file = args.save_dir + '/params_' + args.data_set + '.ckpt'
        print('restoring parameters from', ckpt_file)
        saver.restore(sess, ckpt_file)

    if args.phase == 'ce':
        # Restore part of the parameters: the CE phase warm-starts the
        # encoder/decoder and pixel-CNN subnets from args.load_dir.
        var_list = get_trainable_variables(
            ["conv_encoder", "conv_decoder", "conv_pixel_cnn"])
        saver1 = tf.train.Saver(var_list=var_list)
        ckpt_file = args.load_dir + '/params_' + args.data_set + '.ckpt'
        print('restoring parameters from', ckpt_file)
        saver1.restore(sess, ckpt_file)

    # fill_region controls which pixels get resampled during generation
    # (see sample_from_model: entries equal to 0 are regenerated).
    sample_mgen = get_generator('center', args.img_size)
    if args.phase == 'pvae':
        fill_region = get_generator('full', args.img_size).gen(1)[0]
    elif args.phase == 'ce':
        fill_region = sample_mgen.gen(1)[0]

    # Main loop: one pass over train_data per epoch, then an eval pass.
    max_num_epoch = 200
    for epoch in range(max_num_epoch + 1):
        tt = time.time()  # NOTE(review): epoch timer started but never read here
        for data in train_data:
            feed_dict = make_feed_dict(data, is_training=True, dropout_p=0.5)
            sess.run(train_step, feed_dict=feed_dict)

        for data in eval_data:
            # Evaluation uses the same feed-dict builder with dropout off.
            feed_dict = make_feed_dict(data, is_training=False, dropout_p=0.)
            recorder.evaluate(sess, feed_dict)
# Fully-observed BidirectionalPixelCNN experiment: build the learner, load
# the dataset (batch size is per-GPU times GPU count), and construct models.
learner = FullyObservedLearner(args.nr_gpu, args.save_dir, args.img_size, exp_name=args.exp_name)
learner.load_data(args.data_set, args.batch_size*args.nr_gpu, use_debug_mode=args.debug)
learner.construct_models(model_cls=BidirectionalPixelCNN, model_opt=model_opt, learning_rate=args.learning_rate, trainable_params=None, eval_keys=['total loss', 'nll loss', 'bits per dim'])

initializer = tf.global_variables_initializer()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
with tf.Session(config=config) as sess:

    sess.run(initializer)
    learner.set_session(sess)

    if args.mode == 'train':
        kwargs = {
            "train_mgen": get_generator(args.mask_type, size=args.img_size),
            "sample_mgen": get_generator(args.mask_type, size=args.img_size),
            "max_num_epoch": 200,
            "save_interval": args.save_interval,
            "restore": args.load_params,
        }
        learner.train(**kwargs)
    elif args.mode == 'test':
        learner.eval(which_set='test', mgen=get_generator(args.mask_type, size=args.img_size), generate_samples=False)
    elif args.mode == 'inpainting':
        # Inpainting evaluation: generate samples arranged in a 10x10 grid,
        # reusing a saved mask file named after the mask type and dataset.
        layout = (10, 10)
        same_inputs = False # True
        use_mask_at = "{0}_{1}.npz".format(args.mask_type, args.data_set)
        learner.eval(which_set='test', mgen=get_generator(args.mask_type, size=args.img_size), generate_samples=True, layout=layout, same_inputs=same_inputs, use_mask_at=use_mask_at)
예제 #8
0
# Train only the inference subnet of the IWAE variant; the remaining subnets
# are preloaded below from pretrained checkpoints.
#     trainable_params = ["forward_pixel_cnn", "reverse_pixel_cnn"]
trainable_params = ["inference"]
learner.construct_models(model_cls=ControlledConvPixelVAEIWAE, model_opt=model_opt, learning_rate=args.learning_rate, trainable_params=trainable_params, eval_keys=['total loss', 'bits per dim'])

# TF1 session setup.
initializer = tf.global_variables_initializer()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
with tf.Session(config=config) as sess:

    sess.run(initializer)
    learner.set_session(sess)
    if args.mode == 'train':
        kwargs = {
            "train_mgen": get_generator(args.mask_type, size=args.img_size), ###
            "sample_mgen": get_generator(args.mask_type, size=args.img_size), ###
            "max_num_epoch": 200,
            "save_interval": None,
            "restore": args.load_params,
        }
        # Warm-start the non-trained subnets from dataset-specific pretrained
        # directories (mnist vs. everything else, assumed to be celeba-like).
        if 'mnist' in args.data_set:
            learner.preload(from_dir="/data/ziz/jxu/save_dirs/model_mnist_controlled_pixel_vae_mmd_{0}_rec".format(args.phase), var_list=get_trainable_variables(["reverse_pixel_cnn", "forward_pixel_cnn", "conv_encoder", "conv_decoder"]))
        else:
            learner.preload(from_dir="/data/ziz/jxu/save_dirs/model_celeba_controlled_pixel_vae_mmd_{0}_rec".format(args.phase), var_list=get_trainable_variables(["reverse_pixel_cnn", "forward_pixel_cnn", "conv_encoder", "conv_decoder"]))
        learner.train(**kwargs)

    elif args.mode == 'test':
        if args.mask_type == 'random blobs':
            mtype = 'blob'
        elif args.mask_type == 'random rec':
예제 #9
0
                         model_opt=model_opt,
                         learning_rate=args.learning_rate,
                         trainable_params=None,
                         eval_keys=['total loss', 'nll loss', 'bits per dim'])

initializer = tf.global_variables_initializer()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    sess.run(initializer)
    learner.set_session(sess)
    if args.mode == 'train':
        kwargs = {
            "train_mgen": get_generator('full', size=args.img_size),
            "sample_mgen": get_generator(args.mask_type, size=args.img_size),
            "max_num_epoch": 200,
            "save_interval": args.save_interval,
            "restore": args.load_params,
        }
        learner.train(**kwargs)
    elif args.mode == 'test':
        learner.eval(which_set='test',
                     mgen=get_generator(args.mask_type, size=args.img_size),
                     generate_samples=False)
    elif args.mode == 'inpainting':
        layout = (10, 10)
        same_inputs = False
        use_mask_at = "{0}_{1}.npz".format(args.mask_type, args.data_set)
        learner.eval(which_set='test',
예제 #10
0

initializer = tf.global_variables_initializer()
saver = tf.train.Saver()  # saver over all graph variables, used to restore below

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
with tf.Session(config=config) as sess:

    sess.run(initializer)

    # Restore trained parameters for this dataset from save_dir.
    ckpt_file = args.save_dir + '/params_' + args.data_set + '.ckpt'
    print('restoring parameters from', ckpt_file)
    saver.restore(sess, ckpt_file)

    # Mask used for visualization — presumably selects the bottom quarter of
    # each image (confirm get_generator semantics against its definition).
    sample_mgen = get_generator('bottom quarter', args.img_size)
    fill_region = sample_mgen.gen(1)[0]
    # sample_mgen = get_generator('transparent', args.img_size)
    # fill_region = get_generator('full', args.img_size).gen(1)[0]
    data = next(test_data)

    from blocks.helpers import broadcast_masks_np
    # Zero out pixels where the mask is 0, broadcasting it over 3 channels.
    data = data.astype(np.float32) * broadcast_masks_np(fill_region, 3)

    test_data.reset()
    # vdata = np.cast[np.float32]((data - 127.5) / 127.5)
    # visualize_samples(vdata, "/data/ziz/jxu/gpu-results/show_original.png", layout=[8,8])
    img = []
    for i in [7]:  #[5,7,8]: #[5, 7, 8, 18, 27, 44, 74, 77]:
        sample_x = latent_traversal(sess,