# Example #1
# 0
def main(_):
    """Entry point: build a VAE and either train it or restore a checkpoint.

    Args:
        _: unused positional argument supplied by the app runner
           (e.g. ``tf.app.run``), which passes argv through.
    """
    # NOTE(review): __flags is a private attribute of the flags object; newer
    # absl/TF versions expose flag_values_dict() instead — confirm TF version.
    pp.pprint(flags.FLAGS.__flags)

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(FLAGS.checkpoint_dir, exist_ok=True)
    os.makedirs(FLAGS.sample_dir, exist_ok=True)

    with tf.Session() as sess:
        vae = VAE(sess,
                  image_size=FLAGS.image_size,
                  batch_size=FLAGS.batch_size,
                  output_size=FLAGS.output_size,
                  sigma=FLAGS.sigma,
                  dataset_name=FLAGS.dataset,
                  checkpoint_dir=FLAGS.checkpoint_dir,
                  sample_dir=FLAGS.sample_dir)

        if FLAGS.is_train:
            vae.train(FLAGS)
        else:
            # Restore weights only; presumably evaluation happens elsewhere
            # in the original script — confirm against the full file.
            vae.load(FLAGS.checkpoint_dir)
# Example #2
# 0
def main(_):
    """Entry point: configure directories, build a VAE, then train and/or
    encode the dataset into latent codes depending on FLAGS.

    Args:
        _: unused positional argument supplied by the app runner.
    """
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "vae_{0}_{1}_{2}_{3}".format(FLAGS.gaps, FLAGS.arch,
                                                      FLAGS.latent_dim,
                                                      FLAGS.beta)
    # Parse a "(64,64,3)"-style flag string into a list of ints.
    image_shape = [
        int(i) for i in FLAGS.image_shape.strip('()[]{}').split(',')
    ]
    dirs = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dirs['data'] = '../../data' if FLAGS.data_dir is None else FLAGS.data_dir
    dirs['codes'] = os.path.join(dirs['data'], 'codes/')
    create_directories(dirs, FLAGS.train, FLAGS.save_codes)

    z_dist = Gaussian(FLAGS.latent_dim)
    # Flattened output dimensionality, e.g. 64*64*3.
    # NOTE(review): on Python 3 this needs `from functools import reduce`
    # at the top of the file — confirm the import exists.
    output_dim = reduce(mul, image_shape, 1)
    output_dist = Gaussian(output_dim)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    vae = VAE(
        session=sess,
        output_dist=output_dist,
        z_dist=z_dist,
        arch=FLAGS.arch,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=dirs,
        beta=FLAGS.beta,
        gaps=FLAGS.gaps,
        vis_reconst=FLAGS.visualize_reconstruct,
        vis_disent=FLAGS.visualize_disentangle,
        n_disentangle_samples=FLAGS.n_disentangle_samples,
    )

    if FLAGS.train:
        _train(vae, dirs, image_shape)

    if FLAGS.save_codes:
        _save_codes(vae, dirs, image_shape)


def _train(vae, dirs, image_shape):
    """Attach training iterators to *vae* and run the training loop.

    Mutates FLAGS.stats_interval / FLAGS.ckpt_interval in place, converting
    them from epoch units into iteration counts.
    """
    data_manager = TeapotsDataManager(dirs['data'],
                                      FLAGS.batch_size,
                                      image_shape,
                                      shuffle=True,
                                      gaps=FLAGS.gaps,
                                      file_ext=FLAGS.file_ext,
                                      train_fract=0.8,
                                      inf=True)
    vae.train_iter, vae.dev_iter, vae.test_iter = data_manager.get_iterators()

    n_iters_per_epoch = data_manager.n_train // data_manager.batch_size
    # Epoch-denominated flags become iteration counts for the train loop.
    FLAGS.stats_interval = int(FLAGS.stats_interval * n_iters_per_epoch)
    FLAGS.ckpt_interval = int(FLAGS.ckpt_interval * n_iters_per_epoch)
    n_iters = int(FLAGS.epochs * n_iters_per_epoch)

    vae.train(n_iters, n_iters_per_epoch, FLAGS.stats_interval,
              FLAGS.ckpt_interval)


def _save_codes(vae, dirs, image_shape):
    """Encode the whole dataset with a trained model and save latent codes.

    Raises:
        RuntimeError: if no trained checkpoint could be restored.
    """
    b_size = 500  # large batch, forward prop only
    data_manager = TeapotsDataManager(dirs['data'],
                                      b_size,
                                      image_shape,
                                      shuffle=False,
                                      gaps=False,
                                      file_ext=FLAGS.file_ext,
                                      train_fract=1.,
                                      inf=False)
    data_manager.set_divisor_batch_size()
    vae.train_iter, vae.dev_iter, vae.test_iter = data_manager.get_iterators()

    vae.session.run(tf.global_variables_initializer())
    saved_step = vae.load()
    # Explicit raise instead of `assert`: asserts are stripped under -O,
    # which would let an untrained model silently produce garbage codes.
    if saved_step <= 1:
        raise RuntimeError("A trained model is needed to encode the data!")

    codes = []
    for batch_num, (img_batch, _) in enumerate(vae.train_iter):
        code = vae.encode(img_batch)  # [batch_size, reg_latent_dim]
        codes.append(code)
        if batch_num < 5 or batch_num % 100 == 0:
            print("Batch number {0}".format(batch_num))

    codes = np.vstack(codes)
    filename = os.path.join(dirs['codes'], "codes_" + FLAGS.exp_name)
    np.save(filename, codes)
    print("Codes saved to: {0}".format(filename))