Example #1
0
def load_hyper_and_data(path_to_trained_models, dataset_name, samples_n,
                        run_with_sample):
    """Load hyper-parameters and the test split for a trained model.

    Returns a (test_dataset, hyper, epoch) tuple, where `hyper` is the
    (possibly updated) dict handed back by the dataset loader and `epoch`
    is its 'epochs' entry.
    """
    hyper = load_hyper(path_to_trained_models, samples_n)
    # Seed TF's global RNG so dataset shuffling/sampling is reproducible.
    tf.random.set_seed(seed=hyper['seed'])
    loaded = load_vae_dataset(dataset_name=dataset_name,
                              batch_n=hyper['batch_n'],
                              epochs=hyper['epochs'],
                              run_with_sample=run_with_sample,
                              architecture=hyper['architecture'],
                              hyper=hyper)
    # Only the test split is needed here; the loader also refreshes hyper.
    _, test_dataset, _, hyper = loaded
    return test_dataset, hyper, hyper['epochs']
Example #2
0
 def test_data_loading(self):
     """Smoke test: load the celeb_a dataset and eyeball the first image."""
     name, batch_size, n_epochs = 'celeb_a', 64, 100
     hyper = {'batch_n': batch_size, 'epochs': n_epochs}
     train_dataset, test_dataset, test_images, hyper = load_vae_dataset(
         dataset_name=name,
         batch_n=batch_size,
         epochs=n_epochs,
         run_with_sample=True,
         hyper=hyper)
     # Visual check only; the assertion below just marks the test as run.
     plt.imshow(test_images[0, :, :, :])
     plt.show()
     self.assertEqual(True, True)
Example #3
0
def sample_from_posterior(path_to_results,
                          hyper_file,
                          dataset_name,
                          weights_file,
                          model_type,
                          run_with_sample=True):
    """Draw posterior samples from a trained VAE over the whole test set.

    Loads the pickled hyper-parameter dict and the trained weights, runs a
    forward pass per test image, samples from the resulting distribution,
    and measures how far each sample's psi lies from the simplex vertex
    chosen by its argmax.

    Returns a numpy array of shape
    (total_test_images, samples_n, num_of_discrete_var) of distances.
    """
    # NOTE(review): pickle.load executes arbitrary code on untrusted files;
    # only load artifacts produced by this project.
    with open(file=path_to_results + hyper_file, mode='rb') as f:
        hyper = pickle.load(f)

    data = load_vae_dataset(dataset_name=dataset_name,
                            batch_n=hyper['batch_n'],
                            epochs=hyper['epochs'],
                            run_with_sample=run_with_sample,
                            architecture=hyper['architecture'],
                            hyper=hyper)
    # Only the test split is used; the loader also refreshes hyper.
    _, test_dataset, _, hyper = data

    vae_opt = construct_nets_and_optimizer(hyper=hyper, model_type=model_type)
    vae_opt.nets.load_weights(filepath=path_to_results + weights_file)

    samples_n, total_test_images, im_idx = 100, 10_000, 0
    # samples_n, total_test_images, im_idx = 100, 19_962, 0
    shape = (total_test_images, samples_n, hyper['num_of_discrete_var'])
    diff = np.zeros(shape=shape)

    for test_image in test_dataset:
        _, _, params = vae_opt.perform_fwd_pass(test_image)
        # Fix: the original used `model_type.find('Planar') > 0`, which is
        # False when the substring sits at index 0 (a name starting with
        # 'Planar'), silently skipping the planar-flow branch.
        if 'Planar' in model_type:
            dist = determine_distribution(model_type=model_type,
                                          params=params,
                                          temp=hyper['temp'],
                                          samples_n=samples_n,
                                          planar_flow=vae_opt.nets.planar_flow)
        else:
            dist = determine_distribution(model_type=model_type,
                                          params=params,
                                          temp=hyper['temp'],
                                          samples_n=samples_n)
        dist.generate_sample()
        psi = dist.psi.numpy()
        # psi appears to be (batch, ..., ..., num_discrete); one diff row
        # is filled per image, per discrete variable.
        for i in range(psi.shape[0]):
            for k in range(psi.shape[3]):
                diff[im_idx, :, k] = calculate_distance_to_simplex(
                    psi=psi[i, :, :, k],
                    argmax_locs=np.argmax(psi[i, :, :, k], axis=0))
            im_idx += 1
    return diff
Example #4
0
def run_vae(hyper, run_with_sample):
    """Seed TF, load the dataset named in `hyper`, build the nets/optimizer,
    and run training."""
    tf.random.set_seed(seed=hyper['seed'])
    # The loader returns a refreshed hyper dict alongside the data splits.
    train_dataset, test_dataset, test_images, hyper = load_vae_dataset(
        dataset_name=hyper['dataset_name'],
        batch_n=hyper['batch_n'],
        epochs=hyper['epochs'],
        run_with_sample=run_with_sample,
        architecture=hyper['architecture'],
        hyper=hyper)

    vae_opt = construct_nets_and_optimizer(hyper=hyper,
                                           model_type=hyper['model_type'])

    train_vae(vae_opt=vae_opt,
              hyper=hyper,
              train_dataset=train_dataset,
              test_dataset=test_dataset,
              test_images=test_images,
              check_every=hyper['check_every'])
Example #5
0
def sample_from_posterior(path_to_results,
                          hyper_file,
                          dataset_name,
                          weights_file,
                          model_type,
                          run_with_sample=True):
    """Sample psi from a trained model's posterior for every test image and
    return the distances of each sample to its argmax simplex vertex.

    Returns a numpy array of shape
    (total_test_images, samples_n, num_of_discrete_var).
    """
    # NOTE(review): pickle.load runs arbitrary code on untrusted files.
    with open(file=path_to_results + hyper_file, mode='rb') as f:
        hyper = pickle.load(f)

    # The loader also returns (possibly adjusted) batch_n / epochs /
    # iter_per_epoch, which are written straight back into hyper.
    (train_dataset, test_dataset, test_images, hyper['batch_n'],
     hyper['epochs'], image_size,
     hyper['iter_per_epoch']) = load_vae_dataset(
        dataset_name=dataset_name,
        batch_n=hyper['batch_n'],
        epochs=hyper['epochs'],
        run_with_sample=run_with_sample,
        architecture=hyper['architecture'])

    model = setup_model(hyper=hyper, image_size=image_size)
    model.load_weights(filepath=path_to_results + weights_file)
    vae_opt = setup_vae_optimizer(model=model,
                                  hyper=hyper,
                                  model_type=model_type)

    samples_n, total_test_images, im_idx = 100, 10_000, 0
    diff = np.zeros(shape=(total_test_images, samples_n,
                           hyper['num_of_discrete_var']))

    for test_image in test_dataset:
        z, x_logit, params = vae_opt.perform_fwd_pass(test_image)
        dist = determine_distribution(model_type=model_type,
                                      params=params,
                                      temp=hyper['temp'],
                                      samples_n=samples_n)
        dist.do_reparameterization_trick()
        psi = dist.psi.numpy()
        # One diff row per image in the batch, per discrete variable.
        batch_size, n_discrete = psi.shape[0], psi.shape[3]
        for b in range(batch_size):
            for k in range(n_discrete):
                # `ψ=` is the external helper's actual parameter name.
                diff[im_idx, :, k] = calculate_distance_to_simplex(
                    ψ=psi[b, :, :, k],
                    argmax_locs=np.argmax(psi[b, :, :, k], axis=0))
            im_idx += 1
    return diff
Example #6
0
    def test_data_loading(self):
        """Smoke test: load celeb_a (sampled) and display the first image."""
        output = load_vae_dataset(dataset_name='celeb_a',
                                  batch_n=64,
                                  epochs=100,
                                  run_with_sample=True)
        (train_dataset, test_dataset, test_images,
         batch_n, epochs, image_size, iter_per_epoch) = output
        # Visual check only; the assertion just marks the test as executed.
        plt.imshow(test_images[0, :, :, :])
        plt.show()
        self.assertEqual(True, True)
Example #7
0
def run_vae(hyper, run_with_sample):
    """Load data, build the model and optimizer, train, log, and finally
    plot a grid of generated digits."""
    loaded = load_vae_dataset(dataset_name=hyper['dataset_name'],
                              batch_n=hyper['batch_n'],
                              epochs=hyper['epochs'],
                              run_with_sample=run_with_sample,
                              architecture=hyper['architecture'])
    # The loader returns (possibly adjusted) batch_n / epochs /
    # iter_per_epoch, which are written straight back into hyper.
    (train_dataset, test_dataset, test_images, hyper['batch_n'],
     hyper['epochs'], image_size, hyper['iter_per_epoch']) = loaded

    results_path = determine_path_to_save_results(
        model_type=hyper['model_type'], dataset_name=hyper['dataset_name'])
    model = setup_model(hyper=hyper, image_size=image_size)

    vae_opt = setup_vae_optimizer(model=model, hyper=hyper,
                                  model_type=hyper['model_type'])

    writer, logger = start_all_logging_instruments(
        hyper=hyper, results_path=results_path, test_images=test_images)

    train_vae_model(vae_opt=vae_opt, model=model, writer=writer, hyper=hyper,
                    train_dataset=train_dataset, test_dataset=test_dataset,
                    logger=logger, results_path=results_path,
                    test_images=test_images)

    plot_grid_of_generated_digits(
        model=model, n_required=vae_opt.n_required,
        fig_size=vae_opt.n_required,
        filename=results_path + '/samples_grid.html',
        digit_size=image_size[0])
Example #8
0
# Paths to plot output and the trained-model artifacts under inspection.
plots_path = './Results/Outputs/'
path_to_results = './Results/Current_Model/'
# hyper_file = 'hyper_planar2.pkl'
# weights_file = 'vae_planar2.h5'
# hyper_file = 'hyper_planar5.pkl'
# weights_file = 'vae_planar5.h5'
hyper_file = 'hyper_planar10.pkl'
weights_file = 'vae_planar10.h5'
# hyper_file = 'hyper_test.pkl'
# weights_file = 'vae_test.h5'
# NOTE(review): pickle.load executes arbitrary code on untrusted files;
# only load artifacts produced by this project.
with open(file=path_to_results + hyper_file, mode='rb') as f:
    hyper = pickle.load(f)

# NOTE(review): dataset_name and run_with_sample are not defined in this
# excerpt — presumably set earlier in the full script; confirm.
data = load_vae_dataset(dataset_name=dataset_name,
                        batch_n=hyper['batch_n'],
                        epochs=hyper['epochs'],
                        run_with_sample=run_with_sample,
                        architecture=hyper['architecture'])
# The loader also returns (possibly adjusted) batch_n / epochs /
# iter_per_epoch, written straight back into the hyper dict.
(train_dataset, test_dataset, test_images, hyper['batch_n'], hyper['epochs'],
 image_size, hyper['iter_per_epoch']) = data

model = setup_model(hyper=hyper, image_size=image_size)
model.load_weights(filepath=path_to_results + weights_file)
# NOTE(review): model_type is not defined in this excerpt — confirm source.
vae_opt = setup_vae_optimizer(model=model, hyper=hyper, model_type=model_type)

# diff: per test image, samples_n values for each discrete variable.
samples_n, total_test_images, im_idx = 100, 10_000, 0
shape = (total_test_images, samples_n, hyper['num_of_discrete_var'])
diff = np.zeros(shape=shape)
for test_image in test_dataset:
    z, x_logit, params = vae_opt.perform_fwd_pass(test_image)
    # Excerpt appears truncated here; mu/xi are unpacked but unused below.
    mu, xi = params
Example #9
0
    # NOTE(review): this excerpt starts mid-literal — the opening of the
    # `models` dict (and any earlier cases) is outside the visible snippet.
    1: {'model_dir': 'relax_igr', 'model_type': 'Relax_IGR'},
    2: {'model_dir': 'relax_gs', 'model_type': 'Relax_GS_Dis'},
}
# Select which trained model to evaluate and resolve its paths.
select_case = 1
hyper_file = 'hyper.pkl'
weights_file = 'vae.h5'
model_type = models[select_case]['model_type']
path_to_trained_models += models[select_case]['model_dir'] + '/'

# NOTE(review): pickle.load executes arbitrary code on untrusted files;
# only load artifacts produced by this project.
with open(file=path_to_trained_models + hyper_file, mode='rb') as f:
    hyper = pickle.load(f)

batch_n = hyper['batch_n']
# Seed TF's global RNG for reproducible shuffling/sampling.
tf.random.set_seed(seed=hyper['seed'])
# NOTE(review): dataset_name is not defined in this excerpt — presumably
# set earlier in the full script; confirm.
data = load_vae_dataset(dataset_name=dataset_name, batch_n=batch_n, epochs=hyper['epochs'],
                        run_with_sample=False,
                        architecture=hyper['architecture'], hyper=hyper)
# Only the test images and refreshed hyper dict are used below.
_, _, np_test_images, hyper = data
vae_opt = construct_nets_and_optimizer(hyper=hyper, model_type=model_type)
vae_opt.nets.load_weights(filepath=path_to_trained_models + weights_file)

# Run the test images through encode -> RELAX variables -> decode and
# squash logits to per-pixel Bernoulli probabilities.
x = tf.constant(np_test_images, dtype=tf.float32)
params = vae_opt.nets.encode(x)
vae_opt.offload_params(params)
one_hot = vae_opt.get_relax_variables_from_params(x, params)[-1]
x_logit = vae_opt.decode([one_hot])
recon_probs = tf.math.sigmoid(x_logit)
# Show reconstructions in a 5x4 grid — presumably <= 20 test images,
# otherwise plt.subplot raises; confirm against the loader.
plt.figure(figsize=(5, 4), dpi=100)
for i in range(np_test_images.shape[0]):
    plt.subplot(5, 4, i + 1)
    plt.imshow(recon_probs[i, :, :, 0, 0], cmap='gray')