Example #1
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
# ConvVAE and CONFIGS come from the surrounding project.


def test_images_to_z(config_name: str, vae_model_descr: str, vae_model: Path,
                     **vae_kwargs):

    config = CONFIGS[config_name]
    episodes = config['episodes']
    episode_length = config['episode_length']

    vae = ConvVAE(is_training=False, reuse=False, gpu_mode=False, **vae_kwargs)
    vae.load_json(vae_model)
    batch_size = vae.batch_size
    z_size = vae.z_size

    images_path = f'../data/{config_name}_imgs.npz'
    z_path = f'../data/{config_name}_latent_{vae_model_descr}.npz'

    data = np.load(images_path)['arr_0'] / 255.

    all_z = np.load(z_path)['arr_0']

    # Compare 20 random frames against their VAE reconstructions.
    for _ in range(20):
        rand_i = np.random.randint(0, len(data))
        ep = rand_i // episode_length
        ep_step = rand_i % episode_length
        z = all_z[ep, ep_step]
        # The decoder graph expects a full batch, so pad the single latent.
        padded = np.zeros((batch_size, z_size))
        padded[0] = z
        recon = vae.decode(padded)[0]
        img = data[rand_i]
        # Original on the left, reconstruction on the right.
        zipped = np.zeros((64, 128, 3))
        zipped[:, :64] = img
        zipped[:, 64:] = recon
        plt.imshow(zipped)
        plt.show()
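A plausible call site for this helper, reusing the model file and dataset prefix that appear in Example #5; the descriptor string and the keyword arguments are assumptions mirroring the constructor parameters used elsewhere on this page:

from pathlib import Path

test_images_to_z('push_sphere_v0', 'kl2rl1-z16',
                 Path('tf_vae/kl2rl1-z16-b250-push_sphere_v0vae-fetch199.json'),
                 z_size=16, batch_size=32)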
Example #2
import numpy as np
from pathlib import Path
# ConvVAE and CONFIGS come from the surrounding project.


def images_to_z(config_name: str, vae_model_descr: str, vae_model: Path,
                **vae_kwargs):

    config = CONFIGS[config_name]
    episodes = config['episodes']
    episode_length = config['episode_length']

    vae = ConvVAE(is_training=False, reuse=False, gpu_mode=False, **vae_kwargs)
    vae.load_json(vae_model)
    batch_size = vae.batch_size
    z_size = vae.z_size

    images_path = f'../data/{config_name}_imgs.npz'
    z_path = f'../data/{config_name}_latent_{vae_model_descr}.npz'

    data = np.load(images_path)['arr_0'] / 255.
    output_z = np.zeros((len(data), z_size))

    # Encode in fixed-size batches; the ConvVAE graph is built for a
    # fixed batch_size, so the final short batch is zero-padded.
    n_batches = int(np.ceil(len(data) / batch_size))
    for b in range(n_batches):
        batch = data[batch_size * b:batch_size * (b + 1)]
        actual_bs = len(batch)
        if actual_bs < batch_size:
            padded = np.zeros((batch_size, ) + data.shape[1:])
            padded[:actual_bs] = batch
            batch = padded
        # Drop the rows that correspond to padding.
        batch_z = vae.encode(batch)[:actual_bs]
        output_z[batch_size * b:batch_size * (b + 1)] = batch_z

    # Regroup the flat latent array into (episodes, episode_length, z_size).
    output_z = output_z.reshape((episodes, episode_length, -1))
    np.savez_compressed(z_path, output_z)
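The compressed archive stores the latents under NumPy's default 'arr_0' key, so they reload the same way the images do; the descriptor string below is a hypothetical example:

import numpy as np

all_z = np.load('../data/push_sphere_v0_latent_kl2rl1-z16.npz')['arr_0']
print(all_z.shape)  # (episodes, episode_length, z_size)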
Example #3
    def __load_vae(self):
        # Builds a ConvVAE from self.vae_path / self.z_size, which the
        # owning class is expected to set.
        vae = None
        if self.vae_path is not None:
            vae = ConvVAE(z_size=self.z_size,
                          is_training=False,
                          reuse=False,
                          gpu_mode=False)
            vae.load_json(self.vae_path)
        return vae

    def __init__(self,
                 vae_model_path: Path,
                 env_model: BaseModel = None,
                 z_filter=None,
                 z_size: int = 16,
                 batch_size: int = 32,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        vae = ConvVAE(z_size=z_size,
                      batch_size=batch_size,
                      is_training=False,
                      reuse=False,
                      gpu_mode=False)
        vae.load_json(vae_model_path)
        self.vae = vae
        # The VAE operates on 64x64 RGB frames.
        self.vae_input_shape = (64, 64, 3)
        self.env_model = env_model
        self.z_filter = z_filter
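These methods belong to a larger class; a hypothetical instantiation, assuming the class is named VaeWrapper and its base class accepts the forwarded *args/**kwargs:

from pathlib import Path

wrapper = VaeWrapper(  # hypothetical name for the class these methods belong to
    vae_model_path=Path('tf_vae/kl2rl1-z16-b250-push_sphere_v0vae-fetch199.json'),
    z_size=16,
    batch_size=32)
z = wrapper.vae.encode(frames)  # frames: array of shape (32, 64, 64, 3) in [0, 1]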
Example #5
import os

import imageio
import numpy as np
from tqdm import tqdm
# ConvVAE comes from the surrounding project.


def main():
    """ INIT DATA """
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # override on multi-GPU systems
    z_size = 16
    batch_size = 32
    data_path = "../data"
    dataset = np.load(os.path.join(data_path, 'push_sphere_v0_imgs.npz'))
    dataset = dataset['arr_0'] / 255.
    """ LOAD MODEL """
    vae = ConvVAE(z_size=z_size,
                  batch_size=batch_size,
                  is_training=False,
                  reuse=False,
                  gpu_mode=False)
    vae.load_json("tf_vae/kl2rl1-z16-b250-push_sphere_v0vae-fetch199.json")

    all_original_images = []
    all_predicted_images = []

    # Round-trip ten batches (indices 2..11) through encoder and decoder.
    for i in tqdm(range(2, 12)):

        batch_images = dataset[i * batch_size:(i + 1) * batch_size]
        pred_images = vae.decode(vae.encode(batch_images))

        all_original_images.append(batch_images)
        all_predicted_images.append(pred_images)

    all_original_images = np.concatenate(all_original_images)
    all_predicted_images = np.concatenate(all_predicted_images)

    n_images, rows, cols, chs = all_original_images.shape
    zipped = np.zeros((n_images, rows, cols * 2, chs))

    zipped[:, :, :cols] = all_original_images
    zipped[:, :, cols:] = all_predicted_images
    zipped = (zipped * 255.).astype(np.uint8)

    video_path = './out/vae_test.mov'  # side-by-side comparison clip
    imageio.mimwrite(video_path, zipped, quality=10)
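As a numeric complement to the visual check, a per-frame reconstruction error could be added at the end of main(); a minimal sketch, not part of the original script (the float arrays are untouched by the uint8 conversion of zipped):

    # Mean squared error per frame; large values flag poor reconstructions.
    mse = ((all_original_images - all_predicted_images) ** 2).mean(axis=(1, 2, 3))
    print(f'mean MSE: {mse.mean():.5f}, worst frame: {int(mse.argmax())}')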
Example #6
import os

import cv2
import numpy as np
# ConvVAE comes from the surrounding project; x_train, batch_size,
# all_settings, NUM_EPOCH, learning_rate and kl_tolerance are defined
# earlier in the original script.

total_length = len(x_train)
num_batches = int(np.floor(total_length / batch_size))
for setting in all_settings:

    final_model_path = "tf_vae/{}vae-fetch{}.json".format(
        setting.name, NUM_EPOCH - 1)
    if os.path.exists(final_model_path):
        print("Model for setting {} exists. Skipping...".format(setting.name))
        continue

    print("Start setting: {}".format(setting.name))
    vae = ConvVAE(z_size=setting.z_size,
                  batch_size=batch_size,
                  learning_rate=learning_rate,
                  kl_tolerance=kl_tolerance,
                  is_training=True,
                  reuse=False,
                  gpu_mode=False,
                  reconstruction_option=setting.reconstruct_opt,
                  kl_option=setting.kl_opt)
    # vae.load_json("tf_vae/kl2-rl1-b100-colordataasetvae-fetch200.json")
    # train loop:
    train_step = train_loss = r_loss = kl_loss = None

    # Use separate list objects; chained assignment (a = b = []) aliases
    # a single list under all four names.
    train_loss_list, r_loss_list = [], []
    kl_loss_list, loss_grads_list = [], []
    smoothing = 0.9
    init_disentanglement = disentanglement = setting.b  # B
    max_capacity = 25
    capacity_change_duration = num_batches * 100  # arbitrary: = 100 epochs of disentanglement
    c = 0
    # (the rest of the training loop is truncated in this excerpt)
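The loop body is cut off above; assuming this ConvVAE keeps the original World Models training interface (sess, x, loss, r_loss, kl_loss, global_step and train_op attributes), one epoch of gradient steps would look roughly like this sketch:

    for b in range(num_batches):
        batch = x_train[b * batch_size:(b + 1) * batch_size]
        feed = {vae.x: batch}
        (train_loss, r_loss, kl_loss, train_step, _) = vae.sess.run(
            [vae.loss, vae.r_loss, vae.kl_loss, vae.global_step, vae.train_op],
            feed)
        train_loss_list.append(train_loss)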
# The script then encodes a dataset with a trained model and inspects the
# per-dimension variance of the latents (z_size, batch_size and
# kl_tolerance are defined earlier in the original script).
learning_rate = 0.001
data_path = "../data"
dataset = np.load(os.path.join(data_path, 'fetch_sphere_big_imgs.npz'))
dataset = dataset['arr_0']
np.random.shuffle(dataset)
dataset = dataset / 255.
# Resize every frame to the 64x64 input the VAE expects.
new_data = []
for d in dataset:
    new_data.append(cv2.resize(d, (64, 64), interpolation=cv2.INTER_AREA))
dataset = np.array(new_data)
train_ratio = int(0.8 * len(dataset))
# x_test = dataset[train_ratio:]
x_test = dataset
total_length = len(x_test)
num_batches = int(np.floor(total_length / batch_size))

""" LOAD MODEL """
vae = ConvVAE(z_size=z_size, batch_size=batch_size, learning_rate=learning_rate, kl_tolerance=kl_tolerance,
              is_training=True, reuse=False, gpu_mode=False, reconstruction_option=1,
              kl_option=2)
# vae.load_json("best_models/kl2-rl1-b10vae-fetch950.json")
vae.load_json("best_models/kl1-rl1-b100vae-fetch950.json")
print(num_batches)
# Encode the whole dataset batch by batch, then measure the variance of each
# latent dimension; near-zero variance marks a dimension the encoder ignores.
all_z = []
for i in range(num_batches):
    batch_z = vae.encode(x_test[i * batch_size: (i + 1) * batch_size])
    all_z.extend(batch_z)
all_z = np.array(all_z)
variances = np.var(all_z, axis=0)
print(variances)
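A natural follow-up to the variance printout is to count the active latent dimensions; the 0.1 cutoff below is an assumed convention, not something the script above uses:

active = variances > 0.1  # assumed cutoff for an "active" dimension
print(f'{active.sum()} of {len(variances)} latent dimensions are active')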