Example #1
 def get_debug_batch(self, train=True):
     dataset = self.train_dataset if train else self.test_dataset
     X, Y = dataset
     ind = np.random.randint(0, Y.shape[0], self.batch_size)
     X = X[ind, :]
     Y = Y[ind, :]
     return ptu.from_numpy(X), ptu.from_numpy(Y)
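Every example here goes through `ptu.from_numpy` (and often `ptu.get_numpy`) from an rlkit-style `pytorch_util` module. As a rough stand-in, the two helpers can be assumed to behave like the sketch below; the real module's device handling may differ.

 import torch

 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

 def from_numpy(np_array):
     # numpy array -> float tensor on the configured device
     return torch.from_numpy(np_array).float().to(device)

 def get_numpy(tensor):
     # tensor -> numpy array on the CPU, detached from the autograd graph
     return tensor.to("cpu").detach().numpy()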
Example #2
 def denormalize(self, v):
     mean = ptu.from_numpy(self.mean)
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         mean = mean.unsqueeze(0)
         std = std.unsqueeze(0)
     return mean + v * std
Example #3
 def normalize(self, v, clip_range=None):
     if clip_range is None:
         clip_range = self.default_clip_range
     mean = ptu.from_numpy(self.mean)
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         # Unsqueeze along the batch dimension and rely on automatic broadcasting
         mean = mean.unsqueeze(0)
         std = std.unsqueeze(0)
     return torch.clamp((v - mean) / std, -clip_range, clip_range)
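Up to clipping, this `normalize` is the inverse of `denormalize` from Example #2: for inputs whose normalized values stay inside `clip_range`, the round trip recovers the original tensor. A quick standalone check that inlines the same arithmetic with made-up `mean`, `std`, and clip range (a sketch, not the real class):

 import numpy as np
 import torch

 mean = torch.from_numpy(np.array([0.5, -1.0], dtype=np.float32))
 std = torch.from_numpy(np.array([2.0, 0.5], dtype=np.float32))
 clip_range = 5.0

 v = torch.tensor([[1.0, -0.5], [3.0, 0.0]])                    # (batch, dim)
 normalized = torch.clamp((v - mean) / std, -clip_range, clip_range)
 recovered = mean + normalized * std                            # denormalize
 assert torch.allclose(recovered, v)  # holds because nothing was clipped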
Example #4
 def get_dataset_stats(self, data):
     torch_input = ptu.from_numpy(normalize_image(data))
     mus, log_vars = self.model.encode(torch_input)
     mus = ptu.get_numpy(mus)
     mean = np.mean(mus, axis=0)
     std = np.std(mus, axis=0)
     return mus, mean, std
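Several of these examples call `normalize_image` before handing data to `ptu.from_numpy`. The helper itself is not shown; a plausible minimal version, assuming uint8 pixels in [0, 255] (the actual implementation in the source codebase may differ):

 import numpy as np

 def normalize_image(image, dtype=np.float32):
     # Assumed behavior: map uint8 pixel values in [0, 255] to floats in [0, 1].
     assert image.dtype == np.uint8
     return image.astype(dtype) / 255.0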
Example #5
 def _reconstruct_img(self, flat_img):
     latent_distribution_params = self.vae.encode(
         ptu.from_numpy(flat_img.reshape(1, -1)))
     reconstructions, _ = self.vae.decode(latent_distribution_params[0])
     imgs = ptu.get_numpy(reconstructions)
     imgs = imgs.reshape(1, self.input_channels, self.imsize, self.imsize)
     return imgs[0]
Example #6
 def _update_info(self, info, obs):
     latent_distribution_params = self.vae.encode(
         ptu.from_numpy(obs[self.vae_input_observation_key].reshape(1, -1)))
     latent_obs = ptu.get_numpy(latent_distribution_params[0])[0]
     latent_goal = self.desired_goal['latent_desired_goal']
     dist = latent_goal - latent_obs
     info["vae_dist"] = np.linalg.norm(dist, ord=2)
Example #7
 def denormalize_scale(self, v):
     """
     Only denormalize the scale. Do not add the mean.
     """
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         std = std.unsqueeze(0)
     return v * std
Example #8
 def normalize_scale(self, v):
     """
     Only normalize the scale. Do not subtract the mean.
     """
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         std = std.unsqueeze(0)
     return v / std
Example #9
 def get_batch(self, train=True):
     dataset = self.train_dataset if train else self.test_dataset
     ind = np.random.randint(0, len(dataset), self.batch_size)
     samples = normalize_image(dataset[ind, :])
     if self.normalize:
         # Rescale the mean-subtracted pixels from [-1, 1] back into [0, 1]
         samples = ((samples - self.train_data_mean) + 1) / 2
     if self.background_subtract:
         samples = samples - self.train_data_mean
     return ptu.from_numpy(samples)
Example #10
 def _dump_imgs_and_reconstructions(self, idxs, filename):
     imgs = []
     recons = []
     for i in idxs:
         img_np = self.train_dataset[i]
         img_torch = ptu.from_numpy(normalize_image(img_np))
         recon, *_ = self.model(img_torch)

         img = img_torch.view(self.input_channels, self.imsize, self.imsize)
         rimg = recon.view(self.input_channels, self.imsize, self.imsize)
         imgs.append(img)
         recons.append(rimg)
     all_imgs = torch.stack(imgs + recons)
     save_file = osp.join(logger.get_snapshot_dir(), filename)
     save_image(
         all_imgs.data,
         save_file,
         nrow=4,
     )
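Example #10 leans on torchvision's `save_image` plus an `osp`/`logger` setup from the surrounding codebase. A standalone sketch of the same grid-dumping idea, with hypothetical random tensors and file name standing in for the real images and snapshot directory:

 import torch
 from torchvision.utils import save_image

 # Stand-ins for originals and reconstructions, each shaped (channels, height, width).
 imgs = [torch.rand(3, 48, 48) for _ in range(4)]
 recons = [torch.rand(3, 48, 48) for _ in range(4)]

 # Stack originals followed by reconstructions and write them as a grid, 4 per row,
 # mirroring the save_image call above.
 save_image(torch.stack(imgs + recons), "imgs_and_recons.png", nrow=4)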
Example #11
 def _elem_or_tuple_to_variable(elem_or_tuple):
     if isinstance(elem_or_tuple, tuple):
         return tuple(_elem_or_tuple_to_variable(e) for e in elem_or_tuple)
     return ptu.from_numpy(elem_or_tuple).float()
Example #12
 def _reconstruction_squared_error_np_to_np(self, np_imgs):
     torch_input = ptu.from_numpy(normalize_image(np_imgs))
     recons, *_ = self.model(torch_input)
     error = torch_input - recons
     return ptu.get_numpy((error**2).sum(dim=1))
Example #13
 def _kl_np_to_np(self, np_imgs):
     torch_input = ptu.from_numpy(normalize_image(np_imgs))
     mu, log_var = self.model.encode(torch_input)
     return ptu.get_numpy(
         -torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim=1))
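The sum in Example #13 is the closed-form KL divergence between the diagonal Gaussian N(mu, exp(log_var)) and the standard normal, except that the conventional factor of 1/2 is left out. A small sanity-check sketch against `torch.distributions` (random inputs, purely illustrative):

 import torch
 from torch.distributions import Normal, kl_divergence

 mu = torch.randn(4, 8)
 log_var = torch.randn(4, 8)

 code_value = -torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim=1)
 analytic = kl_divergence(Normal(mu, (0.5 * log_var).exp()), Normal(0.0, 1.0)).sum(dim=1)
 assert torch.allclose(code_value, 2 * analytic, atol=1e-4)  # factor of 2 from the missing 1/2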
Example #14
 def _encode(self, imgs):
     latent_distribution_params = self.vae.encode(ptu.from_numpy(imgs))
     return ptu.get_numpy(latent_distribution_params[0])
Example #15
 def _decode(self, latents):
     reconstructions, _ = self.vae.decode(ptu.from_numpy(latents))
     decoded = ptu.get_numpy(reconstructions)
     return decoded