Example #1
 def denormalize(self, v):
     mean = ptu.from_numpy(self.mean)
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         mean = mean.unsqueeze(0)
         std = std.unsqueeze(0)
     return mean + v * std
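
All of the examples on this page go through a helper module imported as ptu. Its implementation is not shown here; as an assumption, a minimal stand-in providing the two conversions the examples rely on might look like this (the device handling is a guess):

import torch

# Assumed global device; the real helper module may configure this differently.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def from_numpy(np_array):
    # Convert a numpy array to a float32 tensor on the configured device.
    return torch.from_numpy(np_array).float().to(device)

def get_numpy(tensor):
    # Move a tensor back to the CPU and return it as a numpy array.
    return tensor.detach().cpu().numpy()
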
Example #2
 def normalize(self, v, clip_range=None):
     if clip_range is None:
         clip_range = self.default_clip_range
     mean = ptu.from_numpy(self.mean)
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         # Unsqueeze along the batch dimension and use automatic broadcasting
         mean = mean.unsqueeze(0)
         std = std.unsqueeze(0)
     return torch.clamp((v - mean) / std, -clip_range, clip_range)
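
Examples #1 and #2 are methods of a normalizer object that stores self.mean and self.std as numpy arrays. A hypothetical usage sketch (the class name and statistics below are assumptions, and plain torch.from_numpy stands in for ptu.from_numpy) showing that denormalize() inverts normalize() as long as no clipping occurs:

import numpy as np
import torch

class SketchNormalizer:
    # Hypothetical container for the statistics the methods above expect.
    def __init__(self, mean, std, default_clip_range=5.0):
        self.mean = np.asarray(mean, dtype=np.float32)
        self.std = np.asarray(std, dtype=np.float32)
        self.default_clip_range = default_clip_range

    def normalize(self, v, clip_range=None):
        if clip_range is None:
            clip_range = self.default_clip_range
        mean = torch.from_numpy(self.mean)
        std = torch.from_numpy(self.std)
        if v.dim() == 2:
            mean = mean.unsqueeze(0)
            std = std.unsqueeze(0)
        return torch.clamp((v - mean) / std, -clip_range, clip_range)

    def denormalize(self, v):
        mean = torch.from_numpy(self.mean)
        std = torch.from_numpy(self.std)
        if v.dim() == 2:
            mean = mean.unsqueeze(0)
            std = std.unsqueeze(0)
        return mean + v * std

normalizer = SketchNormalizer(mean=[0.0, 1.0], std=[1.0, 2.0])
batch = torch.tensor([[0.5, 3.0], [1.0, -1.0]])
# The round trip is lossless while the normalized values stay inside the clip range.
assert torch.allclose(normalizer.denormalize(normalizer.normalize(batch)), batch)
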
Example #3
 def _reconstruct_img(self, flat_img):
     latent_distribution_params = self.vae.encode(
         ptu.from_numpy(flat_img.reshape(1, -1)))
     reconstructions, _ = self.vae.decode(latent_distribution_params[0])
     imgs = ptu.get_numpy(reconstructions)
     imgs = imgs.reshape(1, self.input_channels, self.imsize, self.imsize)
     return imgs[0]
Example #4
 def denormalize_scale(self, v):
     """
     Only denormalize the scale. Do not add the mean.
     """
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         std = std.unsqueeze(0)
     return v * std
Example #5
 def normalize_scale(self, v):
     """
     Only normalize the scale. Do not subtract the mean.
     """
     std = ptu.from_numpy(self.std)
     if v.dim() == 2:
         std = std.unsqueeze(0)
     return v / std
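
A quick sanity check (the numbers are made up) that normalize_scale() and denormalize_scale() are exact inverses, since only the std is applied and no mean is added or subtracted:

import numpy as np
import torch

std = torch.from_numpy(np.array([2.0, 4.0], dtype=np.float32))
v = torch.tensor([[1.0, 8.0], [3.0, -4.0]])
# Dividing and then multiplying by std recovers the original values;
# no mean shift is involved, unlike the full normalize/denormalize pair.
assert torch.allclose((v / std.unsqueeze(0)) * std.unsqueeze(0), v)
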
Example #6
def reconstruct_img(flat_img):
    latent_distribution_params = vae.encode(
        ptu.from_numpy(flat_img.reshape(1, -1)).cuda())
    reconstructions, _ = vae.decode(latent_distribution_params[0])
    imgs = ptu.get_numpy(reconstructions)
    imgs = imgs.reshape(1, vae.input_channels, vae.imsize,
                        vae.imsize).transpose(0, 3, 2, 1)  # BCWH -> BHWC
    img = cv2.cvtColor(imgs[0], cv2.COLOR_RGB2BGR)
    return img
Example #7
def get_latent(raw_image):
    """Get latent variables (mean vector)"""
    image = cv2.resize(raw_image, (vae.imsize, vae.imsize))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = normalize_image(image)
    # swap order and reshape
    flat_img = torch.from_numpy(image).permute(2, 1,
                                               0).flatten(start_dim=1).numpy()
    latent_distribution_params = vae.encode(
        ptu.from_numpy(flat_img.reshape(1, -1)).cuda())
    latents = ptu.get_numpy(latent_distribution_params[0])
    return latents
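
normalize_image() is not defined in this example. As an assumption based on the usual convention, it likely scales uint8 pixel values into [0, 1] floats; a minimal stand-in:

import numpy as np

def normalize_image(image):
    # Assumed helper: scale uint8 pixels to float32 values in [0, 1],
    # which is the range the flattened VAE input above appears to expect.
    return np.asarray(image, dtype=np.float32) / 255.0
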
Example #8
 def _update_info(self, info, obs):
     latent_distribution_params = self.vae.encode(
         ptu.from_numpy(obs[self.vae_input_observation_key].reshape(1, -1)))
     latent_obs, logvar = ptu.get_numpy(latent_distribution_params[0])[0], \
                          ptu.get_numpy(latent_distribution_params[1])[0]
     # assert (latent_obs == obs['latent_observation']).all()
     latent_goal = self.desired_goal['latent_desired_goal']
     dist = latent_goal - latent_obs
     var = np.exp(logvar.flatten())
     var = np.maximum(var, self.reward_min_variance)
     err = dist * dist / 2 / var
     mdist = np.sum(err)  # Mahalanobis distance
     info["vae_mdist"] = mdist
     info["vae_success"] = 1 if mdist < self.epsilon else 0
     info["vae_dist"] = np.linalg.norm(dist, ord=self.norm_order)
     info["vae_dist_l1"] = np.linalg.norm(dist, ord=1)
     info["vae_dist_l2"] = np.linalg.norm(dist, ord=2)
Example #9
def _elem_or_tuple_to_variable(elem_or_tuple):
    if isinstance(elem_or_tuple, tuple):
        return tuple(_elem_or_tuple_to_variable(e) for e in elem_or_tuple)
    return ptu.from_numpy(elem_or_tuple).float()
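
A usage sketch for the recursion above (plain torch.from_numpy stands in for ptu.from_numpy so the snippet runs on its own): tuples are converted element-wise and the nesting structure is preserved.

import numpy as np
import torch

def _elem_or_tuple_to_variable(elem_or_tuple):
    if isinstance(elem_or_tuple, tuple):
        return tuple(_elem_or_tuple_to_variable(e) for e in elem_or_tuple)
    return torch.from_numpy(elem_or_tuple).float()

batch = (np.zeros((2, 3)), (np.ones(4), np.arange(5, dtype=np.float64)))
converted = _elem_or_tuple_to_variable(batch)
# Nested tuples stay tuples; every array leaf becomes a float32 tensor.
print(type(converted[1]), converted[1][1].dtype)  # <class 'tuple'> torch.float32
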
Example #10
def torch_ify(np_array_or_other):
    if isinstance(np_array_or_other, np.ndarray):
        return ptu.from_numpy(np_array_or_other)
    else:
        return np_array_or_other
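
torch_ify() converts only numpy arrays and passes everything else through unchanged, so it is safe to call on inputs that may already be tensors. A small demonstration (with torch.from_numpy standing in for ptu.from_numpy):

import numpy as np
import torch

def torch_ify(np_array_or_other):
    if isinstance(np_array_or_other, np.ndarray):
        return torch.from_numpy(np_array_or_other)
    else:
        return np_array_or_other

print(type(torch_ify(np.zeros(3))))  # <class 'torch.Tensor'>
t = torch.zeros(3)
print(torch_ify(t) is t)             # True: tensors pass through untouched
print(torch_ify(1.5))                # 1.5: scalars are also left alone
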
Example #11
 def _encode(self, imgs):
     latent_distribution_params = self.vae.encode(ptu.from_numpy(imgs))
     return ptu.get_numpy(latent_distribution_params[0])
Example #12
 def _decode(self, latents):
     reconstructions, _ = self.vae.decode(ptu.from_numpy(latents))
     decoded = ptu.get_numpy(reconstructions)
     return decoded
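
Examples #11 and #12 assume a VAE whose encode() returns a tuple of latent-distribution parameters (mean first) and whose decode() returns a tuple whose first element is the reconstruction. A toy stand-in (not the real model) that satisfies that interface:

import torch
import torch.nn as nn

class ToyVAE(nn.Module):
    # Minimal sketch of the interface used above; the real VAE is more involved.
    def __init__(self, input_dim=16, latent_dim=4):
        super().__init__()
        self.enc = nn.Linear(input_dim, 2 * latent_dim)
        self.dec = nn.Linear(latent_dim, input_dim)

    def encode(self, x):
        mean, logvar = self.enc(x).chunk(2, dim=-1)
        return mean, logvar

    def decode(self, z):
        return self.dec(z), None

vae = ToyVAE()
imgs = torch.randn(8, 16)
mean, _ = vae.encode(imgs)               # _encode keeps only the mean
reconstructions, _ = vae.decode(mean)    # _decode keeps only the reconstruction
print(mean.shape, reconstructions.shape)  # torch.Size([8, 4]) torch.Size([8, 16])
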