import torch as th

from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution

# Module-level test constants (example values)
N_ACTIONS = 2
N_FEATURES = 3
N_SAMPLES = 10000


def test_squashed_gaussian(model_class):
    """
    Test run with squashed Gaussian (notably entropy computation)

    ``model_class`` is injected by pytest parametrization (e.g. A2C or PPO).
    """
    model = model_class('MlpPolicy', 'Pendulum-v0', use_sde=True, n_steps=100,
                        policy_kwargs=dict(squash_output=True))
    model.learn(500)

    gaussian_mean = th.rand(N_SAMPLES, N_ACTIONS)
    dist = SquashedDiagGaussianDistribution(N_ACTIONS)
    _, log_std = dist.proba_distribution_net(N_FEATURES)
    dist = dist.proba_distribution(gaussian_mean, log_std)
    actions = dist.get_actions()
    # The tanh squashing must keep every sampled action inside [-1, 1]
    assert th.max(th.abs(actions)) <= 1.0
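
# A minimal standalone sketch of what SquashedDiagGaussianDistribution does to
# keep actions in [-1, 1]: squash a Gaussian sample with tanh and correct the
# log-probability with the change-of-variables term log(1 - tanh(x)^2).
# The helper name and the epsilon value below are illustrative assumptions,
# not the library's exact internals.
def _squash_and_correct_log_prob(gaussian_sample, gaussian_log_prob, eps=1e-6):
    # Squash the unbounded Gaussian sample into [-1, 1]
    action = th.tanh(gaussian_sample)
    # log pi(a) = log mu(u) - sum_i log(1 - tanh(u_i)^2);
    # eps avoids log(0) when the action saturates at -1 or 1
    log_prob = gaussian_log_prob - th.sum(th.log(1.0 - action ** 2 + eps), dim=1)
    return action, log_prob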
from typing import Any, Dict, List, Optional, Tuple, Type, Union

import gym
import torch as th
from torch import nn

from stable_baselines3.common.distributions import (SquashedDiagGaussianDistribution,
                                                    StateDependentNoiseDistribution)
from stable_baselines3.common.policies import BasePolicy, create_sde_features_extractor
from stable_baselines3.common.preprocessing import get_action_dim
from stable_baselines3.common.torch_layers import create_mlp

# Cap the standard deviation of the actor
LOG_STD_MAX = 2
LOG_STD_MIN = -20


class Actor(BasePolicy):
    """
    Actor network (policy) for SAC.

    :param observation_space: (gym.spaces.Space) Observation space
    :param action_space: (gym.spaces.Space) Action space
    :param net_arch: ([int]) Network architecture
    :param features_extractor: (nn.Module) Network to extract features
        (a CNN when using images, a nn.Flatten() layer otherwise)
    :param features_dim: (int) Number of features
    :param activation_fn: (Type[nn.Module]) Activation function
    :param use_sde: (bool) Whether to use State Dependent Exploration or not
    :param log_std_init: (float) Initial value for the log standard deviation
    :param full_std: (bool) Whether to use (n_features x n_actions) parameters
        for the std instead of only (n_features,) when using gSDE.
    :param sde_net_arch: ([int]) Network architecture for extracting features
        when using gSDE. If None, the latent features from the policy will be used.
        Pass an empty list to use the states as features.
    :param use_expln: (bool) Use ``expln()`` function instead of ``exp()`` when using gSDE
        to ensure a positive standard deviation (cf paper). It keeps the variance
        above zero and prevents it from growing too fast. In practice, ``exp()`` is usually enough.
    :param clip_mean: (float) Clip the mean output when using gSDE to avoid numerical instability.
    :param normalize_images: (bool) Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param device: (Union[th.device, str]) Device on which the code should run.
    """

    def __init__(self, observation_space: gym.spaces.Space,
                 action_space: gym.spaces.Space,
                 net_arch: List[int],
                 features_extractor: nn.Module,
                 features_dim: int,
                 activation_fn: Type[nn.Module] = nn.ReLU,
                 use_sde: bool = False,
                 log_std_init: float = -3,
                 full_std: bool = True,
                 sde_net_arch: Optional[List[int]] = None,
                 use_expln: bool = False,
                 clip_mean: float = 2.0,
                 normalize_images: bool = True,
                 device: Union[th.device, str] = 'auto'):
        super(Actor, self).__init__(observation_space, action_space,
                                    features_extractor=features_extractor,
                                    normalize_images=normalize_images,
                                    device=device,
                                    squash_output=True)

        # Save arguments to re-create object at loading
        self.use_sde = use_sde
        self.sde_features_extractor = None
        self.sde_net_arch = sde_net_arch
        self.net_arch = net_arch
        self.features_dim = features_dim
        self.activation_fn = activation_fn
        self.log_std_init = log_std_init
        self.use_expln = use_expln
        self.full_std = full_std
        self.clip_mean = clip_mean

        action_dim = get_action_dim(self.action_space)
        latent_pi_net = create_mlp(features_dim, -1, net_arch, activation_fn)
        self.latent_pi = nn.Sequential(*latent_pi_net)
        last_layer_dim = net_arch[-1] if len(net_arch) > 0 else features_dim

        if self.use_sde:
            latent_sde_dim = last_layer_dim
            # Separate feature extractor for gSDE
            if sde_net_arch is not None:
                self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor(features_dim,
                                                                                            sde_net_arch,
                                                                                            activation_fn)

            self.action_dist = StateDependentNoiseDistribution(action_dim, full_std=full_std, use_expln=use_expln,
                                                               learn_features=True, squash_output=True)
            self.mu, self.log_std = self.action_dist.proba_distribution_net(latent_dim=last_layer_dim,
                                                                            latent_sde_dim=latent_sde_dim,
                                                                            log_std_init=log_std_init)
            # Avoid numerical issues by limiting the mean of the Gaussian
            # to be in [-clip_mean, clip_mean]
            if clip_mean > 0.0:
                self.mu = nn.Sequential(self.mu, nn.Hardtanh(min_val=-clip_mean, max_val=clip_mean))
        else:
            self.action_dist = SquashedDiagGaussianDistribution(action_dim)
            self.mu = nn.Linear(last_layer_dim, action_dim)
            self.log_std = nn.Linear(last_layer_dim, action_dim)

    def _get_data(self) -> Dict[str, Any]:
        data = super()._get_data()

        data.update(dict(
            net_arch=self.net_arch,
            features_dim=self.features_dim,
            activation_fn=self.activation_fn,
            use_sde=self.use_sde,
            log_std_init=self.log_std_init,
            full_std=self.full_std,
            sde_net_arch=self.sde_net_arch,
            use_expln=self.use_expln,
            features_extractor=self.features_extractor,
            clip_mean=self.clip_mean
        ))
        return data

    def get_std(self) -> th.Tensor:
        """
        Retrieve the standard deviation of the action distribution.
        Only useful when using gSDE.
        It corresponds to ``th.exp(log_std)`` in the normal case,
        but is slightly different when using ``expln`` function
        (cf StateDependentNoiseDistribution doc).

        :return: (th.Tensor)
        """
        msg = 'get_std() is only available when using gSDE'
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        return self.action_dist.get_std(self.log_std)

    def reset_noise(self, batch_size: int = 1) -> None:
        """
        Sample new weights for the exploration matrix, when using gSDE.

        :param batch_size: (int)
        """
        msg = 'reset_noise() is only available when using gSDE'
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        self.action_dist.sample_weights(self.log_std, batch_size=batch_size)

    def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:
        """
        Get the parameters for the action distribution.

        :param obs: (th.Tensor)
        :return: (Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]])
            Mean, standard deviation and optional keyword arguments.
        """
        features = self.extract_features(obs)
        latent_pi = self.latent_pi(features)
        mean_actions = self.mu(latent_pi)

        if self.use_sde:
            latent_sde = latent_pi
            if self.sde_features_extractor is not None:
                latent_sde = self.sde_features_extractor(features)
            return mean_actions, self.log_std, dict(latent_sde=latent_sde)
        # Unstructured exploration (Original implementation)
        log_std = self.log_std(latent_pi)
        # Original implementation to cap the standard deviation
        log_std = th.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
        return mean_actions, log_std, {}

    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # Note: the action is squashed
        return self.action_dist.actions_from_params(mean_actions, log_std,
                                                    deterministic=deterministic, **kwargs)

    def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # Return action and associated log prob
        return self.action_dist.log_prob_from_params(mean_actions, log_std, **kwargs)

    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self.forward(observation, deterministic)
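
# A minimal usage sketch of the Actor above, assuming a small Box
# observation/action space and SB3's FlattenExtractor as the features
# extractor; the spaces, network sizes, and this __main__ guard are
# illustrative assumptions, not part of the class itself.
if __name__ == '__main__':
    import numpy as np
    from stable_baselines3.common.torch_layers import FlattenExtractor

    observation_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
    action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
    actor = Actor(observation_space, action_space,
                  net_arch=[64, 64],
                  features_extractor=FlattenExtractor(observation_space),
                  features_dim=3)

    obs = th.as_tensor(observation_space.sample()).unsqueeze(0)
    # Stochastic squashed action in [-1, 1]
    action = actor(obs)
    # Action and its log-probability, as used in the SAC actor/entropy losses
    action, log_prob = actor.action_log_prob(obs)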