Example #1
    def ppo_policy_loss(
        self,
        advantages: torch.Tensor,
        log_probs: torch.Tensor,
        old_log_probs: torch.Tensor,
        loss_masks: torch.Tensor,
    ) -> torch.Tensor:
        """
        Evaluate PPO policy loss.
        :param advantages: Computed advantages.
        :param log_probs: Current policy log probabilities.
        :param old_log_probs: Past policy log probabilities.
        :param loss_masks: Mask for losses. Used with LSTM to ignore zeroed-out padding experiences.
        """
        advantage = advantages.unsqueeze(-1)

        decay_epsilon = self.hyperparameters.epsilon
        r_theta = torch.exp(log_probs - old_log_probs)
        p_opt_a = r_theta * advantage
        p_opt_b = (
            torch.clamp(r_theta, 1.0 - decay_epsilon, 1.0 + decay_epsilon) *
            advantage)
        policy_loss = -1 * ModelUtils.masked_mean(torch.min(p_opt_a, p_opt_b),
                                                  loss_masks)
        return policy_loss
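For reference, the value returned here is the negative of the clipped surrogate objective from the PPO paper (Schulman et al., 2017), with ModelUtils.masked_mean playing the role of the expectation over the unmasked timesteps. A sketch in LaTeX notation, where r_t(θ) corresponds to r_theta, Â_t to the advantage, and ε to the (decayed) epsilon:

    \mathcal{L}_{\text{policy}}(\theta)
        = -\,\mathbb{E}_t\!\left[ \min\!\left( r_t(\theta)\,\hat{A}_t,\;
          \operatorname{clip}\!\left( r_t(\theta),\, 1-\epsilon,\, 1+\epsilon \right) \hat{A}_t \right) \right],
    \qquad
    r_t(\theta) = \exp\!\left( \log \pi_\theta(a_t \mid s_t) - \log \pi_{\theta_{\text{old}}}(a_t \mid s_t) \right)

Clipping the ratio r_t(θ) to [1 − ε, 1 + ε] and taking the minimum removes the incentive to move the policy far from the one that collected the data, which is what keeps each update inside a trust region.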
Example #2
def trust_region_policy_loss(
    advantages: torch.Tensor,
    log_probs: torch.Tensor,
    old_log_probs: torch.Tensor,
    loss_masks: torch.Tensor,
    epsilon: float,
) -> torch.Tensor:
    """
    Evaluate policy loss clipped to stay within a trust region. Used for PPO and POCA.
    :param advantages: Computed advantages.
    :param log_probs: Current policy log probabilities.
    :param old_log_probs: Past policy log probabilities.
    :param loss_masks: Mask for losses. Used with LSTM to ignore zeroed-out padding experiences.
    :param epsilon: Clipping threshold for the policy ratio (the size of the trust region).
    """
    advantage = advantages.unsqueeze(-1)
    r_theta = torch.exp(log_probs - old_log_probs)
    p_opt_a = r_theta * advantage
    p_opt_b = torch.clamp(r_theta, 1.0 - epsilon, 1.0 + epsilon) * advantage
    policy_loss = -1 * ModelUtils.masked_mean(torch.min(p_opt_a, p_opt_b), loss_masks)
    return policy_loss
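Both snippets reference torch and ModelUtils, which are imported elsewhere in the surrounding module, so here is a minimal, self-contained sketch of the same computation on toy data. It assumes only PyTorch; masked_mean is a simplified stand-in for ModelUtils.masked_mean, assumed to average only over timesteps whose mask is 1, and all tensor shapes and values are purely illustrative.

import torch

def masked_mean(values: torch.Tensor, masks: torch.Tensor) -> torch.Tensor:
    # Simplified stand-in for ModelUtils.masked_mean (an assumption): average
    # only over entries whose mask is 1, so zeroed-out padding does not dilute the loss.
    masks = masks.float()
    if values.dim() > masks.dim():
        masks = masks.unsqueeze(-1).expand_as(values)
    return (values * masks).sum() / masks.sum().clamp(min=1.0)

# Toy batch: 4 timesteps, 2 action branches; the last timestep is zeroed-out LSTM padding.
advantages = torch.tensor([0.5, -0.2, 1.0, 0.0])
log_probs = torch.randn(4, 2)
old_log_probs = log_probs + 0.05 * torch.randn(4, 2)
loss_masks = torch.tensor([1.0, 1.0, 1.0, 0.0])
epsilon = 0.2

advantage = advantages.unsqueeze(-1)            # (4, 1), broadcasts across action dims
r_theta = torch.exp(log_probs - old_log_probs)  # probability ratio pi_new / pi_old
p_opt_a = r_theta * advantage                   # unclipped surrogate
p_opt_b = torch.clamp(r_theta, 1.0 - epsilon, 1.0 + epsilon) * advantage  # clipped surrogate
policy_loss = -1 * masked_mean(torch.min(p_opt_a, p_opt_b), loss_masks)
print(policy_loss)  # scalar tensor; the trainer would minimize this with gradient descent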