Both examples appear to be taken from Tianshou's PPOPolicy (proximal policy optimization): process_fn prepares a sampled batch with GAE returns, old values, and old log-probabilities, while learn performs the clipped-surrogate gradient updates. The imports at the top of Example #1 are added for context and cover both snippets.

Example #1
import numpy as np
import torch
from torch import nn
from typing import Dict, List

from tianshou.data import Batch, ReplayBuffer, to_numpy, to_torch_as


def process_fn(self, batch: Batch, buffer: ReplayBuffer,
               indice: np.ndarray) -> Batch:
    if self._rew_norm:
        mean, std = batch.rew.mean(), batch.rew.std()
        # np.isclose's third positional argument is rtol, which has no effect
        # when comparing against 0; atol expresses the intended
        # "std is effectively zero" check
        if not np.isclose(std, 0.0, atol=1e-2):
            batch.rew = (batch.rew - mean) / std
    v, v_, old_log_prob = [], [], []
    # evaluate V(s), V(s') and the old policy's log-probs without gradients
    with torch.no_grad():
        for b in batch.split(self._batch, shuffle=False):
            v_.append(self.critic(b.obs_next))
            v.append(self.critic(b.obs))
            old_log_prob.append(self(b).dist.log_prob(
                to_torch_as(b.act, v[0])))
    v_ = to_numpy(torch.cat(v_, dim=0))
    # GAE(lambda) returns, bootstrapped from the next-state values
    batch = self.compute_episodic_return(
        batch, v_, gamma=self._gamma, gae_lambda=self._lambda,
        rew_norm=self._rew_norm)
    batch.v = torch.cat(v, dim=0).flatten()  # old value
    batch.act = to_torch_as(batch.act, v[0])
    batch.logp_old = torch.cat(old_log_prob, dim=0)
    batch.returns = to_torch_as(batch.returns, v[0])
    # advantage = lambda-return minus the old value baseline
    batch.adv = batch.returns - batch.v
    if self._rew_norm:
        mean, std = batch.adv.mean(), batch.adv.std()
        if not np.isclose(std.item(), 0.0, atol=1e-2):
            batch.adv = (batch.adv - mean) / std
    return batch
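
process_fn delegates the return computation to compute_episodic_return. As a rough illustration of what that helper produces, here is a minimal standalone GAE(lambda) sketch in plain NumPy; the function name, the explicit done mask, and the gamma/gae_lambda defaults are illustrative assumptions, not Tianshou's actual implementation.

import numpy as np

def gae_returns_sketch(rew, v, v_next, done, gamma=0.99, gae_lambda=0.95):
    # lambda-returns via the usual GAE recursion:
    #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
    #   A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}
    adv = np.zeros_like(rew, dtype=np.float64)
    gae = 0.0
    for t in reversed(range(len(rew))):
        mask = 1.0 - done[t]  # stop bootstrapping at episode boundaries
        delta = rew[t] + gamma * v_next[t] * mask - v[t]
        gae = delta + gamma * gae_lambda * mask * gae
        adv[t] = gae
    return adv + v  # returns[t] = advantage[t] + V(s_t)

# toy usage: a four-step trajectory that terminates at the last step
rew = np.array([1.0, 1.0, 1.0, 1.0])
v = np.array([0.5, 0.5, 0.5, 0.5])
v_next = np.array([0.5, 0.5, 0.5, 0.0])
done = np.array([0.0, 0.0, 0.0, 1.0])
print(gae_returns_sketch(rew, v, v_next, done))
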
Example #2
def learn(self, batch: Batch, batch_size: int, repeat: int,
          **kwargs) -> Dict[str, List[float]]:
    self._batch = batch_size
    losses, clip_losses, vf_losses, ent_losses = [], [], [], []
    v = []
    old_log_prob = []
    # evaluate old values and old log-probs once, without gradients
    with torch.no_grad():
        for b in batch.split(batch_size, shuffle=False):
            v.append(self.critic(b.obs))
            old_log_prob.append(
                self(b).dist.log_prob(
                    torch.tensor(b.act, device=v[0].device)))
    batch.v = torch.cat(v, dim=0)  # old value
    dev = batch.v.device
    batch.act = torch.tensor(batch.act, dtype=torch.float, device=dev)
    batch.logp_old = torch.cat(old_log_prob, dim=0)
    batch.returns = torch.tensor(batch.returns,
                                 dtype=torch.float,
                                 device=dev).reshape(batch.v.shape)
    if self._rew_norm:
        mean, std = batch.returns.mean(), batch.returns.std()
        if std > self.__eps:
            batch.returns = (batch.returns - mean) / std
    batch.adv = batch.returns - batch.v
    if self._rew_norm:
        mean, std = batch.adv.mean(), batch.adv.std()
        if std > self.__eps:
            batch.adv = (batch.adv - mean) / std
    for _ in range(repeat):
        for b in batch.split(batch_size):
            dist = self(b).dist
            value = self.critic(b.obs)
            # probability ratio pi_new(a|s) / pi_old(a|s)
            ratio = (dist.log_prob(b.act) - b.logp_old).exp().float()
            surr1 = ratio * b.adv
            surr2 = ratio.clamp(1. - self._eps_clip,
                                1. + self._eps_clip) * b.adv
            if self._dual_clip:
                # dual clip: lower-bound the objective for negative advantages
                clip_loss = -torch.max(torch.min(surr1, surr2),
                                       self._dual_clip * b.adv).mean()
            else:
                clip_loss = -torch.min(surr1, surr2).mean()
            clip_losses.append(clip_loss.item())
            if self._value_clip:
                # clip the value update around the old value estimate
                v_clip = b.v + (value - b.v).clamp(-self._eps_clip,
                                                   self._eps_clip)
                vf1 = (b.returns - value).pow(2)
                vf2 = (b.returns - v_clip).pow(2)
                vf_loss = .5 * torch.max(vf1, vf2).mean()
            else:
                vf_loss = .5 * (b.returns - value).pow(2).mean()
            vf_losses.append(vf_loss.item())
            # entropy bonus encourages exploration
            e_loss = dist.entropy().mean()
            ent_losses.append(e_loss.item())
            loss = clip_loss + self._w_vf * vf_loss - self._w_ent * e_loss
            losses.append(loss.item())
            self.optim.zero_grad()
            loss.backward()
            # clip the global gradient norm over actor and critic jointly
            nn.utils.clip_grad_norm_(
                list(self.actor.parameters()) +
                list(self.critic.parameters()), self._max_grad_norm)
            self.optim.step()
    return {
        'loss': losses,
        'loss/clip': clip_losses,
        'loss/vf': vf_losses,
        'loss/ent': ent_losses,
    }
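
The core of learn is the clipped surrogate objective. The sketch below isolates just that loss on dummy tensors; the function name and the eps_clip/dual_clip defaults are illustrative assumptions, chosen only to mirror the two branches above.

import torch

def ppo_clip_loss_sketch(logp, logp_old, adv, eps_clip=0.2, dual_clip=None):
    # clipped-surrogate policy loss, mirroring the branch in learn()
    ratio = (logp - logp_old).exp()
    surr1 = ratio * adv
    surr2 = ratio.clamp(1. - eps_clip, 1. + eps_clip) * adv
    if dual_clip is not None:
        # dual clip lower-bounds the objective when advantages are negative
        return -torch.max(torch.min(surr1, surr2), dual_clip * adv).mean()
    return -torch.min(surr1, surr2).mean()

# toy usage with random log-probs and advantages
logp = torch.randn(8)
logp_old = logp + 0.1 * torch.randn(8)
adv = torch.randn(8)
print(ppo_clip_loss_sketch(logp, logp_old, adv))
print(ppo_clip_loss_sketch(logp, logp_old, adv, dual_clip=3.0))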