Example #1
0
 def process_fn(self, batch: Batch, buffer: ReplayBuffer,
                indice: np.ndarray) -> Batch:
     """Pre-process a sampled batch for on-policy (PPO-style) training.

     Optionally normalizes rewards, computes old state values, next-state
     values and old log-probs under ``no_grad``, attaches GAE returns, and
     optionally normalizes the resulting advantages.
     """
     if self._rew_norm:
         mean, std = batch.rew.mean(), batch.rew.std()
         # BUG FIX: the tolerance must be the *atol* keyword. Passed
         # positionally it is rtol, and rtol * |0| == 0, so the effective
         # tolerance silently collapsed to the default atol=1e-8.
         if not np.isclose(std, 0.0, atol=1e-2):
             batch.rew = (batch.rew - mean) / std
     v, v_, old_log_prob = [], [], []
     with torch.no_grad():
         for b in batch.split(self._batch, shuffle=False):
             v_.append(self.critic(b.obs_next))
             v.append(self.critic(b.obs))
             old_log_prob.append(self(b).dist.log_prob(
                 to_torch_as(b.act, v[0])))
     v_ = to_numpy(torch.cat(v_, dim=0))
     batch = self.compute_episodic_return(
         batch, v_, gamma=self._gamma, gae_lambda=self._lambda,
         rew_norm=self._rew_norm)
     batch.v = torch.cat(v, dim=0).flatten()  # old value
     batch.act = to_torch_as(batch.act, v[0])
     batch.logp_old = torch.cat(old_log_prob, dim=0)
     batch.returns = to_torch_as(batch.returns, v[0])
     batch.adv = batch.returns - batch.v
     if self._rew_norm:
         mean, std = batch.adv.mean(), batch.adv.std()
         # same atol fix as above; std is a torch scalar here, hence .item()
         if not np.isclose(std.item(), 0.0, atol=1e-2):
             batch.adv = (batch.adv - mean) / std
     return batch
Example #2
0
 def _compute_returns(self, batch: Batch, buffer: ReplayBuffer,
                      indice: np.ndarray) -> Batch:
     """Attach value estimates, (normalized) returns and advantages to
     ``batch`` for an actor-critic update."""
     values, next_values = [], []
     with torch.no_grad():
         for minibatch in batch.split(self._batch, shuffle=False,
                                      merge_last=True):
             values.append(self.critic(minibatch.obs))
             next_values.append(self.critic(minibatch.obs_next))
     batch.v_s = torch.cat(values, dim=0).flatten()  # old value
     v_s = batch.v_s.cpu().numpy()
     v_s_ = torch.cat(next_values, dim=0).flatten().cpu().numpy()
     # when normalizing values, we do not minus self.ret_rms.mean to be
     # numerically consistent with OPENAI baselines' value normalization
     # pipeline. Empirical study also shows that "minus mean" will harm
     # performances a tiny little bit due to unknown reasons (on Mujoco
     # envs, not confident, though).
     if self._rew_norm:  # unnormalize v_s & v_s_
         scale = np.sqrt(self.ret_rms.var + self._eps)
         v_s, v_s_ = v_s * scale, v_s_ * scale
     unnormalized_returns, advantages = self.compute_episodic_return(
         batch, buffer, indice, v_s_, v_s,
         gamma=self._gamma, gae_lambda=self._lambda)
     if not self._rew_norm:
         batch.returns = unnormalized_returns
     else:
         batch.returns = unnormalized_returns / np.sqrt(
             self.ret_rms.var + self._eps)
         self.ret_rms.update(unnormalized_returns)
     batch.returns = to_torch_as(batch.returns, batch.v_s)
     batch.adv = to_torch_as(advantages, batch.v_s)
     return batch
Example #3
0
 def process_fn(
     self, batch: Batch, buffer: ReplayBuffer, indice: np.ndarray
 ) -> Batch:
     """Pre-process a sampled batch: compute old values/log-probs, GAE
     returns and advantages, with running-mean-std return normalization.
     """
     v_s, v_s_, old_log_prob = [], [], []
     with torch.no_grad():
         for b in batch.split(self._batch, shuffle=False, merge_last=True):
             v_s.append(self.critic(b.obs))
             v_s_.append(self.critic(b.obs_next))
             old_log_prob.append(self(b).dist.log_prob(to_torch_as(b.act, v_s[0])))
     batch.v_s = torch.cat(v_s, dim=0).flatten()  # old value
     v_s = to_numpy(batch.v_s)
     v_s_ = to_numpy(torch.cat(v_s_, dim=0).flatten())
     if self._rew_norm:  # unnormalize v_s & v_s_
         v_s = v_s * np.sqrt(self.ret_rms.var + self._eps) + self.ret_rms.mean
         v_s_ = v_s_ * np.sqrt(self.ret_rms.var + self._eps) + self.ret_rms.mean
     unnormalized_returns, advantages = self.compute_episodic_return(
         batch, buffer, indice, v_s_, v_s,
         gamma=self._gamma, gae_lambda=self._lambda)
     if self._rew_norm:
         batch.returns = (unnormalized_returns - self.ret_rms.mean) / \
             np.sqrt(self.ret_rms.var + self._eps)
         self.ret_rms.update(unnormalized_returns)
         mean, std = np.mean(advantages), np.std(advantages)
         # per-batch norm; FIX: guard std with self._eps so constant
         # advantages (std == 0) no longer produce NaNs
         advantages = (advantages - mean) / (std + self._eps)
     else:
         batch.returns = unnormalized_returns
     batch.act = to_torch_as(batch.act, batch.v_s)
     batch.logp_old = torch.cat(old_log_prob, dim=0)
     batch.returns = to_torch_as(batch.returns, batch.v_s)
     batch.adv = to_torch_as(advantages, batch.v_s)
     return batch
Example #4
0
 def process_fn(self, batch: Batch, buffer: ReplayBuffer,
                indices: np.ndarray) -> Batch:
     """Delegate return/advantage computation to the parent class, then
     compute old log-probs and (optionally) normalize advantages."""
     batch = super().process_fn(batch, buffer, indices)
     old_log_prob = []
     with torch.no_grad():
         for b in batch.split(self._batch, shuffle=False, merge_last=True):
             old_log_prob.append(self(b).dist.log_prob(b.act))
     batch.logp_old = torch.cat(old_log_prob, dim=0)
     if self._norm_adv:
         mean, std = batch.adv.mean(), batch.adv.std()
         # FIX: only normalize when std > 0; the unconditional division
         # produced NaNs for constant advantages / a single sample
         if std > 0:
             batch.adv = (batch.adv - mean) / std
     return batch
Example #5
0
 def learn(self, batch: Batch, batch_size: int, repeat: int,
           **kwargs) -> Dict[str, List[float]]:
     """Run PPO clipped-surrogate updates over ``batch``.

     Computes old values / log-probs once (no_grad), normalizes returns and
     advantages when ``self._rew_norm`` is set, then performs ``repeat``
     epochs of minibatch gradient steps on the combined clip / value /
     entropy loss. Returns the per-minibatch loss histories.
     """
     self._batch = batch_size
     losses, clip_losses, vf_losses, ent_losses = [], [], [], []
     v = []
     old_log_prob = []
     # Old values and log-probs are snapshots of the pre-update policy,
     # hence the no_grad block.
     with torch.no_grad():
         for b in batch.split(batch_size, shuffle=False):
             v.append(self.critic(b.obs))
             old_log_prob.append(
                 self(b).dist.log_prob(
                     torch.tensor(b.act, device=v[0].device)))
     # NOTE(review): unlike the other process_fn variants in this file,
     # batch.v is NOT flattened here; whether critic output broadcasts
     # correctly with the (flat) ratio below depends on the critic's
     # output shape — confirm against the critic implementation.
     batch.v = torch.cat(v, dim=0)  # old value
     dev = batch.v.device
     batch.act = torch.tensor(batch.act, dtype=torch.float, device=dev)
     batch.logp_old = torch.cat(old_log_prob, dim=0)
     batch.returns = torch.tensor(batch.returns,
                                  dtype=torch.float,
                                  device=dev).reshape(batch.v.shape)
     if self._rew_norm:
         mean, std = batch.returns.mean(), batch.returns.std()
         # self.__eps is name-mangled: resolved on the defining class only.
         if std > self.__eps:
             batch.returns = (batch.returns - mean) / std
     batch.adv = batch.returns - batch.v
     if self._rew_norm:
         mean, std = batch.adv.mean(), batch.adv.std()
         if std > self.__eps:
             batch.adv = (batch.adv - mean) / std
     for _ in range(repeat):
         for b in batch.split(batch_size):
             dist = self(b).dist
             value = self.critic(b.obs)
             # importance ratio pi_new / pi_old, in log space for stability
             ratio = (dist.log_prob(b.act) - b.logp_old).exp().float()
             surr1 = ratio * b.adv
             surr2 = ratio.clamp(1. - self._eps_clip,
                                 1. + self._eps_clip) * b.adv
             if self._dual_clip:
                 # dual-clip PPO: lower-bound the objective for negative adv
                 clip_loss = -torch.max(torch.min(surr1, surr2),
                                        self._dual_clip * b.adv).mean()
             else:
                 clip_loss = -torch.min(surr1, surr2).mean()
             clip_losses.append(clip_loss.item())
             if self._value_clip:
                 # clip value update around the old value estimate,
                 # mirroring the policy clipping
                 v_clip = b.v + (value - b.v).clamp(-self._eps_clip,
                                                    self._eps_clip)
                 vf1 = (b.returns - value).pow(2)
                 vf2 = (b.returns - v_clip).pow(2)
                 vf_loss = .5 * torch.max(vf1, vf2).mean()
             else:
                 vf_loss = .5 * (b.returns - value).pow(2).mean()
             vf_losses.append(vf_loss.item())
             # entropy bonus encourages exploration (subtracted from loss)
             e_loss = dist.entropy().mean()
             ent_losses.append(e_loss.item())
             loss = clip_loss + self._w_vf * vf_loss - self._w_ent * e_loss
             losses.append(loss.item())
             self.optim.zero_grad()
             loss.backward()
             # joint gradient clipping over actor and critic parameters
             nn.utils.clip_grad_norm_(
                 list(self.actor.parameters()) +
                 list(self.critic.parameters()), self._max_grad_norm)
             self.optim.step()
     return {
         'loss': losses,
         'loss/clip': clip_losses,
         'loss/vf': vf_losses,
         'loss/ent': ent_losses,
     }