Code example #1
File: policy_gradient_v2.py  Project: kjyeung/pymarl2
    def _calculate_advs(self, batch, rewards, terminated, actions,
                        avail_actions, mask, bs, max_t):
        mac_out = []
        q_outs = []
        # Roll out experiences
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_out, q_out = self.mac.forward(batch, t=t)
            mac_out.append(agent_out)
            q_outs.append(q_out)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        q_outs = th.stack(q_outs, dim=1)  # Concat over time

        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        # Calculate the baseline
        pi = mac_out[:, :-1]  #[bs, t, n_agents, n_actions]
        pi_taken = th.gather(pi, dim=-1, index=actions[:, :-1]).squeeze(
            -1)  #[bs, t, n_agents]
        action_mask = mask.repeat(1, 1, self.n_agents)
        pi_taken[action_mask == 0] = 1.0
        log_pi_taken = th.log(pi_taken).reshape(-1)

        # Calculate entropy
        entropy = categorical_entropy(pi).reshape(-1)  # flattened [bs * t * n_agents]

        # Calculate q targets
        targets_taken = q_outs.squeeze(-1)  #[bs, t, n_agents]
        if self.args.mixer:
            targets_taken = self.mixer(targets_taken,
                                       batch["state"][:, :])  #[bs, t, 1]

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          targets_taken, self.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        advantages = targets - targets_taken[:, :-1]
        advantages = advantages.unsqueeze(2).repeat(1, 1, self.n_agents,
                                                    1).reshape(-1)

        td_error = targets_taken[:, :-1] - targets.detach()
        td_error = td_error.unsqueeze(2).repeat(1, 1, self.n_agents,
                                                1).reshape(-1)

        return advantages, td_error, targets_taken[:, :-1].unsqueeze(2).repeat(
            1, 1, self.n_agents, 1).reshape(-1), log_pi_taken, entropy
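
The critic targets above come from build_td_lambda_targets, which is not shown in these snippets. For reference, a minimal sketch of the backward-recursive TD(lambda) return used in pymarl-style codebases follows; the exact masking and bootstrapping conventions are an assumption here, not taken from this file.

    import torch as th

    def build_td_lambda_targets(rewards, terminated, mask, target_qs,
                                n_agents, gamma, td_lambda):
        # target_qs: [bs, T, ...]; rewards/terminated/mask: [bs, T-1, 1].
        # Backward recursion of the lambda-return:
        #   G_t = r_t + gamma * ((1 - lambda) * V_{t+1} + lambda * G_{t+1})
        ret = target_qs.new_zeros(*target_qs.shape)
        # Bootstrap from the final value only if the episode never terminated
        ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
        for t in range(ret.shape[1] - 2, -1, -1):
            ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] * (
                rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1]
                * (1 - terminated[:, t]))
        # Drop the final step so the targets align with targets_taken[:, :-1]
        return ret[:, 0:-1]
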
Code example #2
File: fmac_learner.py  Project: wwxFromTju/pymarl2
    def train(self,
              batch: EpisodeBatch,
              t_env: int,
              episode_num: int,
              off=False):
        # Get the relevant data
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        actions_onehot = batch["actions_onehot"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Retrace Q target
        with th.no_grad():
            q1, _ = self.target_critic(batch, batch["actions_onehot"].detach())
            target_vals = self.target_mixer(q1, batch["state"])

            lambd = 0 if off else self.args.lambd
            target_vals = build_td_lambda_targets(rewards, terminated, mask,
                                                  target_vals, self.n_agents,
                                                  self.args.gamma, lambd)

        # Train the critic
        # Current Q network forward
        q1, _ = self.critic(batch[:, :-1], actions_onehot.detach())
        q_taken = self.mixer(q1, batch["state"][:, :-1])
        critic_loss = 0.5 * (
            (q_taken - target_vals.detach()) * mask).pow(2).sum() / mask.sum()

        self.critic_optimiser.zero_grad()
        critic_loss.backward()
        critic_grad_norm = th.nn.utils.clip_grad_norm_(
            self.critic_params, self.args.grad_norm_clip)
        self.critic_optimiser.step()

        # Train the actor
        if not off:
            pi = []
            self.mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length - 1):
                agent_outs = self.mac.forward(batch, t=t)
                pi.append(agent_outs)
            pi = th.stack(pi, dim=1)  # Concat over time b, t, a, probs

            q1, _ = self.critic(batch[:, :-1], pi)
            q = self.mixer(q1, batch["state"][:, :-1])
            pg_loss = -(q * mask).sum() / mask.sum()

            entropy_loss = categorical_entropy(pi).mean(
                -1, keepdim=True)  # mean over agents
            entropy_loss[mask == 0] = 0  # fill nan
            entropy_loss = (entropy_loss * mask).sum() / mask.sum()
            loss = (pg_loss - self.args.entropy_coef * entropy_loss
                    / entropy_loss.item())

            self.agent_optimiser.zero_grad()
            loss.backward()
            agent_grad_norm = th.nn.utils.clip_grad_norm_(
                self.agent_params, self.args.grad_norm_clip)
            self.agent_optimiser.step()

        # target_update
        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        # log
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("critic_loss", critic_loss.item(), t_env)
            self.logger.log_stat("critic_grad_norm", critic_grad_norm.item(),
                                 t_env)
            self.logger.log_stat("target_vals",
                                 (target_vals * mask).sum().item() /
                                 mask.sum().item(), t_env)

            if not off:
                self.logger.log_stat("pg_loss", pg_loss.item(), t_env)
                self.logger.log_stat("entropy_loss", entropy_loss.item(),
                                     t_env)
                self.logger.log_stat("agent_grad_norm", agent_grad_norm.item(),
                                     t_env)
                agent_mask = mask.repeat(1, 1, self.n_agents)
                self.logger.log_stat(
                    "pi_max", (pi.max(dim=-1)[0] * agent_mask).sum().item() /
                    agent_mask.sum().item(), t_env)
                self.log_stats_t = t_env
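
The target networks here are refreshed through _update_targets, which is not part of this snippet. In learners of this style it is typically a hard copy of the online critic and mixer into their target counterparts; a minimal sketch under that assumption:

    def _update_targets(self):
        # Hard update: overwrite the target networks with the online parameters
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.target_mixer.load_state_dict(self.mixer.state_dict())
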
Code example #3
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]

        old_probs = batch["probs"][:, :-1]
        old_probs[avail_actions == 0] = 1e-10
        old_logprob = th.log(th.gather(old_probs, dim=3,
                                       index=actions)).detach()
        mask_agent = mask.unsqueeze(2).repeat(1, 1, self.n_agents, 1)

        # targets and advantages
        with th.no_grad():
            old_values = []
            self.critic.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length):
                agent_outs = self.critic.forward(batch, t=t)
                old_values.append(agent_outs)
            old_values = th.stack(old_values, dim=1)

            if self.use_value_norm:
                value_shape = old_values.shape
                values = self.value_norm.denormalize(
                    old_values.view(-1)).view(value_shape)
            else:
                # Without value normalisation the critic outputs are used as-is
                values = old_values

            advantages, targets = build_gae_targets(
                rewards.unsqueeze(2).repeat(1, 1, self.n_agents, 1),
                mask_agent, values, self.args.gamma, self.args.gae_lambda)

            if self.use_value_norm:
                targets_shape = targets.shape
                targets = targets.reshape(-1)
                self.value_norm.update(targets)
                targets = self.value_norm.normalize(targets).view(
                    targets_shape)

        advantages = (advantages - advantages.mean()) / (advantages.std() +
                                                         1e-6)

        # PPO Loss
        for _ in range(self.args.mini_epochs):
            # Critic
            values = []
            self.critic.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length - 1):
                agent_outs = self.critic.forward(batch, t=t)
                values.append(agent_outs)
            values = th.stack(values, dim=1)

            # value clip
            values_clipped = old_values[:, :-1] + (
                values - old_values[:, :-1]).clamp(-self.args.eps_clip,
                                                   self.args.eps_clip)

            # 0-out the targets that came from padded data
            td_error = th.max((values - targets.detach())**2,
                              (values_clipped - targets.detach())**2)
            masked_td_error = td_error * mask_agent
            critic_loss = 0.5 * masked_td_error.sum() / mask_agent.sum()

            # Actor
            pi = []
            self.mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length - 1):
                agent_outs = self.mac.forward(batch, t=t)
                pi.append(agent_outs)
            pi = th.stack(pi, dim=1)  # Concat over time

            pi[avail_actions == 0] = 1e-10
            pi_taken = th.gather(pi, dim=3, index=actions)
            log_pi_taken = th.log(pi_taken)

            ratios = th.exp(log_pi_taken - old_logprob)
            surr1 = ratios * advantages
            surr2 = th.clamp(ratios, 1 - self.args.eps_clip,
                             1 + self.args.eps_clip) * advantages
            actor_loss = -(th.min(surr1, surr2) *
                           mask_agent).sum() / mask_agent.sum()

            # entropy
            entropy_loss = categorical_entropy(pi).mean(
                -1, keepdim=True)  # mean over agents
            entropy_loss[mask == 0] = 0  # fill nan
            entropy_loss = (entropy_loss * mask).sum() / mask.sum()
            loss = (actor_loss + self.args.critic_coef * critic_loss
                    - self.args.entropy * entropy_loss / entropy_loss.item())

            # Optimise agents
            self.optimiser.zero_grad()
            loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                    self.args.grad_norm_clip)
            self.optimiser.step()

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            mask_elems = mask_agent.sum().item()
            self.logger.log_stat("advantage_mean",
                                 (advantages * mask_agent).sum().item() /
                                 mask_elems, t_env)
            self.logger.log_stat("actor_loss", actor_loss.item(), t_env)
            self.logger.log_stat("entropy_loss", entropy_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            self.logger.log_stat("lr", self.last_lr, t_env)
            self.logger.log_stat("critic_loss", critic_loss.item(), t_env)
            self.logger.log_stat("target_mean",
                                 (targets * mask_agent).sum().item() /
                                 mask_elems, t_env)
            self.log_stats_t = t_env
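
Both this example and the next rely on build_gae_targets for the advantages and value targets; the helper itself is not listed. Below is a minimal sketch of a GAE(lambda) builder matching the signature used above, with delta_t = r_t + gamma * V_{t+1} - V_t and A_t = delta_t + gamma * lambda * A_{t+1}; the way the mask enters the recursion is an assumption, not taken from the project.

    import torch as th

    def build_gae_targets(rewards, masks, values, gamma, gae_lambda):
        # rewards/masks: [bs, T-1, ...]; values: [bs, T, ...] with one extra
        # step used for bootstrapping.
        T = rewards.shape[1]
        advantages = th.zeros_like(rewards)
        running_adv = th.zeros_like(rewards[:, 0])
        for t in reversed(range(T)):
            delta = (rewards[:, t] + gamma * values[:, t + 1] * masks[:, t]
                     - values[:, t])
            running_adv = delta + gamma * gae_lambda * masks[:, t] * running_adv
            advantages[:, t] = running_adv
        # Value targets are the advantages added back onto the baseline
        targets = advantages + values[:, :-1]
        return advantages, targets
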
Code example #4
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]
        
        old_probs = batch["probs"][:, :-1]
        old_probs[avail_actions == 0] = 1e-10
        old_logprob = th.log(th.gather(old_probs, dim=3, index=actions)).detach()
        mask_agent = mask.unsqueeze(2).repeat(1, 1, self.n_agents, 1)
        
        for _ in range(self.args.mini_epochs):
            # Critic
            values = self.critic(batch)
            advantages, targets = build_gae_targets(
                rewards, mask, values, self.args.gamma, self.args.gae_lambda)

            # 0-out the targets that came from padded data
            td_error = (values[:, :-1] - targets.detach())
            masked_td_error = td_error * mask
            critic_loss = 0.5 * (masked_td_error ** 2).sum() / mask.sum()

            # Actor
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
            advantages = advantages.unsqueeze(2).repeat(1, 1, self.n_agents, 1)
            
            pi = []
            self.mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length-1):
                agent_outs = self.mac.forward(batch, t=t)
                pi.append(agent_outs)
            pi = th.stack(pi, dim=1)  # Concat over time

            pi[avail_actions == 0] = 1e-10
            pi_taken = th.gather(pi, dim=3, index=actions)
            log_pi_taken = th.log(pi_taken)
            
            ratios = th.exp(log_pi_taken - old_logprob)
            surr1 = ratios * advantages
            surr2 = th.clamp(ratios, 1-self.args.eps_clip, 1+self.args.eps_clip) * advantages
            actor_loss = -(th.min(surr1, surr2) * mask_agent).sum() / mask_agent.sum()
            
            # entropy
            entropy_loss = categorical_entropy(pi).mean(-1, keepdim=True) # mean over agents
            entropy_loss[mask == 0] = 0 # fill nan
            entropy_loss = (entropy_loss * mask).sum() / mask.sum()
            loss = actor_loss + self.args.critic_coef * critic_loss - self.args.entropy * entropy_loss / entropy_loss.item()

            # Optimise agents
            self.optimiser.zero_grad()
            loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
            self.optimiser.step()
            
            # Dynamic LR
            if self.args.lr_threshold:
                with th.no_grad():
                    kl_dist = 0.5 * ((old_logprob - log_pi_taken) ** 2)
                    kl_dist = (kl_dist * mask).sum() / mask.sum()
                    kl_dist = kl_dist.item()
                    
                    if kl_dist > (2.0 * self.args.lr_threshold):
                        self.last_lr = max(self.last_lr / 1.5, 1e-6)
                    if kl_dist < (0.5 * self.args.lr_threshold):
                        self.last_lr = min(self.last_lr * 1.5, 1e-2)
                            
                    for param_group in self.optimiser.param_groups:
                        param_group['lr'] = self.last_lr
                        

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("advantage_mean", (advantages * mask_agent).sum().item() / mask_agent.sum().item(), t_env)
            self.logger.log_stat("actor_loss", actor_loss.item(), t_env)
            self.logger.log_stat("entropy_loss", entropy_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            self.logger.log_stat("lr", self.last_lr, t_env)
            self.logger.log_stat("critic_loss", critic_loss.item(), t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("td_error_abs", masked_td_error.abs().sum().item() / mask_elems, t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() / mask_elems, t_env)
            self.log_stats_t = t_env
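
Every example above regularises the policy with categorical_entropy, whose definition is also not included in these snippets. A minimal sketch consistent with how it is used here, computing the entropy of the action distribution over the last dimension (the clamp against log(0) is an assumption):

    import torch as th

    def categorical_entropy(probs):
        # H(p) = -sum_a p(a) * log p(a), taken over the action dimension
        return -(probs * th.log(probs.clamp(min=1e-10))).sum(dim=-1)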