Example #1
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        bs = batch.batch_size
        max_t = batch.max_seq_length
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]

        # Calculate action policy distribution and entropy
        mac_out = []
        mac_out_entropy = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length - 1):
            agent_outs = self.mac.forward(batch, t=t, return_logits=True)
            agent_entropy = multinomial_entropy(agent_outs).mean(dim=-1,
                                                                 keepdim=True)
            agent_probs = th.nn.functional.softmax(agent_outs, dim=-1)
            mac_out.append(agent_probs)
            mac_out_entropy.append(agent_entropy)
        mac_out = th.stack(mac_out, dim=1)  # Stack over time
        mac_out_entropy = th.stack(mac_out_entropy, dim=1)

        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        # Mix action probability and state to estimate joint Q-value
        mix_loss = self.critic(mac_out, batch["state"][:, :-1])

        mask = mask.expand_as(mix_loss)
        entropy_mask = copy.deepcopy(mask)

        mix_loss = (mix_loss * mask).sum() / mask.sum()

        # Adaptive entropy regularisation: the coefficient is divided by the
        # current entropy value, so the entropy gradient is weighted more
        # heavily as the policy becomes more deterministic
        entropy_loss = (mac_out_entropy *
                        entropy_mask).sum() / entropy_mask.sum()
        entropy_ratio = self.entropy_coef / entropy_loss.item()

        # Negate so that gradient descent maximises the mixed Q-value plus
        # the entropy bonus
        mix_loss = -mix_loss - entropy_ratio * entropy_loss

        # Optimise agents
        self.agent_optimiser.zero_grad()
        mix_loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.agent_params,
                                                self.args.grad_norm_clip)
        self.agent_optimiser.step()

        if t_env - self.log_stats_t_agent >= self.args.learner_log_interval:
            self.logger.log_stat("mix_loss", mix_loss.item(), t_env)
            self.logger.log_stat("entropy", entropy_loss.item(), t_env)
            self.logger.log_stat("agent_grad_norm", grad_norm, t_env)
            self.log_stats_t_agent = t_env
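
This train method (and Example #2 below) relies on a multinomial_entropy helper that is not shown in the snippet. A minimal, self-contained sketch of what such a helper could compute, assuming the agent outputs passed to it are unnormalised logits over the last (action) dimension; the function name and call shape are taken from the code above, but the body is an illustrative assumption rather than the repository's actual implementation:

import torch as th
from torch.distributions import Categorical


def multinomial_entropy(logits):
    # Entropy of the categorical distribution defined by unnormalised
    # logits over the last dimension.
    # Input shape (..., n_actions) -> output shape (...)
    return Categorical(logits=logits).entropy()


# Uniform logits over 4 actions give entropy log(4) ~= 1.386 per agent;
# .mean(dim=-1, keepdim=True) in train() then averages over agents.
print(multinomial_entropy(th.zeros(2, 3, 4)).shape)  # torch.Size([2, 3])
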
Example #2
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        self.train_critic_td(batch, t_env, episode_num)
        
        # Get the relevant quantities
        bs = batch.batch_size
        max_t = batch.max_seq_length
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]

        # Calculate action policy distribution and entropy
        mac_out = []
        mac_out_entropy = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length - 1):
            # -------------------------------------------------------------------------------------#
            # NOTE: the forward-pass arguments are hard-coded for this experiment; to be fixed later
            # -------------------------------------------------------------------------------------#
            agent_outs = self.mac.forward(batch, t=t, test_mode=True, gumbel=True)
            agent_entropy = multinomial_entropy(agent_outs).mean(dim=-1, keepdim=True)
            agent_probs = th.nn.functional.softmax(agent_outs, dim=-1)
            mac_out.append(agent_probs)
            mac_out_entropy.append(agent_entropy)
        mac_out = th.stack(mac_out, dim=1)  # Stack over time
        mac_out_entropy = th.stack(mac_out_entropy, dim=1)

        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        # Mix action probability and state to estimate joint Q-value
        mix_loss = self.critic(mac_out, batch["state"][:, :-1])

        mask = mask.expand_as(mix_loss)
        entropy_mask = copy.deepcopy(mask)

        mix_loss = (mix_loss * mask).sum() / mask.sum()
        # Adaptive entropy regularisation (see Example #1): the coefficient is
        # divided by the current entropy value
        entropy_loss = (mac_out_entropy * entropy_mask).sum() / entropy_mask.sum()
        entropy_ratio = self.entropy_coef / entropy_loss.item()

        mix_loss = -mix_loss - entropy_ratio * entropy_loss

        # Optimise agents
        self.agent_optimiser.zero_grad()
        mix_loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.agent_params, self.args.grad_norm_clip)
        self.agent_optimiser.step()

        if t_env - self.log_stats_t_agent >= self.args.learner_log_interval:
            self.logger.log_stat("mix_loss", mix_loss.item(), t_env)
            self.logger.log_stat("entropy", entropy_loss.item(), t_env)
            self.logger.log_stat("agent_grad_norm", grad_norm, t_env)
            self.log_stats_t_agent = t_env
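
The mask construction shared by both train methods (mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])) keeps the terminating timestep but zeroes out everything after it. A small standalone illustration with hypothetical values for one episode of five (already sliced) timesteps; the tensors are made up purely to show the effect:

import torch as th

# Hypothetical episode: terminates at t = 2; t = 3 is still marked as
# filled in the buffer, t = 4 is pure padding.
filled = th.tensor([[1., 1., 1., 1., 0.]])
terminated = th.tensor([[0., 0., 1., 0., 0.]])

mask = filled.clone()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])

print(mask)  # tensor([[1., 1., 1., 0., 0.]]) -> terminal step kept, later steps masked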