def train_critic_batched(self, critic, target_critic, optimiser, batch, rewards, terminated, actions,
                             avail_actions, mask):
        # Optimise critic
        target_vals = target_critic(batch)

        target_vals = target_vals[:, :-1]

        if critic.output_type == 'q':
            target_vals = th.gather(target_vals, dim=3, index=actions)
            target_vals = th.cat([target_vals[:, 1:], th.zeros_like(target_vals[:, 0:1])], dim=1)
        target_vals = target_vals.squeeze(3)

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask, target_vals, self.n_agents,
                                         self.args.gamma, self.args.td_lambda)

        running_log = {
            "critic_loss": [],
            "critic_grad_norm": [],
            "td_error_abs": [],
            "target_mean": [],
            "q_taken_mean": [],
        }

        all_vals = critic(batch)
        vals = all_vals.clone()[:, :-1]

        if critic.output_type == "q":
            vals = th.gather(vals, dim=3, index=actions)
        vals = vals.squeeze(3)

        td_error = (vals - targets.detach())

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error ** 2).sum() / mask.sum()
        optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(optimiser.param_groups[0]["params"], self.args.grad_norm_clip)
        optimiser.step()
        self.critic_training_steps += 1

        running_log["critic_loss"].append(loss.item())
        running_log["critic_grad_norm"].append(grad_norm)
        mask_elems = mask.sum().item()
        running_log["td_error_abs"].append((masked_td_error.abs().sum().item() / mask_elems))
        running_log["q_taken_mean"].append((vals * mask).sum().item() / mask_elems)
        running_log["target_mean"].append((targets * mask).sum().item() / mask_elems)

        if critic.output_type == 'q':
            q_vals = all_vals[:, :-1]
            v_s = None
        else:
            q_vals = build_td_lambda_targets(rewards, terminated, mask, all_vals.squeeze(3)[:, 1:], self.n_agents,
                                             self.args.gamma, self.args.td_lambda)
            v_s = vals

        return q_vals, v_s, running_log
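
Every example on this page feeds its bootstrapped critic values through build_td_lambda_targets, whose body is not shown here. For reference, a minimal sketch of the backward TD(lambda) recursion such a helper typically implements in PyMARL-style codebases, assuming the signature used above and batch-major tensors (rewards/terminated/mask one step shorter than the bootstrapped values); this is a hedged sketch, not the canonical implementation:

import torch as th

def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
    # Sketch only: target_qs is (B, T, A); rewards, terminated and mask are (B, T-1, 1).
    # n_agents is unused in this sketch.
    ret = target_qs.new_zeros(*target_qs.shape)
    # Initialise the final lambda-return for episodes that never terminated
    ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
    # Backward recursion over the "forward view" of the lambda-return
    for t in range(ret.shape[1] - 2, -1, -1):
        ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] * (
            rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
    # Drop the final entry so the targets line up with the rewards: (B, T-1, A)
    return ret[:, 0:-1]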
Example #2
    def _train_critic(self, batch, rewards, terminated, actions, avail_actions,
                      mask, bs, max_t):
        # Optimise critic
        target_q_vals = self.target_critic(batch)[:, :]
        targets_taken = th.gather(target_q_vals, dim=3,
                                  index=actions).squeeze(3)

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          targets_taken, self.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        q_vals = th.zeros_like(target_q_vals)[:, :-1]

        running_log = {
            "critic_loss": [],
            "critic_grad_norm": [],
            "td_error_abs": [],
            "target_mean": [],
            "q_taken_mean": [],
        }

        for t in reversed(range(rewards.size(1))):
            mask_t = mask[:, t].expand(-1, self.n_agents)
            if mask_t.sum() == 0:
                continue

            q_t = self.critic(batch, t)
            q_vals[:, t] = q_t.view(bs, self.n_agents, self.n_actions)
            q_taken = th.gather(q_t, dim=3, index=actions[:, t:t + 1]).squeeze(3).squeeze(1)
            targets_t = targets[:, t]

            td_error = (q_taken - targets_t.detach())

            # 0-out the targets that came from padded data
            masked_td_error = td_error * mask_t

            # Normal L2 loss, take mean over actual data
            loss = (masked_td_error**2).sum() / mask_t.sum()
            self.critic_optimiser.zero_grad()
            loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.critic_params,
                                                    self.args.grad_norm_clip)
            self.critic_optimiser.step()
            self.critic_training_steps += 1

            running_log["critic_loss"].append(loss.item())
            running_log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            running_log["td_error_abs"].append(
                (masked_td_error.abs().sum().item() / mask_elems))
            running_log["q_taken_mean"].append(
                (q_taken * mask_t).sum().item() / mask_elems)
            running_log["target_mean"].append(
                (targets_t * mask_t).sum().item() / mask_elems)

        return q_vals, running_log
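
Example #2 above, like most of the examples below, picks out the Q-value of each agent's taken action with th.gather along the action dimension and then squeezes that dimension away. A tiny self-contained demo of the indexing pattern (the sizes are illustrative only, not taken from any environment):

import torch as th

bs, T, n_agents, n_actions = 2, 5, 3, 4                  # made-up sizes
q_vals = th.randn(bs, T, n_agents, n_actions)            # per-agent Q-values
actions = th.randint(n_actions, (bs, T, n_agents, 1))    # taken actions, kept with a trailing singleton dim

q_taken = th.gather(q_vals, dim=3, index=actions).squeeze(3)   # -> (bs, T, n_agents)
assert q_taken.shape == (bs, T, n_agents)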
Example #3
    def _train_critic(self, batch, rewards, terminated, mask):
        # Optimise critic
        target_v_vals = self.target_critic(batch)[:, :]
        v_vals = th.zeros_like(target_v_vals)

        # Calculate td-lambda targets
        target_v_vals = target_v_vals.squeeze(3)
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          target_v_vals, self.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        running_log = {
            "critic_loss": [],
            "critic_grad_norm": [],
            "td_error_abs": [],
            "target_mean": [],
            "q_taken_mean": [],
        }

        for t in reversed(range(rewards.size(1))):
            v_t = self.critic(batch, t)
            v_vals[:, t] = v_t.squeeze(1)

            mask_t = mask[:, t]
            if mask_t.sum() == 0:
                continue

            v_t = v_t.squeeze(3).squeeze(1)
            targets_t = targets[:, t]

            td_error = (v_t - targets_t.detach())

            # 0-out the targets that came from padded data
            masked_td_error = td_error * mask_t

            # Normal L2 loss, take mean over actual data
            loss = (masked_td_error**2).sum() / mask_t.sum()

            self.critic_optimiser.zero_grad()
            loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.critic_params,
                                                    self.args.grad_norm_clip)
            self.critic_optimiser.step()
            self.critic_training_steps += 1

            running_log["critic_loss"].append(loss.item())
            running_log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            running_log["td_error_abs"].append(
                (masked_td_error.abs().sum().item() / mask_elems))
            running_log["q_taken_mean"].append(
                (v_t * mask_t).sum().item() / mask_elems)
            running_log["target_mean"].append(
                (targets_t * mask_t).sum().item() / mask_elems)

        qs = v_vals[:, :-1].squeeze(3)
        vs = v_vals[:, :-1].squeeze(3)

        return qs, vs, running_log
Example #4
    def train_critic_td(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        rewards = batch["reward"]
        actions = batch["actions_onehot"]
        terminated = batch["terminated"].float()
        mask = batch["filled"].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Optimise critic
        target_q_vals = self.target_critic(actions, batch["state"])[:, :]

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask, target_q_vals, self.n_agents, self.args.gamma, self.args.td_lambda)

        running_log = {
            "critic_loss": [],
            "critic_grad_norm": [],
            "td_error_abs": [],
            "target_mean": [],
            "q_t_mean": [],
        }

        mask = mask[:, :-1]

        q_t = self.critic(actions[:, :-1], batch["state"][:, :-1])

        td_error = (q_t - targets.detach())

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error ** 2).sum() / mask.sum()
        self.critic_optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.critic_params, self.args.grad_norm_clip)
        self.critic_optimiser.step()
        self.critic_training_steps += 1

        running_log["critic_loss"].append(loss.item())
        running_log["critic_grad_norm"].append(grad_norm)
        mask_elems = mask.sum().item()
        running_log["td_error_abs"].append((masked_td_error.abs().sum().item() / mask_elems))
        running_log["q_t_mean"].append((q_t * mask).sum().item() / mask_elems)
        running_log["target_mean"].append((targets * mask).sum().item() / mask_elems)

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            ts_logged = len(running_log["critic_loss"])
            for key in ["critic_loss", "critic_grad_norm", "td_error_abs", "q_t_mean", "target_mean"]:
                self.logger.log_stat(key, sum(running_log[key])/ts_logged, t_env)
            self.log_stats_t = t_env

        # Update target critic
        if (self.critic_training_steps - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = self.critic_training_steps
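
Example #4 (and most of the other learners here) derives its timestep mask from the "filled" flags with mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1]), so that everything after an episode terminates counts as padding, and then averages the squared TD error over valid entries only. A small toy demonstration of both conventions (the episode length and values are made up):

import torch as th

# One padded episode slot of length 6: four real transitions, termination flagged at t=3.
filled = th.tensor([1., 1., 1., 1., 1., 0.]).view(1, 6, 1)
terminated = th.zeros(1, 6, 1)
terminated[0, 3] = 1.0

mask = filled.clone()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])     # also mask the step after termination
print(mask.view(-1))                                     # tensor([1., 1., 1., 1., 0., 0.])

td_error = th.randn(1, 6, 1)
masked_td_error = td_error * mask
loss = (masked_td_error ** 2).sum() / mask.sum()         # L2 loss averaged over the 4 valid steps only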
Example #5
    def train_critic(self, on_batch, best_batch=None, log=None):
        bs = on_batch.batch_size
        max_t = on_batch.max_seq_length
        rewards = on_batch["reward"][:, :-1]
        actions = on_batch["actions"][:, :]
        terminated = on_batch["terminated"][:, :-1].float()
        mask = on_batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])



        #build_target_q
        target_inputs = self.target_critic._build_inputs(on_batch, bs, max_t)
        target_q_vals = self.target_critic.forward(target_inputs).detach()
        targets_taken = th.mean(th.gather(target_q_vals, dim=3, index=actions).squeeze(3), dim=2, keepdim=True)
        target_q = build_td_lambda_targets(rewards, terminated, mask, targets_taken, self.n_agents, self.args.gamma, self.args.td_lambda).repeat(1, 1, self.n_agents)

        inputs = self.critic._build_inputs(on_batch, bs, max_t)

        if best_batch is not None:
            best_target_q, best_inputs, best_mask, best_actions= self.train_critic_best(best_batch)
            target_q = th.cat((target_q, best_target_q), dim=0)
            inputs = th.cat((inputs, best_inputs), dim=0)
            mask = th.cat((mask, best_mask), dim=0)
            actions = th.cat((actions, best_actions), dim=0)

        mask = mask.repeat(1, 1, self.n_agents)

        #train critic
        for t in range(max_t - 1):
            mask_t = mask[:, t:t+1]
            if mask_t.sum() < 0.5:
                continue
            q_vals = self.critic.forward(inputs[:, t:t+1])
            q_vals = th.gather(q_vals, 3, index=actions[:, t:t+1]).squeeze(3)
            target_q_t = target_q[:, t:t+1]
            q_err = (q_vals - target_q_t) * mask_t
            critic_loss = (q_err ** 2).sum() / mask_t.sum()
            self.critic_optimiser.zero_grad()
            critic_loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.critic_params, self.args.grad_norm_clip)
            self.critic_optimiser.step()
            self.critic_training_steps += 1

            log["critic_loss"].append(critic_loss.item())
            log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            log["td_error_abs"].append((q_err.abs().sum().item() / mask_elems))
            log["target_mean"].append((target_q_t * mask_t).sum().item() / mask_elems)
            log["q_taken_mean"].append((q_vals * mask_t).sum().item() / mask_elems)

        #update target network
        if (self.critic_training_steps - self.last_target_update_step) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_step = self.critic_training_steps
Example #6
    def _train_critic(self, batch, rewards, terminated, actions, avail_actions, mask, bs, max_t):
        # Optimise critic
        r_in, target_vals, target_val_ex = self.target_critic(batch)

        r_in, _, target_val_ex_opt = self.critic(batch)
        r_in_taken = th.gather(r_in, dim=3, index=actions)
        r_in = r_in_taken.squeeze(-1)

        target_vals = target_vals.squeeze(-1)

        targets_mix, targets_ex = build_td_lambda_targets(rewards, terminated, mask, target_vals, self.n_agents,
                                                          self.args.gamma, self.args.td_lambda, r_in, target_val_ex)

        vals_mix = th.zeros_like(target_vals)[:, :-1]
        vals_ex = target_val_ex_opt[:, :-1]

        running_log = {
            "critic_loss": [],
            "critic_grad_norm": [],
            "td_error_abs": [],
            "target_mean": [],
            "value_mean": [],
        }

        for t in reversed(range(rewards.size(1))):
            mask_t = mask[:, t].expand(-1, self.n_agents)
            if mask_t.sum() == 0:
                continue

            _, q_t, _ = self.critic(batch, t)  # q_t shape e.g. (8, 1, 3, 1) = (bs, 1, n_agents, 1)
            vals_mix[:, t] = q_t.view(bs, self.n_agents)
            targets_t = targets_mix[:, t]

            td_error = (q_t.view(bs, self.n_agents) - targets_t.detach())

            # 0-out the targets that came from padded data
            masked_td_error = td_error * mask_t

            # Normal L2 loss, take mean over actual data
            loss = (masked_td_error ** 2).sum() / mask_t.sum()
            self.critic_optimiser.zero_grad()
            loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.critic_params, self.args.grad_norm_clip)
            self.critic_optimiser.step()
            self.critic_training_steps += 1

            running_log["critic_loss"].append(loss.item())
            running_log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            running_log["td_error_abs"].append((masked_td_error.abs().sum().item() / mask_elems))
            running_log["value_mean"].append((q_t.view(bs, self.n_agents) * mask_t).sum().item() / mask_elems)
            running_log["target_mean"].append((targets_t * mask_t).sum().item() / mask_elems)

        return vals_mix, running_log, targets_mix, targets_ex, vals_ex, r_in
Example #7
    def _calculate_advs(self, batch, rewards, terminated, actions,
                        avail_actions, mask, bs, max_t):
        mac_out = []
        q_outs = []
        # Roll out experiences
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_out, q_out = self.mac.forward(batch, t=t)
            mac_out.append(agent_out)
            q_outs.append(q_out)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        q_outs = th.stack(q_outs, dim=1)  # Concat over time

        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        # Calculate the baseline
        pi = mac_out[:, :-1]  #[bs, t, n_agents, n_actions]
        pi_taken = th.gather(pi, dim=-1, index=actions[:, :-1]).squeeze(
            -1)  #[bs, t, n_agents]
        action_mask = mask.repeat(1, 1, self.n_agents)
        pi_taken[action_mask == 0] = 1.0
        log_pi_taken = th.log(pi_taken).reshape(-1)

        # Calculate entropy
        entropy = categorical_entropy(pi).reshape(-1)  #[bs, t, n_agents, 1]

        # Calculate q targets
        targets_taken = q_outs.squeeze(-1)  #[bs, t, n_agents]
        if self.args.mixer:
            targets_taken = self.mixer(targets_taken,
                                       batch["state"][:, :])  #[bs, t, 1]

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          targets_taken, self.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        advantages = targets - targets_taken[:, :-1]
        advantages = advantages.unsqueeze(2).repeat(1, 1, self.n_agents,
                                                    1).reshape(-1)

        td_error = targets_taken[:, :-1] - targets.detach()
        td_error = td_error.unsqueeze(2).repeat(1, 1, self.n_agents,
                                                1).reshape(-1)

        return advantages, td_error, targets_taken[:, :-1].unsqueeze(2).repeat(
            1, 1, self.n_agents, 1).reshape(-1), log_pi_taken, entropy
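
This advantage calculation (and Example #12 below) adds an entropy term computed by categorical_entropy over the per-agent policy. That helper is not shown on this page; a plausible stand-in, assuming it simply wraps torch.distributions.Categorical over the last dimension:

from torch.distributions import Categorical

def categorical_entropy(probs):
    # Hypothetical stand-in: entropy of a categorical distribution whose probabilities
    # live in the last dimension, e.g. probs of shape [bs, t, n_agents, n_actions].
    return Categorical(probs=probs).entropy()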
Example #8
    def train_critic(self, on_batch, best_batch=None, log=None):
        bs = on_batch.batch_size
        max_t = on_batch.max_seq_length
        rewards = on_batch["reward"][:, :-1]
        actions = on_batch["actions"][:, :]
        terminated = on_batch["terminated"][:, :-1].float()
        mask = on_batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = on_batch["avail_actions"][:]
        states = on_batch["state"]

        #build_target_q
        target_inputs = self.target_critic._build_inputs(on_batch, bs, max_t)
        target_q_vals = self.target_critic.forward(target_inputs).detach()
        targets_taken = self.target_mixer(
            th.gather(target_q_vals, dim=3, index=actions).squeeze(3), states)
        target_q = build_td_lambda_targets(rewards, terminated, mask,
                                           targets_taken, self.n_agents,
                                           self.args.gamma,
                                           self.args.td_lambda).detach()

        inputs = self.critic._build_inputs(on_batch, bs, max_t)

        mac_out = []
        self.mac.init_hidden(bs)
        for i in range(max_t):
            agent_outs = self.mac.forward(on_batch, t=i)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1).detach()
        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        if best_batch is not None:
            best_target_q, best_inputs, best_mask, best_actions, best_mac_out = self.train_critic_best(
                best_batch)
            log["best_reward"] = th.mean(
                best_batch["reward"][:, :-1].squeeze(2).sum(-1), dim=0)
            target_q = th.cat((target_q, best_target_q), dim=0)
            inputs = th.cat((inputs, best_inputs), dim=0)
            mask = th.cat((mask, best_mask), dim=0)
            actions = th.cat((actions, best_actions), dim=0)
            states = th.cat((states, best_batch["state"]), dim=0)
            mac_out = th.cat((mac_out, best_mac_out), dim=0)

        #train critic
        mac_out = mac_out.detach()
        for t in range(max_t - 1):
            mask_t = mask[:, t:t + 1]
            if mask_t.sum() < 0.5:
                continue
            k = self.mixer.k(states[:, t:t + 1]).unsqueeze(3)
            #b = self.mixer.b(states[:, t:t+1])
            q_vals = self.critic.forward(inputs[:, t:t + 1])
            q_ori = q_vals
            q_vals = th.gather(q_vals, 3, index=actions[:, t:t + 1]).squeeze(3)
            q_vals = self.mixer.forward(q_vals, states[:, t:t + 1])
            target_q_t = target_q[:, t:t + 1].detach()
            q_err = (q_vals - target_q_t) * mask_t
            critic_loss = (q_err**2).sum() / mask_t.sum()
            # Loss term for the individual Q_i values (goal_loss)
            v_vals = th.sum(q_ori * mac_out[:, t:t + 1], dim=3, keepdim=True)
            ad_vals = q_ori - v_vals
            goal = th.sum(k * v_vals, dim=2, keepdim=True) + k * ad_vals
            goal_err = (goal - q_ori) * mask_t
            goal_loss = 0.1 * (goal_err**2).sum() / mask_t.sum() / self.args.n_actions
            #critic_loss += goal_loss
            self.critic_optimiser.zero_grad()
            self.mixer_optimiser.zero_grad()
            critic_loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.c_params,
                                                    self.args.grad_norm_clip)
            self.critic_optimiser.step()
            self.mixer_optimiser.step()
            self.critic_training_steps += 1

            log["critic_loss"].append(critic_loss.item())
            log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            log["td_error_abs"].append((q_err.abs().sum().item() / mask_elems))
            log["target_mean"].append(
                (target_q_t * mask_t).sum().item() / mask_elems)
            log["q_taken_mean"].append(
                (q_vals * mask_t).sum().item() / mask_elems)
            log["q_max_mean"].append(
                (th.mean(q_ori.max(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)
            log["q_min_mean"].append(
                (th.mean(q_ori.min(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)
            log["q_max_var"].append(
                (th.var(q_ori.max(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)
            log["q_min_var"].append(
                (th.var(q_ori.min(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)

            if (t == 0):
                log["q_max_first"] = (
                    th.mean(q_ori.max(dim=3)[0], dim=2, keepdim=True) *
                    mask_t).sum().item() / mask_elems
                log["q_min_first"] = (
                    th.mean(q_ori.min(dim=3)[0], dim=2, keepdim=True) *
                    mask_t).sum().item() / mask_elems

        #update target network
        if (self.critic_training_steps - self.last_target_update_step
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_step = self.critic_training_steps
Example #9
    def train(self,
              batch: EpisodeBatch,
              t_env: int,
              episode_num: int,
              epsilon_levin=None):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(3)

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        if not self.args.SubAVG_Agent_flag:
            self.target_mac.init_hidden(batch.batch_size)
        else:
            for i in range(self.args.SubAVG_Agent_K):
                self.target_mac_list[i].init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            if not self.args.SubAVG_Agent_flag:
                target_agent_outs = self.target_mac.forward(batch, t=t)
            # exp: use the Averaged-DQN style target_mac
            else:
                target_agent_outs = 0

                self.target_agent_out_list = []
                for i in range(self.args.SubAVG_Agent_K):
                    target_agent_out = self.target_mac_list[i].forward(batch,
                                                                       t=t)
                    target_agent_outs = target_agent_outs + target_agent_out
                    if self.args.SubAVG_Agent_flag_select:
                        self.target_agent_out_list.append(target_agent_out)
                target_agent_outs = target_agent_outs / self.args.SubAVG_Agent_K
                if self.args.SubAVG_Agent_flag_select:
                    if self.args.SubAVG_Agent_name_select_replacement == 'mean':
                        target_out_select_sum = 0
                        for i in range(self.args.SubAVG_Agent_K):
                            if self.args.SubAVG_Agent_flag_select > 0:
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] <
                                    target_agent_outs, target_agent_outs,
                                    self.target_agent_out_list[i])
                            else:
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] >
                                    target_agent_outs, target_agent_outs,
                                    self.target_agent_out_list[i])
                            target_out_select_sum = target_out_select_sum + target_out_select
                        target_agent_outs = target_out_select_sum / self.args.SubAVG_Agent_K
                    elif self.args.SubAVG_Agent_name_select_replacement == 'zero':
                        target_out_select_sum = 0
                        target_select_bool_sum = 0
                        for i in range(self.args.SubAVG_Agent_K):
                            if self.args.SubAVG_Agent_flag_select > 0:
                                target_select_bool = (
                                    self.target_agent_out_list[i] >
                                    target_agent_outs).float()
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] >
                                    target_agent_outs,
                                    self.target_agent_out_list[i],
                                    th.full_like(target_agent_outs, 0))
                            else:
                                target_select_bool = (
                                    self.target_agent_out_list[i] <
                                    target_agent_outs).float()
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] <
                                    target_agent_outs,
                                    self.target_agent_out_list[i],
                                    th.full_like(target_agent_outs, 0))
                            target_select_bool_sum = target_select_bool_sum + target_select_bool
                            target_out_select_sum = target_out_select_sum + target_out_select
                        if self.levin_iter_target_update < 2:
                            pass  # print("using average directly")
                        else:
                            target_agent_outs = target_out_select_sum / target_select_bool_sum
            target_mac_out.append(target_agent_outs)

        # We don't need the first timestep's Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out, dim=1)  # Concat across time

        # Mask out unavailable actions
        target_chosen_action_qvals = th.gather(target_mac_out, 3,
                                               batch['actions']).squeeze(-1)

        # Mix
        if self.mixer is None:
            target_qvals = target_chosen_action_qvals
        else:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            if not self.args.SubAVG_Mixer_flag:
                target_qvals = self.target_mixer(target_chosen_action_qvals,
                                                 batch['state'])
            elif self.args.mixer == "qmix":
                target_max_qvals_sum = 0
                self.target_mixer_out_list = []
                for i in range(self.args.SubAVG_Mixer_K):
                    targe_mixer_out = self.target_mixer_list[i](
                        target_chosen_action_qvals, batch['state'])
                    target_max_qvals_sum = target_max_qvals_sum + targe_mixer_out
                    if self.args.SubAVG_Mixer_flag_select:
                        self.target_mixer_out_list.append(targe_mixer_out)
                target_max_qvals = target_max_qvals_sum / self.args.SubAVG_Mixer_K

                # levin: mixer select
                if self.args.SubAVG_Mixer_flag_select:
                    if self.args.SubAVG_Mixer_name_select_replacement == 'mean':
                        target_mixer_select_sum = 0
                        for i in range(self.args.SubAVG_Mixer_K):
                            if self.args.SubAVG_Mixer_flag_select > 0:
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] <
                                    target_max_qvals, target_max_qvals,
                                    self.target_mixer_out_list[i])
                            else:
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] >
                                    target_max_qvals, target_max_qvals,
                                    self.target_mixer_out_list[i])
                            target_mixer_select_sum = target_mixer_select_sum + target_mixer_select
                        target_max_qvals = target_mixer_select_sum / self.args.SubAVG_Mixer_K
                    elif self.args.SubAVG_Mixer_name_select_replacement == 'zero':
                        target_mixer_select_sum = 0
                        target_mixer_select_bool_sum = 0
                        for i in range(self.args.SubAVG_Mixer_K):
                            if self.args.SubAVG_Mixer_flag_select > 0:
                                target_mixer_select_bool = (
                                    self.target_mixer_out_list[i] >
                                    target_max_qvals).float()
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] >
                                    target_max_qvals,
                                    self.target_mixer_out_list[i],
                                    th.full_like(target_max_qvals, 0))
                            else:
                                target_mixer_select_bool = (
                                    self.target_mixer_out_list[i] <
                                    target_max_qvals).float()
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] <
                                    target_max_qvals,
                                    self.target_mixer_out_list[i],
                                    th.full_like(target_max_qvals, 0))
                            target_mixer_select_bool_sum = target_mixer_select_bool_sum + target_mixer_select_bool
                            target_mixer_select_sum = target_mixer_select_sum + target_mixer_select
                        if self.levin_iter_target_mixer_update < 2:
                            pass  # print("using average-mix directly")
                        else:
                            target_max_qvals = target_mixer_select_sum / target_mixer_select_bool_sum
                target_qvals = target_max_qvals

        if self.args.td_lambda <= 1 and self.args.td_lambda > 0:
            targets = build_td_lambda_targets(rewards, terminated, mask,
                                              target_qvals, self.args.n_agents,
                                              self.args.gamma,
                                              self.args.td_lambda)
        else:
            if self.args.td_lambda == 0:
                n = 1  # 1-step TD
            else:
                n = self.args.td_lambda

            targets = th.zeros_like(batch['reward'])
            targets += batch['reward']

            for i in range(1, n):
                targets[:, :-i] += (self.args.gamma**i) * (
                    1 - terminated[:, i - 1:]) * batch['reward'][:, i:]
            targets[:, :-n] += (self.args.gamma**n) * (
                1 - terminated[:, n - 1:]) * target_qvals[:, n:]

            targets = targets[:, :-1]

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum() * 2

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            # self.logger.log_stat("loss_levin", loss_levin.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
Example #10
    def train_critic_sequential(self, critic, target_critic, optimiser, batch, rewards, terminated, actions,
                                avail_actions, mask):
        # Optimise critic
        target_vals = target_critic(batch)

        all_vals = th.zeros_like(target_vals)

        if critic.output_type == 'q':
            target_vals = th.gather(target_vals, dim=3, index=actions)
            # target_vals = th.cat([target_vals[:, 1:], th.zeros_like(target_vals[:, 0:1])], dim=1)
        target_vals = target_vals.squeeze(3)

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask, target_vals, self.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        running_log = {
            "critic_loss": [],
            "critic_grad_norm": [],
            "td_error_abs": [],
            "target_mean": [],
            "q_taken_mean": [],
        }

        for t in reversed(range(rewards.size(1) + 1)):
            vals_t = critic(batch, t)
            all_vals[:, t] = vals_t.squeeze(1)

            if t == rewards.size(1):
                continue

            mask_t = mask[:, t]
            if mask_t.sum() == 0:
                continue

            if critic.output_type == "q":
                vals_t = th.gather(vals_t, dim=3, index=actions[:, t:t+1]).squeeze(3).squeeze(1)
            else:
                vals_t = vals_t.squeeze(3).squeeze(1)
            targets_t = targets[:, t]

            td_error = (vals_t - targets_t.detach())

            # 0-out the targets that came from padded data
            masked_td_error = td_error * mask_t

            # Normal L2 loss, take mean over actual data
            loss = (masked_td_error ** 2).sum() / mask_t.sum()  # Not dividing by the number of agents, only by the number of valid timesteps
            optimiser.zero_grad()
            loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(optimiser.param_groups[0]["params"], self.args.grad_norm_clip)
            optimiser.step()
            self.critic_training_steps += 1

            running_log["critic_loss"].append(loss.item())
            running_log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            running_log["td_error_abs"].append((masked_td_error.abs().sum().item() / mask_elems))
            running_log["q_taken_mean"].append((vals_t * mask_t).sum().item() / mask_elems)
            running_log["target_mean"].append((targets_t * mask_t).sum().item() / mask_elems)

        if critic.output_type == 'q':
            q_vals = all_vals[:, :-1]
            v_s = None
        else:
            q_vals = all_vals[:, :-1].squeeze(3)
            v_s = all_vals[:, :-1].squeeze(3)

        return q_vals, v_s, running_log
Example #11
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals_agents = th.gather(mac_out[:, :-1],
                                               dim=3,
                                               index=actions).squeeze(
                                                   3)  # Remove the last dim
        chosen_action_qvals = chosen_action_qvals_agents

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # We don't need the first timestep's Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[:],
                                  dim=1)  # Concat across time

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, :] == 0] = -9999999  # From OG deepmarl

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_action_targets, cur_max_actions = mac_out_detach[:, :].max(
                dim=3, keepdim=True)
            target_max_agent_qvals = th.gather(
                target_mac_out[:, :], 3, cur_max_actions[:, :]).squeeze(3)
        else:
            raise Exception("Use double q")

        # Central MAC stuff
        central_mac_out = []
        self.central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.central_mac.forward(batch, t=t)
            central_mac_out.append(agent_outs)
        central_mac_out = th.stack(central_mac_out, dim=1)  # Concat over time
        central_chosen_action_qvals_agents = th.gather(
            central_mac_out[:, :-1],
            dim=3,
            index=actions.unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(
                    3)  # Remove the last dim

        central_target_mac_out = []
        self.target_central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_central_mac.forward(batch, t=t)
            central_target_mac_out.append(target_agent_outs)
        central_target_mac_out = th.stack(central_target_mac_out[:],
                                          dim=1)  # Concat across time
        # Mask out unavailable actions
        central_target_mac_out[avail_actions[:, :] ==
                               0] = -9999999  # From OG deepmarl
        # Use the Qmix max actions
        central_target_max_agent_qvals = th.gather(
            central_target_mac_out[:, :], 3,
            cur_max_actions[:, :].unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(3)
        # ---

        # Mix
        chosen_action_qvals = self.mixer(chosen_action_qvals,
                                         batch["state"][:, :-1])
        target_max_qvals = self.target_central_mixer(
            central_target_max_agent_qvals, batch["state"])

        # We reuse the SARSA(lambda) target calculation to approximate Q*(lambda)
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          target_max_qvals, self.args.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        # Td-error
        td_error = (chosen_action_qvals - (targets.detach()))

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Training central Q
        central_chosen_action_qvals = self.central_mixer(
            central_chosen_action_qvals_agents, batch["state"][:, :-1])
        central_td_error = (central_chosen_action_qvals - targets.detach())
        central_mask = mask.expand_as(central_td_error)
        central_masked_td_error = central_td_error * central_mask
        central_loss = 0.5 * (central_masked_td_error**2).sum() / mask.sum()

        # QMIX loss with weighting
        ws = th.ones_like(td_error) * self.args.w
        if self.args.hysteretic_qmix:  # OW-QMIX
            ws = th.where(td_error < 0,
                          th.ones_like(td_error) * 1,
                          ws)  # Target is greater than current max
            w_to_use = ws.mean().item()  # For logging
        else:  # CW-QMIX
            is_max_action = (actions == cur_max_actions[:, :-1]).min(dim=2)[0]
            max_action_qtot = self.target_central_mixer(
                central_target_max_agent_qvals[:, :-1], batch["state"][:, :-1])
            qtot_larger = targets > max_action_qtot
            ws = th.where(is_max_action | qtot_larger,
                          th.ones_like(td_error) * 1,
                          ws)  # Target is greater than current max
            w_to_use = ws.mean().item()  # Average of ws for logging

        qmix_loss = (ws.detach() * (masked_td_error**2)).sum() / mask.sum()

        # The weightings for the different losses aren't used (they are always set to 1)
        loss = self.args.qmix_loss * qmix_loss + self.args.central_loss * central_loss

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()

        # Logging
        agent_norm = 0
        for p in self.mac_params:
            param_norm = p.grad.data.norm(2)
            agent_norm += param_norm.item()**2
        agent_norm = agent_norm**(1. / 2)

        mixer_norm = 0
        for p in self.mixer_params:
            param_norm = p.grad.data.norm(2)
            mixer_norm += param_norm.item()**2
        mixer_norm = mixer_norm**(1. / 2)
        self.mixer_norm = mixer_norm
        self.mixer_norms.append(mixer_norm)

        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.grad_norm = grad_norm

        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("qmix_loss", qmix_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            self.logger.log_stat("mixer_norm", mixer_norm, t_env)
            self.logger.log_stat("agent_norm", agent_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("central_loss", central_loss.item(), t_env)
            self.logger.log_stat("w_to_use", w_to_use, t_env)
            self.log_stats_t = t_env
Example #12
    def train(self,
              batch: EpisodeBatch,
              t_env: int,
              episode_num: int,
              off=False):
        # Get the relevant data
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        actions_onehot = batch["actions_onehot"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Retrace Q target
        with th.no_grad():
            q1, _ = self.target_critic(batch, batch["actions_onehot"].detach())
            target_vals = self.target_mixer(q1, batch["state"])

            lambd = 0 if off else self.args.lambd
            target_vals = build_td_lambda_targets(rewards, terminated, mask,
                                                  target_vals, self.n_agents,
                                                  self.args.gamma, lambd)

        # Train the critic
        # Current Q network forward
        q1, _ = self.critic(batch[:, :-1], actions_onehot.detach())
        q_taken = self.mixer(q1, batch["state"][:, :-1])
        critic_loss = 0.5 * (
            (q_taken - target_vals.detach()) * mask).pow(2).sum() / mask.sum()

        self.critic_optimiser.zero_grad()
        critic_loss.backward()
        critic_grad_norm = th.nn.utils.clip_grad_norm_(
            self.critic_params, self.args.grad_norm_clip)
        self.critic_optimiser.step()

        # Train the actor
        if not off:
            pi = []
            self.mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length - 1):
                agent_outs = self.mac.forward(batch, t=t)
                pi.append(agent_outs)
            pi = th.stack(pi, dim=1)  # Concat over time b, t, a, probs

            q1, _ = self.critic(batch[:, :-1], pi)
            q = self.mixer(q1, batch["state"][:, :-1])
            pg_loss = -(q * mask).sum() / mask.sum()

            entropy_loss = categorical_entropy(pi).mean(
                -1, keepdim=True)  # mean over agents
            entropy_loss[mask == 0] = 0  # zero out padded steps (removes NaNs)
            entropy_loss = (entropy_loss * mask).sum() / mask.sum()
            loss = pg_loss - self.args.entropy_coef * entropy_loss / entropy_loss.item()

            self.agent_optimiser.zero_grad()
            loss.backward()
            agent_grad_norm = th.nn.utils.clip_grad_norm_(
                self.agent_params, self.args.grad_norm_clip)
            self.agent_optimiser.step()

        # target_update
        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        # log
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("critic_loss", critic_loss.item(), t_env)
            self.logger.log_stat("critic_grad_norm", critic_grad_norm.item(),
                                 t_env)
            self.logger.log_stat("target_vals",
                                 (target_vals * mask).sum().item() /
                                 mask.sum().item(), t_env)

            if not off:
                self.logger.log_stat("pg_loss", pg_loss.item(), t_env)
                self.logger.log_stat("entropy_loss", entropy_loss.item(),
                                     t_env)
                self.logger.log_stat("agent_grad_norm", agent_grad_norm.item(),
                                     t_env)
                agent_mask = mask.repeat(1, 1, self.n_agents)
                self.logger.log_stat(
                    "pi_max", (pi.max(dim=-1)[0] * agent_mask).sum().item() /
                    agent_mask.sum().item(), t_env)
                self.log_stats_t = t_env
Example #13
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(
                                            3)  # Remove the last dim
        chosen_action_qvals_ = chosen_action_qvals

        # Calculate the Q-Values necessary for the target
        with th.no_grad():
            target_mac_out = []
            self.target_mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length):
                target_agent_outs = self.target_mac.forward(batch, t=t)
                target_mac_out.append(target_agent_outs)

            # We don't need the first timestep's Q-Value estimate for calculating targets
            target_mac_out = th.stack(target_mac_out,
                                      dim=1)  # Concat across time

            # Max over target Q-Values/ Double q learning
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach.max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)

            # Calculate n-step Q-Learning targets
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"])

            if getattr(self.args, 'q_lambda', False):
                qvals = th.gather(target_mac_out, 3,
                                  batch["actions"]).squeeze(3)
                qvals = self.target_mixer(qvals, batch["state"])

                targets = build_q_lambda_targets(rewards, terminated, mask,
                                                 target_max_qvals, qvals,
                                                 self.args.gamma,
                                                 self.args.td_lambda)
            else:
                targets = build_td_lambda_targets(rewards, terminated, mask,
                                                  target_max_qvals,
                                                  self.args.n_agents,
                                                  self.args.gamma,
                                                  self.args.td_lambda)

        # Mixer
        chosen_action_qvals = self.mixer(chosen_action_qvals,
                                         batch["state"][:, :-1])

        td_error = (chosen_action_qvals - targets.detach())
        td_error = 0.5 * td_error.pow(2)

        mask = mask.expand_as(td_error)
        masked_td_error = td_error * mask
        loss = L_td = masked_td_error.sum() / mask.sum()

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss_td", L_td.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

            # print estimated matrix
            if self.args.env == "one_step_matrix_game":
                print_matrix_status(batch, self.mixer, mac_out)
Example #14
    def _train_critic(self, batch, rewards, terminated, actions, avail_actions,
                      mask, bs, max_t):
        # Optimise critic
        target_q_vals = []
        for idx, target_critic_ in enumerate(self.target_critic):
            target_q_vals.append(target_critic_(batch)[:, :])

        targets_taken = [
            th.gather(target_q_vals_, dim=3, index=actions_).squeeze(3)
            for target_q_vals_, actions_ in zip(target_q_vals, actions)
        ]

        # Calculate td-lambda targets
        targets = [
            build_td_lambda_targets(rewards_, terminated, mask,
                                    targets_taken_, n_agents_,
                                    self.args.gamma,
                                    self.args.td_lambda)
            for rewards_, targets_taken_, n_agents_
            in zip(rewards, targets_taken, self.n_agents)
        ]

        q_vals = [th.zeros_like(target_q_vals_)[:, :-1]
                  for target_q_vals_ in target_q_vals]

        running_log = {
            "critic_loss_1": [],
            "critic_loss_2": [],
            "critic_grad_norm_1": [],
            "critic_grad_norm_2": [],
            "td_error_abs_1": [],
            "td_error_abs_2": [],
            "target_mean_1": [],
            "target_mean_2": [],
            "q_taken_mean_1": [],
            "q_taken_mean_2": [],

        }
        for idx, (rewards_, n_agents_, critic_, targets_,
                  critic_optimiser_, critic_params_) in enumerate(
            zip(rewards, self.n_agents, self.critic, targets,
                self.critic_optimiser, self.critic_params)):

            for t in reversed(range(rewards_.size(1))):
                mask_t = mask[:, t].expand(-1, n_agents_)
                if mask_t.sum() == 0:
                    continue

                q_t = critic_(batch, t)

                q_vals[idx][:, t] = q_t.view(bs, n_agents_, self.n_actions)
                q_taken = th.gather(q_t, dim=3, index=actions[idx][:, t:t + 1]).squeeze(3).squeeze(1)
                targets_t = targets_[:, t]

                td_error = (q_taken - targets_t.detach())

                # 0-out the targets that came from padded data
                masked_td_error = td_error * mask_t

                # Normal L2 loss, take mean over actual data
                loss = (masked_td_error ** 2).sum() / mask_t.sum()
                critic_optimiser_.zero_grad()
                loss.backward()
                grad_norm = th.nn.utils.clip_grad_norm_(critic_params_,
                                                        self.args.grad_norm_clip)
                critic_optimiser_.step()
                self.critic_training_steps += 1

                running_log["critic_loss_" + str(idx + 1)].append(loss.item())
                running_log["critic_grad_norm_" + str(idx + 1)].append(
                    grad_norm)
                mask_elems = mask_t.sum().item()
                running_log["td_error_abs_" + str(idx + 1)].append(
                    (masked_td_error.abs().sum().item() / mask_elems))
                running_log["q_taken_mean_" + str(idx + 1)].append(
                    (q_taken * mask_t).sum().item() / mask_elems)
                running_log["target_mean_" + str(idx + 1)].append(
                    (targets_t * mask_t).sum().item() / mask_elems)

        return q_vals, running_log
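
For reference, build_td_lambda_targets is assumed here to be the usual PyMARL-style helper that computes TD(lambda) targets via a backward recursion over the episode; a minimal sketch under that assumption:

import torch as th

def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
    # target_qs: [B, T, A]; rewards / terminated / mask: [B, T-1, 1] (or broadcastable).
    # n_agents is kept only for signature compatibility with the callers above.
    ret = target_qs.new_zeros(*target_qs.shape)
    # Initialise the final lambda-return for episodes that never terminated
    ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
    # Backward recursive update of the "forward view" lambda-return
    for t in range(ret.shape[1] - 2, -1, -1):
        ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] \
            * (rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
    # Lambda-returns for t = 0 .. T-2, i.e. shape [B, T-1, A]
    return ret[:, 0:-1]

With td_lambda = 1 this reduces to masked discounted returns, and with td_lambda = 0 to one-step TD targets.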
Exemple #15
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
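        # Zero out timesteps that occur after an episode has terminated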
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(
                                            3)  # Remove the last dim

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # We don't need the first timestep's Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out, dim=1)  # Concat across time

        # Bootstrap on the Q-values of the actions actually taken (SARSA-style),
        # rather than on the max over actions
        target_chosen_action_qvals = th.gather(target_mac_out,
                                               3, batch['actions']).squeeze(
                                                   -1)  # [32, 52, 3]

        if self.mixer is None:
            target_qvals = target_chosen_action_qvals
        else:
            target_qvals = self.target_mixer(target_chosen_action_qvals,
                                             batch['state'])  # [32, 52, 1]
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])

        if 0 < self.args.td_lambda <= 1:  # TD(lambda) for lambda in (0, 1]
            targets = build_td_lambda_targets(
                rewards, terminated, mask, target_qvals, self.args.n_agents,
                self.args.gamma, self.args.td_lambda)  # [32, 51, 1]
        else:
            # Otherwise td_lambda is interpreted as an n-step return length
            if self.args.td_lambda == 0:
                n = 1  # 1-step TD
            else:
                n = int(self.args.td_lambda)  # cast so range() below accepts it

            targets = th.zeros_like(batch['reward'])
            targets += batch['reward']

            for i in range(1, n):
                targets[:, :-i] += (self.args.gamma**i) * (
                    1 - terminated[:, i - 1:]) * batch['reward'][:, i:]
            targets[:, :-n] += (self.args.gamma**n) * (
                1 - terminated[:, n - 1:]) * target_qvals[:, n:]

            targets = targets[:, :-1]

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum()

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
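
When td_lambda falls outside (0, 1], the branch above builds plain n-step returns by hand. A standalone sketch of that computation (the function name and shapes are illustrative, mirroring the tensors used above):

import torch as th

def n_step_targets(rewards, terminated, target_qvals, gamma, n):
    # rewards, target_qvals: [B, T, 1]; terminated: [B, T-1, 1]
    targets = rewards.clone()
    for i in range(1, n):
        # Discounted reward i steps ahead, zeroed once the episode has terminated
        targets[:, :-i] += (gamma ** i) * (1 - terminated[:, i - 1:]) * rewards[:, i:]
    # Bootstrap with the target network's value n steps ahead
    targets[:, :-n] += (gamma ** n) * (1 - terminated[:, n - 1:]) * target_qvals[:, n:]
    # Drop the last timestep so the shape matches the TD(lambda) branch: [B, T-1, 1]
    return targets[:, :-1]

For n = 1 the loop is skipped and this reduces to the one-step TD target r_t + gamma * Q_target(s_{t+1}, a_{t+1}), bootstrapping on the action actually taken, as above.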
Exemple #16
    def sub_train(self,
                  batch: EpisodeBatch,
                  t_env: int,
                  episode_num: int,
                  mac,
                  mixer,
                  optimiser,
                  params,
                  show_demo=False,
                  save_data=None):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]
        actions_onehot = batch["actions_onehot"][:, :-1]

        # Calculate estimated Q-Values
        mac_out = []
        mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(
                                            3)  # Remove the last dim

        x_mac_out = mac_out.clone().detach()
        x_mac_out[avail_actions == 0] = -9999999
        max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)

        max_action_index = max_action_index.detach().unsqueeze(3)
        is_max_action = (max_action_index == actions).int().float()
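        # is_max_action marks timesteps where the taken action equals the greedy
        # action; its masked mean is logged later as "hit_prob"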

        if show_demo:
            q_i_data = chosen_action_qvals.detach().cpu().numpy()
            q_data = (max_action_qvals -
                      chosen_action_qvals).detach().cpu().numpy()
            # self.logger.log_stat('agent_1_%d_q_1' % save_data[0], np.squeeze(q_data)[0], t_env)
            # self.logger.log_stat('agent_2_%d_q_2' % save_data[1], np.squeeze(q_data)[1], t_env)

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # We don't need the first timestep's Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out, dim=1)  # Concat across time

        # Mask out unavailable actions
        target_mac_out[avail_actions == 0] = -9999999

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach.max(dim=3, keepdim=True)[1]
            target_chosen_qvals = th.gather(target_mac_out, 3,
                                            cur_max_actions).squeeze(3)
            target_max_qvals = target_mac_out.max(dim=3)[0]

            cur_max_actions_onehot = th.zeros(
                cur_max_actions.squeeze(3).shape + (self.n_actions, )).cuda()
            cur_max_actions_onehot = cur_max_actions_onehot.scatter_(
                3, cur_max_actions, 1)
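            # cur_max_actions_onehot (the greedy actions, one-hot encoded) is passed
            # to the target mixer's advantage term below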
        else:
            raise ValueError("Use Double Q")

        # Mix
        if mixer is not None:
            ans_chosen = mixer(chosen_action_qvals,
                               batch["state"][:, :-1],
                               is_v=True)
            ans_adv = mixer(chosen_action_qvals,
                            batch["state"][:, :-1],
                            actions=actions_onehot,
                            max_q_i=max_action_qvals,
                            is_v=False)
            chosen_action_qvals = ans_chosen + ans_adv

            if self.args.double_q:
                target_chosen = self.target_mixer(target_chosen_qvals,
                                                  batch["state"],
                                                  is_v=True)
                target_adv = self.target_mixer(target_chosen_qvals,
                                               batch["state"],
                                               actions=cur_max_actions_onehot,
                                               max_q_i=target_max_qvals,
                                               is_v=False)
                target_max_qvals = target_chosen + target_adv
            else:
                raise ValueError("Use Double Q")

        # Calculate TD(lambda) targets
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          target_max_qvals, self.args.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        if show_demo:
            tot_q_data = chosen_action_qvals.detach().cpu().numpy()
            tot_target = targets.detach().cpu().numpy()
            print('action_pair_%d_%d' % (save_data[0], save_data[1]),
                  np.squeeze(q_data[:, 0]), np.squeeze(q_i_data[:, 0]),
                  np.squeeze(tot_q_data[:, 0]), np.squeeze(tot_target[:, 0]))
            self.logger.log_stat(
                'action_pair_%d_%d' % (save_data[0], save_data[1]),
                np.squeeze(tot_q_data[:, 0]), t_env)
            return

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = 0.5 * (masked_td_error**2).sum() / mask.sum()

        masked_hit_prob = th.mean(is_max_action, dim=2) * mask
        hit_prob = masked_hit_prob.sum() / mask.sum()

        # Optimise
        optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(params,
                                                self.args.grad_norm_clip)
        optimiser.step()

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("hit_prob", hit_prob.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
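
The double-Q branch above decouples action selection (online network) from action evaluation (target network). A minimal self-contained sketch of that selection step, using the same tensor layout as above (the helper name is illustrative):

import torch as th

def double_q_chosen_targets(mac_out, target_mac_out, avail_actions):
    # mac_out, target_mac_out: [B, T, n_agents, n_actions]; avail_actions: same shape, 0/1
    mac_out_detach = mac_out.clone().detach()
    mac_out_detach[avail_actions == 0] = -9999999  # never select unavailable actions
    cur_max_actions = mac_out_detach.max(dim=3, keepdim=True)[1]  # online-network argmax
    # Evaluate the online argmax under the target network
    return th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)

Evaluating the greedy action with a separate network mitigates the overestimation bias that comes from taking a max under the same noisy estimator used for evaluation.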