Example #1
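The snippet below is reproduced without its imports. A hedged sketch of what it assumes (PyMARL-style module paths; treat the exact locations as assumptions that may differ between repositories):

import copy
import torch as th
from torch.optim import RMSprop
from components.episode_buffer import EpisodeBatch      # assumed PyMARL path
from modules.mixers.qmix import QMixer                   # assumed PyMARL path
from modules.critics.offpg import OffPGCritic            # assumed DOP-style path
from utils.rl_utils import build_td_lambda_targets       # assumed PyMARL path
from utils.offpg_utils import build_target_q             # assumed DOP-style path
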
class OffPGLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.mac = mac
        self.logger = logger

        self.last_target_update_step = 0
        self.critic_training_steps = 0

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.critic = OffPGCritic(scheme, args)
        self.mixer = QMixer(args)
        self.target_critic = copy.deepcopy(self.critic)
        self.target_mixer = copy.deepcopy(self.mixer)

        self.agent_params = list(mac.parameters())
        self.critic_params = list(self.critic.parameters())
        self.mixer_params = list(self.mixer.parameters())
        self.params = self.agent_params + self.critic_params
        self.c_params = self.critic_params + self.mixer_params

        self.agent_optimiser = RMSprop(params=self.agent_params,
                                       lr=args.lr,
                                       alpha=args.optim_alpha,
                                       eps=args.optim_eps)
        self.critic_optimiser = RMSprop(params=self.critic_params,
                                        lr=args.critic_lr,
                                        alpha=args.optim_alpha,
                                        eps=args.optim_eps)
        self.mixer_optimiser = RMSprop(params=self.mixer_params,
                                       lr=args.critic_lr,
                                       alpha=args.optim_alpha,
                                       eps=args.optim_eps)

    def train(self, batch: EpisodeBatch, t_env: int, log):
        # Get the relevant quantities
        bs = batch.batch_size
        max_t = batch.max_seq_length
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        avail_actions = batch["avail_actions"][:, :-1]
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        mask = mask.repeat(1, 1, self.n_agents).view(-1)
        states = batch["state"][:, :-1]

        # Build per-agent Q-values from the critic
        inputs = self.critic._build_inputs(batch, bs, max_t)
        q_vals = self.critic.forward(inputs).detach()[:, :-1]

        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length - 1):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        # Calculate the baseline
        q_taken = th.gather(q_vals, dim=3, index=actions).squeeze(3)
        pi = mac_out.view(-1, self.n_actions)
        baseline = th.sum(mac_out * q_vals, dim=-1).view(-1).detach()

        # Calculate policy grad with mask
        pi_taken = th.gather(pi, dim=1, index=actions.reshape(-1, 1)).squeeze(1)
        pi_taken[mask == 0] = 1.0
        log_pi_taken = th.log(pi_taken)
        coe = self.mixer.k(states).view(-1)

        advantages = (q_taken.view(-1) - baseline).detach()

        coma_loss = -(
            (coe * advantages * log_pi_taken) * mask).sum() / mask.sum()

        # Optimise agents
        self.agent_optimiser.zero_grad()
        coma_loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.agent_params,
                                                self.args.grad_norm_clip)
        self.agent_optimiser.step()

        # Compute the summed absolute value of the agent parameters (debugging aid; not used below)
        p_sum = 0.
        for p in self.agent_params:
            p_sum += p.data.abs().sum().item() / 100.0

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            ts_logged = len(log["critic_loss"])
            for key in [
                    "critic_loss", "critic_grad_norm", "td_error_abs",
                    "q_taken_mean", "target_mean", "q_max_mean", "q_min_mean",
                    "q_max_var", "q_min_var"
            ]:
                self.logger.log_stat(key, sum(log[key]) / ts_logged, t_env)
            self.logger.log_stat("q_max_first", log["q_max_first"], t_env)
            self.logger.log_stat("q_min_first", log["q_min_first"], t_env)
            #self.logger.log_stat("advantage_mean", (advantages * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("coma_loss", coma_loss.item(), t_env)
            self.logger.log_stat("agent_grad_norm", grad_norm, t_env)
            self.logger.log_stat("pi_max",
                                 (pi.max(dim=1)[0] * mask).sum().item() /
                                 mask.sum().item(), t_env)
            self.log_stats_t = t_env

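    # Note (added commentary, not in the original source): the loss computed in train() above is a
    # COMA-style counterfactual policy gradient. For each agent the advantage is
    # Q_i(s, a_i) - sum_a pi_i(a|s) * Q_i(s, a), scaled by k(s), the non-negative per-agent
    # mixing weights taken from the QMIX-style mixer, before weighting log pi_i(a_i|s).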
    def train_critic(self, on_batch, best_batch=None, log=None):
        bs = on_batch.batch_size
        max_t = on_batch.max_seq_length
        rewards = on_batch["reward"][:, :-1]
        actions = on_batch["actions"][:, :]
        terminated = on_batch["terminated"][:, :-1].float()
        mask = on_batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = on_batch["avail_actions"][:]
        states = on_batch["state"]

        # Build TD(lambda) targets from the target critic and target mixer
        target_inputs = self.target_critic._build_inputs(on_batch, bs, max_t)
        target_q_vals = self.target_critic.forward(target_inputs).detach()
        targets_taken = self.target_mixer(
            th.gather(target_q_vals, dim=3, index=actions).squeeze(3), states)
        target_q = build_td_lambda_targets(rewards, terminated, mask,
                                           targets_taken, self.n_agents,
                                           self.args.gamma,
                                           self.args.td_lambda).detach()

        inputs = self.critic._build_inputs(on_batch, bs, max_t)

        mac_out = []
        self.mac.init_hidden(bs)
        for i in range(max_t):
            agent_outs = self.mac.forward(on_batch, t=i)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1).detach()
        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0

        if best_batch is not None:
            best_target_q, best_inputs, best_mask, best_actions, best_mac_out = self.train_critic_best(
                best_batch)
            log["best_reward"] = th.mean(
                best_batch["reward"][:, :-1].squeeze(2).sum(-1), dim=0)
            target_q = th.cat((target_q, best_target_q), dim=0)
            inputs = th.cat((inputs, best_inputs), dim=0)
            mask = th.cat((mask, best_mask), dim=0)
            actions = th.cat((actions, best_actions), dim=0)
            states = th.cat((states, best_batch["state"]), dim=0)
            mac_out = th.cat((mac_out, best_mac_out), dim=0)

        # Train the critic (and mixer) one timestep at a time
        mac_out = mac_out.detach()
        for t in range(max_t - 1):
            mask_t = mask[:, t:t + 1]
            if mask_t.sum() < 0.5:
                continue
            k = self.mixer.k(states[:, t:t + 1]).unsqueeze(3)
            #b = self.mixer.b(states[:, t:t+1])
            q_vals = self.critic.forward(inputs[:, t:t + 1])
            q_ori = q_vals
            q_vals = th.gather(q_vals, 3, index=actions[:, t:t + 1]).squeeze(3)
            q_vals = self.mixer.forward(q_vals, states[:, t:t + 1])
            target_q_t = target_q[:, t:t + 1].detach()
            q_err = (q_vals - target_q_t) * mask_t
            critic_loss = (q_err**2).sum() / mask_t.sum()
            # Auxiliary loss for the individual Q_i values (not added to critic_loss; see the commented-out line below)
            v_vals = th.sum(q_ori * mac_out[:, t:t + 1], dim=3, keepdim=True)
            ad_vals = q_ori - v_vals
            goal = th.sum(k * v_vals, dim=2, keepdim=True) + k * ad_vals
            goal_err = (goal - q_ori) * mask_t
            goal_loss = 0.1 * (goal_err ** 2).sum() / mask_t.sum() / self.args.n_actions
            #critic_loss += goal_loss
            self.critic_optimiser.zero_grad()
            self.mixer_optimiser.zero_grad()
            critic_loss.backward()
            grad_norm = th.nn.utils.clip_grad_norm_(self.c_params,
                                                    self.args.grad_norm_clip)
            self.critic_optimiser.step()
            self.mixer_optimiser.step()
            self.critic_training_steps += 1

            log["critic_loss"].append(critic_loss.item())
            log["critic_grad_norm"].append(grad_norm)
            mask_elems = mask_t.sum().item()
            log["td_error_abs"].append((q_err.abs().sum().item() / mask_elems))
            log["target_mean"].append(
                (target_q_t * mask_t).sum().item() / mask_elems)
            log["q_taken_mean"].append(
                (q_vals * mask_t).sum().item() / mask_elems)
            log["q_max_mean"].append(
                (th.mean(q_ori.max(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)
            log["q_min_mean"].append(
                (th.mean(q_ori.min(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)
            log["q_max_var"].append(
                (th.var(q_ori.max(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)
            log["q_min_var"].append(
                (th.var(q_ori.min(dim=3)[0], dim=2, keepdim=True) *
                 mask_t).sum().item() / mask_elems)

            if (t == 0):
                log["q_max_first"] = (
                    th.mean(q_ori.max(dim=3)[0], dim=2, keepdim=True) *
                    mask_t).sum().item() / mask_elems
                log["q_min_first"] = (
                    th.mean(q_ori.min(dim=3)[0], dim=2, keepdim=True) *
                    mask_t).sum().item() / mask_elems

        # Periodically update the target networks
        if (self.critic_training_steps - self.last_target_update_step
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_step = self.critic_training_steps

    def train_critic_best(self, batch):
        bs = batch.batch_size
        max_t = batch.max_seq_length
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:]
        states = batch["state"]

        # Policy probabilities for all actions over the episode
        mac_out = []
        self.mac.init_hidden(bs)
        for i in range(max_t):
            agent_outs = self.mac.forward(batch, t=i)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1).detach()
        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0
        critic_mac = th.gather(mac_out, 3,
                               actions).squeeze(3).prod(dim=2, keepdim=True)

        # Target Q-values of the taken actions
        target_inputs = self.target_critic._build_inputs(batch, bs, max_t)
        target_q_vals = self.target_critic.forward(target_inputs).detach()
        targets_taken = self.target_mixer(
            th.gather(target_q_vals, dim=3, index=actions).squeeze(3), states)

        #expected q
        exp_q = self.build_exp_q(target_q_vals, mac_out, states).detach()
        # td-error
        targets_taken[:, -1] = targets_taken[:, -1] * (1 - th.sum(terminated, dim=1))
        exp_q[:, -1] = exp_q[:, -1] * (1 - th.sum(terminated, dim=1))
        targets_taken[:, :-1] = targets_taken[:, :-1] * mask
        exp_q[:, :-1] = exp_q[:, :-1] * mask
        td_q = (rewards + self.args.gamma * exp_q[:, 1:] -
                targets_taken[:, :-1]) * mask

        #compute target
        target_q = build_target_q(td_q, targets_taken[:, :-1], critic_mac,
                                  mask, self.args.gamma, self.args.tb_lambda,
                                  self.args.step).detach()

        inputs = self.critic._build_inputs(batch, bs, max_t)

        return target_q, inputs, mask, actions, mac_out

    def build_exp_q(self, target_q_vals, mac_out, states):
        target_exp_q_vals = th.sum(target_q_vals * mac_out, dim=3)
        target_exp_q_vals = self.target_mixer.forward(target_exp_q_vals,
                                                      states)
        return target_exp_q_vals

    def _update_targets(self):
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.critic.cuda()
        self.mixer.cuda()
        self.target_critic.cuda()
        self.target_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        th.save(self.critic.state_dict(), "{}/critic.th".format(path))
        th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.agent_optimiser.state_dict(),
                "{}/agent_opt.th".format(path))
        th.save(self.critic_optimiser.state_dict(),
                "{}/critic_opt.th".format(path))
        th.save(self.mixer_optimiser.state_dict(),
                "{}/mixer_opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        self.critic.load_state_dict(
            th.load("{}/critic.th".format(path),
                    map_location=lambda storage, loc: storage))
        self.mixer.load_state_dict(
            th.load("{}/mixer.th".format(path),
                    map_location=lambda storage, loc: storage))
        # Target networks are not saved, so rebuild the target critic from the loaded critic
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.agent_optimiser.load_state_dict(
            th.load("{}/agent_opt.th".format(path),
                    map_location=lambda storage, loc: storage))
        self.critic_optimiser.load_state_dict(
            th.load("{}/critic_opt.th".format(path),
                    map_location=lambda storage, loc: storage))
        self.mixer_optimiser.load_state_dict(
            th.load("{}/mixer_opt.th".format(path),
                    map_location=lambda storage, loc: storage))
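
OffPGLearner calls build_td_lambda_targets without showing it. A minimal sketch, assuming the standard PyMARL-style signature and the usual backward recursion for TD(lambda) returns:

import torch as th

def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
    # Initialise the last lambda-return for episodes that have not yet terminated
    ret = target_qs.new_zeros(*target_qs.shape)
    ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
    # Backward recursion over time ("forward view" of TD(lambda))
    for t in range(ret.shape[1] - 2, -1, -1):
        ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] * \
            (rewards[:, t] + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
    # Return the lambda-returns for t = 0 .. T-1
    return ret[:, 0:-1]
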
Example #2
class QLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # We don't need the first timestep's Q-value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:], dim=1)  # Concat across time

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        # Mix
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:])

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error ** 2).sum() / mask.sum()

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
            self.logger.log_stat("q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
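
A hypothetical driver loop for QLearner; the runner and replay-buffer objects and their method names below are assumptions about the surrounding framework, not part of the snippet above:

# Sketch only: `runner` and `buffer` are assumed to exist with PyMARL-like interfaces
learner = QLearner(mac, scheme, logger, args)
if args.use_cuda:
    learner.cuda()

episode_num = 0
while runner.t_env <= args.t_max:
    episode_batch = runner.run(test_mode=False)        # collect one episode with the current policy
    buffer.insert_episode_batch(episode_batch)
    if buffer.can_sample(args.batch_size):
        episode_sample = buffer.sample(args.batch_size)
        learner.train(episode_sample, runner.t_env, episode_num)
    episode_num += args.batch_size_run                 # number of parallel episodes per run
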
Example #3
class RODELearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger
        self.n_agents = args.n_agents

        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        self.role_mixer = None
        if args.role_mixer is not None:
            if args.role_mixer == "vdn":
                self.role_mixer = VDNMixer()
            elif args.role_mixer == "qmix":
                self.role_mixer = QMixer(args)
            else:
                raise ValueError("Role Mixer {} not recognised.".format(
                    args.role_mixer))
            self.params += list(self.role_mixer.parameters())
            self.target_role_mixer = copy.deepcopy(self.role_mixer)

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.role_interval = args.role_interval
        self.device = self.args.device

        self.role_action_spaces_updated = True

        # action encoder
        self.action_encoder_params = list(self.mac.action_encoder_params())
        self.action_encoder_optimiser = RMSprop(
            params=self.action_encoder_params,
            lr=args.lr,
            alpha=args.optim_alpha,
            eps=args.optim_eps)

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]
        # role_avail_actions = batch["role_avail_actions"]
        roles_shape_o = batch["roles"][:, :-1].shape
        role_at = int(np.ceil(roles_shape_o[1] / self.role_interval))
        role_t = role_at * self.role_interval

        roles_shape = list(roles_shape_o)
        roles_shape[1] = role_t
        roles = th.zeros(roles_shape).to(self.device)
        roles[:, :roles_shape_o[1]] = batch["roles"][:, :-1]
        roles = roles.view(batch.batch_size, role_at, self.role_interval,
                           self.n_agents, -1)[:, :, 0]

        # Calculate estimated Q-Values
        mac_out = []
        role_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs, role_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
            if t % self.role_interval == 0 and t < batch.max_seq_length - 1:
                role_out.append(role_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        role_out = th.stack(role_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(3)  # Remove the last dim
        chosen_role_qvals = th.gather(role_out, dim=3,
                                      index=roles.long()).squeeze(3)

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        target_role_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs, target_role_outs = self.target_mac.forward(
                batch, t=t)
            target_mac_out.append(target_agent_outs)
            if t % self.role_interval == 0 and t < batch.max_seq_length - 1:
                target_role_out.append(target_role_outs)

        target_role_out.append(
            th.zeros(batch.batch_size, self.n_agents,
                     self.mac.n_roles).to(self.device))
        # We don't need the first timestep's Q-value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:],
                                  dim=1)  # Concat across time
        target_role_out = th.stack(target_role_out[1:], dim=1)

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999
        # target_mac_out[role_avail_actions[:, 1:] == 0] = -9999999

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            # mac_out_detach[role_avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)

            role_out_detach = role_out.clone().detach()
            role_out_detach = th.cat(
                [role_out_detach[:, 1:], role_out_detach[:, 0:1]], dim=1)
            cur_max_roles = role_out_detach.max(dim=3, keepdim=True)[1]
            target_role_max_qvals = th.gather(target_role_out, 3,
                                              cur_max_roles).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]
            target_role_max_qvals = target_role_out.max(dim=3)[0]

        # Mix
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"][:, 1:])
        if self.role_mixer is not None:
            state_shape_o = batch["state"][:, :-1].shape
            state_shape = list(state_shape_o)
            state_shape[1] = role_t
            role_states = th.zeros(state_shape).to(self.device)
            role_states[:, :state_shape_o[1]] = batch["state"][:, :-1].detach().clone()
            role_states = role_states.view(batch.batch_size, role_at,
                                           self.role_interval, -1)[:, :, 0]
            chosen_role_qvals = self.role_mixer(chosen_role_qvals, role_states)
            role_states = th.cat([role_states[:, 1:], role_states[:, 0:1]],
                                 dim=1)
            target_role_max_qvals = self.target_role_mixer(
                target_role_max_qvals, role_states)

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
        rewards_shape = list(rewards.shape)
        rewards_shape[1] = role_t
        role_rewards = th.zeros(rewards_shape).to(self.device)
        role_rewards[:, :rewards.shape[1]] = rewards.detach().clone()
        role_rewards = role_rewards.view(batch.batch_size, role_at,
                                         self.role_interval).sum(dim=-1,
                                                                 keepdim=True)
        # role_terminated
        terminated_shape_o = terminated.shape
        terminated_shape = list(terminated_shape_o)
        terminated_shape[1] = role_t
        role_terminated = th.zeros(terminated_shape).to(self.device)
        role_terminated[:, :terminated_shape_o[1]] = terminated.detach().clone()
        role_terminated = role_terminated.view(
            batch.batch_size, role_at, self.role_interval).sum(dim=-1,
                                                               keepdim=True)
        # role_terminated
        role_targets = role_rewards + self.args.gamma * (
            1 - role_terminated) * target_role_max_qvals

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())
        role_td_error = (chosen_role_qvals - role_targets.detach())

        mask = mask.expand_as(td_error)
        mask_shape = list(mask.shape)
        mask_shape[1] = role_t
        role_mask = th.zeros(mask_shape).to(self.device)
        role_mask[:, :mask.shape[1]] = mask.detach().clone()
        role_mask = role_mask.view(batch.batch_size, role_at,
                                   self.role_interval, -1)[:, :, 0]

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        masked_role_td_error = role_td_error * role_mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum()
        role_loss = (masked_role_td_error**2).sum() / role_mask.sum()
        loss += role_loss

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        pred_obs_loss = None
        pred_r_loss = None
        pred_grad_norm = None

        if self.role_action_spaces_updated:
            # train action encoder

            no_pred = []
            r_pred = []
            for t in range(batch.max_seq_length):
                no_preds, r_preds = self.mac.action_repr_forward(batch, t=t)
                no_pred.append(no_preds)
                r_pred.append(r_preds)
            no_pred = th.stack(no_pred, dim=1)[:, :-1]  # Concat over time
            r_pred = th.stack(r_pred, dim=1)[:, :-1]
            no = batch["obs"][:, 1:].detach().clone()
            repeated_rewards = batch["reward"][:, :-1].detach().clone()
            repeated_rewards = repeated_rewards.unsqueeze(2).repeat(1, 1, self.n_agents, 1)

            pred_obs_loss = th.sqrt(((no_pred - no)**2).sum(dim=-1)).mean()
            pred_r_loss = ((r_pred - repeated_rewards)**2).mean()

            pred_loss = pred_obs_loss + 10 * pred_r_loss
            self.action_encoder_optimiser.zero_grad()
            pred_loss.backward()
            pred_grad_norm = th.nn.utils.clip_grad_norm_(
                self.action_encoder_params, self.args.grad_norm_clip)
            self.action_encoder_optimiser.step()

            if t_env > self.args.role_action_spaces_update_start:
                self.mac.update_role_action_spaces()
                if 'noar' in self.args.mac:
                    self.target_mac.role_selector.update_roles(
                        self.mac.n_roles)
                self.role_action_spaces_updated = False
                self._update_targets()
                self.last_target_update_episode = episode_num

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", (loss - role_loss).item(), t_env)
            self.logger.log_stat("role_loss", role_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            if pred_obs_loss is not None:
                self.logger.log_stat("pred_obs_loss", pred_obs_loss.item(),
                                     t_env)
                self.logger.log_stat("pred_r_loss", pred_r_loss.item(), t_env)
                self.logger.log_stat("action_encoder_grad_norm",
                                     pred_grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("role_q_taken_mean",
                                 (chosen_role_qvals * role_mask).sum().item() /
                                 (role_mask.sum().item() * self.args.n_agents),
                                 t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        if self.role_mixer is not None:
            self.target_role_mixer.load_state_dict(
                self.role_mixer.state_dict())
        self.target_mac.role_action_spaces_updated = self.role_action_spaces_updated
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
        if self.role_mixer is not None:
            self.role_mixer.cuda()
            self.target_role_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        if self.role_mixer is not None:
            th.save(self.role_mixer.state_dict(),
                    "{}/role_mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
        th.save(self.action_encoder_optimiser.state_dict(),
                "{}/action_repr_opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        if self.role_mixer is not None:
            self.role_mixer.load_state_dict(
                th.load("{}/role_mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
        self.action_encoder_optimiser.load_state_dict(
            th.load("{}/action_repr_opt.th".format(path),
                    map_location=lambda storage, loc: storage))
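
The role-interval bookkeeping in RODELearner.train pads per-step tensors up to a multiple of role_interval and then folds them into (batch, role_at, role_interval, ...) chunks. A small self-contained illustration of that reshaping (the lengths and values are made up for the example):

import numpy as np
import torch as th

T, role_interval = 7, 3                        # effective episode length and role period
role_at = int(np.ceil(T / role_interval))      # number of role decisions: 3
role_t = role_at * role_interval               # padded length: 9

rewards = th.arange(T, dtype=th.float).view(1, T, 1)   # per-step rewards of one episode
padded = th.zeros(1, role_t, 1)
padded[:, :T] = rewards
# Sum the rewards inside each role period, as done for role_rewards above
role_rewards = padded.view(1, role_at, role_interval).sum(dim=-1, keepdim=True)
print(role_rewards.squeeze())                  # tensor([ 3., 12.,  6.])
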
Example #4
class SLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.n_actions_levin = args.n_actions

        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())

            if not args.SubAVG_Mixer_flag:
                self.target_mixer = copy.deepcopy(self.mixer)

            elif args.mixer == "qmix":
                self.target_mixer_list = []
                for i in range(self.args.SubAVG_Mixer_K):
                    self.target_mixer_list.append(copy.deepcopy(self.mixer))
                self.levin_iter_target_mixer_update = 0

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        if not self.args.SubAVG_Agent_flag:
            self.target_mac = copy.deepcopy(mac)
        else:
            self.target_mac_list = []
            for i in range(self.args.SubAVG_Agent_K):
                self.target_mac_list.append(copy.deepcopy(mac))
            self.levin_iter_target_update = 0

        self.log_stats_t = -self.args.learner_log_interval - 1

        # ====== levin =====
        self.number = 0

    def train(self,
              batch: EpisodeBatch,
              t_env: int,
              episode_num: int,
              epsilon_levin=None):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(3)

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        if not self.args.SubAVG_Agent_flag:
            self.target_mac.init_hidden(batch.batch_size)
        else:
            for i in range(self.args.SubAVG_Agent_K):
                self.target_mac_list[i].init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            if not self.args.SubAVG_Agent_flag:
                target_agent_outs = self.target_mac.forward(batch, t=t)
            # exp: use the SubAVG (average-DQN style) ensemble of delayed target MACs
            else:
                target_agent_outs = 0

                self.target_agent_out_list = []
                for i in range(self.args.SubAVG_Agent_K):
                    target_agent_out = self.target_mac_list[i].forward(batch,
                                                                       t=t)
                    target_agent_outs = target_agent_outs + target_agent_out
                    if self.args.SubAVG_Agent_flag_select:
                        self.target_agent_out_list.append(target_agent_out)
                target_agent_outs = target_agent_outs / self.args.SubAVG_Agent_K
                if self.args.SubAVG_Agent_flag_select:
                    if self.args.SubAVG_Agent_name_select_replacement == 'mean':
                        target_out_select_sum = 0
                        for i in range(self.args.SubAVG_Agent_K):
                            if self.args.SubAVG_Agent_flag_select > 0:
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] <
                                    target_agent_outs, target_agent_outs,
                                    self.target_agent_out_list[i])
                            else:
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] >
                                    target_agent_outs, target_agent_outs,
                                    self.target_agent_out_list[i])
                            target_out_select_sum = target_out_select_sum + target_out_select
                        target_agent_outs = target_out_select_sum / self.args.SubAVG_Agent_K
                    elif self.args.SubAVG_Agent_name_select_replacement == 'zero':
                        target_out_select_sum = 0
                        target_select_bool_sum = 0
                        for i in range(self.args.SubAVG_Agent_K):
                            if self.args.SubAVG_Agent_flag_select > 0:
                                target_select_bool = (
                                    self.target_agent_out_list[i] >
                                    target_agent_outs).float()
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] >
                                    target_agent_outs,
                                    self.target_agent_out_list[i],
                                    th.full_like(target_agent_outs, 0))
                            else:
                                target_select_bool = (
                                    self.target_agent_out_list[i] <
                                    target_agent_outs).float()
                                target_out_select = th.where(
                                    self.target_agent_out_list[i] <
                                    target_agent_outs,
                                    self.target_agent_out_list[i],
                                    th.full_like(target_agent_outs, 0))
                            target_select_bool_sum = target_select_bool_sum + target_select_bool
                            target_out_select_sum = target_out_select_sum + target_out_select
                        if self.levin_iter_target_update < 2:
                            pass  # print("using average directly")
                        else:
                            target_agent_outs = target_out_select_sum / target_select_bool_sum
            target_mac_out.append(target_agent_outs)

        # Stack target Q-values across time (all timesteps are kept; targets are built further below)
        target_mac_out = th.stack(target_mac_out, dim=1)  # Concat across time

        # Q-values of the actions actually taken, under the target network(s)
        target_chosen_action_qvals = th.gather(target_mac_out, 3,
                                               batch['actions']).squeeze(-1)

        # Mix
        if self.mixer is None:
            target_qvals = target_chosen_action_qvals
        else:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            if not self.args.SubAVG_Mixer_flag:
                target_qvals = self.target_mixer(target_chosen_action_qvals,
                                                 batch['state'])
            elif self.args.mixer == "qmix":
                target_max_qvals_sum = 0
                self.target_mixer_out_list = []
                for i in range(self.args.SubAVG_Mixer_K):
                    target_mixer_out = self.target_mixer_list[i](
                        target_chosen_action_qvals, batch['state'])
                    target_max_qvals_sum = target_max_qvals_sum + target_mixer_out
                    if self.args.SubAVG_Mixer_flag_select:
                        self.target_mixer_out_list.append(target_mixer_out)
                target_max_qvals = target_max_qvals_sum / self.args.SubAVG_Mixer_K

                # levin: mixer select
                if self.args.SubAVG_Mixer_flag_select:
                    if self.args.SubAVG_Mixer_name_select_replacement == 'mean':
                        target_mixer_select_sum = 0
                        for i in range(self.args.SubAVG_Mixer_K):
                            if self.args.SubAVG_Mixer_flag_select > 0:
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] <
                                    target_max_qvals, target_max_qvals,
                                    self.target_mixer_out_list[i])
                            else:
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] >
                                    target_max_qvals, target_max_qvals,
                                    self.target_mixer_out_list[i])
                            target_mixer_select_sum = target_mixer_select_sum + target_mixer_select
                        target_max_qvals = target_mixer_select_sum / self.args.SubAVG_Mixer_K
                    elif self.args.SubAVG_Mixer_name_select_replacement == 'zero':
                        target_mixer_select_sum = 0
                        target_mixer_select_bool_sum = 0
                        for i in range(self.args.SubAVG_Mixer_K):
                            if self.args.SubAVG_Mixer_flag_select > 0:
                                target_mixer_select_bool = (
                                    self.target_mixer_out_list[i] >
                                    target_max_qvals).float()
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] >
                                    target_max_qvals,
                                    self.target_mixer_out_list[i],
                                    th.full_like(target_max_qvals, 0))
                            else:
                                target_mixer_select_bool = (
                                    self.target_mixer_out_list[i] <
                                    target_max_qvals).float()
                                target_mixer_select = th.where(
                                    self.target_mixer_out_list[i] <
                                    target_max_qvals,
                                    self.target_mixer_out_list[i],
                                    th.full_like(target_max_qvals, 0))
                            target_mixer_select_bool_sum = target_mixer_select_bool_sum + target_mixer_select_bool
                            target_mixer_select_sum = target_mixer_select_sum + target_mixer_select
                        if self.levin_iter_target_mixer_update < 2:
                            pass  # print("using average-mix directly")
                        else:
                            target_max_qvals = target_mixer_select_sum / target_mixer_select_bool_sum
                target_qvals = target_max_qvals

        if 0 < self.args.td_lambda <= 1:
            targets = build_td_lambda_targets(rewards, terminated, mask,
                                              target_qvals, self.args.n_agents,
                                              self.args.gamma,
                                              self.args.td_lambda)
        else:
            # td_lambda outside (0, 1] is interpreted as an n-step return horizon (0 means 1-step TD)
            if self.args.td_lambda == 0:
                n = 1  # 1-step TD
            else:
                n = self.args.td_lambda

            targets = th.zeros_like(batch['reward'])
            targets += batch['reward']

            for i in range(1, n):
                targets[:, :-i] += (self.args.gamma**i) * (
                    1 - terminated[:, i - 1:]) * batch['reward'][:, i:]
            targets[:, :-n] += (self.args.gamma**n) * (
                1 - terminated[:, n - 1:]) * target_qvals[:, n:]

            targets = targets[:, :-1]

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        # L2 loss over the actual (unpadded) data, scaled by 2 in this variant
        loss = (masked_td_error**2).sum() / mask.sum() * 2

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            # self.logger.log_stat("loss_levin", loss_levin.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        if not self.args.SubAVG_Agent_flag:
            self.target_mac.load_state(self.mac)
        else:
            self.number = self.levin_iter_target_update % self.args.SubAVG_Agent_K
            self.target_mac_list[self.number].load_state(self.mac)
            self.levin_iter_target_update = self.levin_iter_target_update + 1

        if self.mixer is not None:
            if not self.args.SubAVG_Mixer_flag:
                self.target_mixer.load_state_dict(self.mixer.state_dict())
            elif self.args.mixer == "qmix":
                mixer_number = self.levin_iter_target_mixer_update % self.args.SubAVG_Mixer_K
                self.target_mixer_list[mixer_number].load_state_dict(
                    self.mixer.state_dict())
                self.levin_iter_target_mixer_update = self.levin_iter_target_mixer_update + 1
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        if not self.args.SubAVG_Agent_flag:
            self.target_mac.cuda()
        else:
            for i in range(self.args.SubAVG_Agent_K):
                self.target_mac_list[i].cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            if not self.args.SubAVG_Mixer_flag:
                self.target_mixer.cuda()
            elif self.args.mixer == "qmix":
                for i in range(self.args.SubAVG_Mixer_K):
                    self.target_mixer_list[i].cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        if not self.args.SubAVG_Agent_flag:
            self.target_mac.load_models(path)
        else:
            for i in range(self.args.SubAVG_Agent_K):
                self.target_mac_list[i].load_models(path)

        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
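
The SubAVG target construction in SLearner averages K delayed target copies and can optionally replace members lying below (or above) that average. A toy numeric illustration of the 'mean' replacement branch, with made-up values:

import torch as th

target_list = [th.tensor([1.0, 5.0]), th.tensor([3.0, 1.0])]   # two target-network outputs
avg = sum(target_list) / len(target_list)                      # tensor([2., 3.])
# Replace every entry that falls below the ensemble average by the average itself, then re-average
selected = [th.where(t < avg, avg, t) for t in target_list]    # [tensor([2., 5.]), tensor([3., 3.])]
sub_avg = sum(selected) / len(selected)                        # tensor([2.5000, 4.0000])
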
Example #5
class CateQLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.s_mu = th.zeros(1)
        self.s_sigma = th.ones(1)

    def get_comm_beta(self, t_env):
        comm_beta = self.args.comm_beta
        if self.args.is_comm_beta_decay and t_env > self.args.comm_beta_start_decay:
            comm_beta += 1. * (self.args.comm_beta_target - self.args.comm_beta) / \
                (self.args.comm_beta_end_decay - self.args.comm_beta_start_decay) * \
                (t_env - self.args.comm_beta_start_decay)
        return comm_beta

    def get_comm_entropy_beta(self, t_env):
        comm_entropy_beta = self.args.comm_entropy_beta
        if self.args.is_comm_entropy_beta_decay and t_env > self.args.comm_entropy_beta_start_decay:
            comm_entropy_beta += 1. * (self.args.comm_entropy_beta_target - self.args.comm_entropy_beta) / \
                (self.args.comm_entropy_beta_end_decay - self.args.comm_entropy_beta_start_decay) * \
                (t_env - self.args.comm_entropy_beta_start_decay)
        return comm_entropy_beta
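    # Illustrative note (added commentary): both schedules above interpolate linearly from the
    # initial beta towards its target between comm_beta_start_decay and comm_beta_end_decay
    # environment steps. For example, with comm_beta=1e-3, comm_beta_target=1e-2, start=1e6,
    # end=2e6 and t_env=1.5e6, get_comm_beta returns 1e-3 + (9e-3 / 1e6) * 5e5 = 5.5e-3.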

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        # shape = (bs, self.n_agents, -1)
        mac_out = []
        mu_out = []
        sigma_out = []
        logits_out = []
        m_sample_out = []
        g_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            if self.args.comm and self.args.use_IB:
                agent_outs, (mu, sigma), logits, m_sample = self.mac.forward(batch, t=t)
                mu_out.append(mu)
                sigma_out.append(sigma)
                logits_out.append(logits)
                m_sample_out.append(m_sample)
            else:
                agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        if self.args.use_IB:
            mu_out = th.stack(mu_out, dim=1)[:, :-1]  # Concat over time
            sigma_out = th.stack(sigma_out, dim=1)[:, :-1]  # Concat over time
            logits_out = th.stack(logits_out, dim=1)[:, :-1]
            m_sample_out = th.stack(m_sample_out, dim=1)[:, :-1]

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(3)  # Remove the last dim
        # The Q-value computation above mirrors standard QMIX training; the remaining
        # work in this method is computing the loss for the message (communication) channel.

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            if self.args.comm and self.args.use_IB:
                target_agent_outs, (target_mu, target_sigma), target_logits, target_m_sample = \
                 self.target_mac.forward(batch, t=t)
            else:
                target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # Greedy target actions per timestep, used as labels for the expressiveness loss
        label_target_max_out = th.stack(target_mac_out[:-1], dim=1)
        label_target_max_out[avail_actions[:, :-1] == 0] = -9999999
        label_target_actions = label_target_max_out.max(dim=3, keepdim=True)[1]

        # We don't need the first timestep's Q-value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:],
                                  dim=1)  # Concat across time

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out[avail_actions == 0] = -9999999
            cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        # Mix
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"][:, 1:])

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 -
                                               terminated) * target_max_qvals

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum()

        if self.args.only_downstream or not self.args.use_IB:
            expressiveness_loss = th.Tensor([0.])
            compactness_loss = th.Tensor([0.])
            entropy_loss = th.Tensor([0.])
            comm_loss = th.Tensor([0.])
            comm_beta = th.Tensor([0.])
            comm_entropy_beta = th.Tensor([0.])
        else:
            # ### Optimise the message
            # Messages are shaped only by the expressiveness and compactness losses.
            # Expressiveness: cross-entropy against the greedy target actions at the same timestep
            expressiveness_loss = 0
            label_prob = th.gather(logits_out, 3,
                                   label_target_actions).squeeze(3)
            expressiveness_loss += (
                -th.log(label_prob + 1e-6)).sum() / mask.sum()

            # Compute KL divergence
            compactness_loss = D.kl_divergence(D.Normal(mu_out, sigma_out), D.Normal(self.s_mu, self.s_sigma)).sum() / \
                               mask.sum()

            # Entropy loss
            entropy_loss = -D.Normal(self.s_mu, self.s_sigma).log_prob(
                m_sample_out).sum() / mask.sum()

            # Gate loss (placeholder; not added to the total loss below)
            gate_loss = 0

            # Total loss
            comm_beta = self.get_comm_beta(t_env)
            comm_entropy_beta = self.get_comm_entropy_beta(t_env)
            comm_loss = expressiveness_loss + comm_beta * compactness_loss + comm_entropy_beta * entropy_loss
            comm_loss *= self.args.c_beta
            loss += comm_loss
            comm_beta = th.Tensor([comm_beta])
            comm_entropy_beta = th.Tensor([comm_entropy_beta])

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        # Update target
        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("comm_loss", comm_loss.item(), t_env)
            self.logger.log_stat("exp_loss", expressiveness_loss.item(), t_env)
            self.logger.log_stat("comp_loss", compactness_loss.item(), t_env)
            self.logger.log_stat("comm_beta", comm_beta.item(), t_env)
            self.logger.log_stat("entropy_loss", entropy_loss.item(), t_env)
            self.logger.log_stat("comm_beta", comm_beta.item(), t_env)
            self.logger.log_stat("comm_entropy_beta", comm_entropy_beta.item(),
                                 t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        # self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
        self.s_mu = self.s_mu.cuda()
        self.s_sigma = self.s_sigma.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
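
# --- Illustrative sketch (not from the original repository) -----------------------
# A minimal, self-contained sketch of the information-bottleneck message loss the
# learner above optimises: an expressiveness term (cross-entropy of the message
# logits against the greedy target-network actions), a compactness term (KL to a
# fixed Gaussian prior) and an entropy term, combined with the linearly decayed
# betas from get_comm_beta / get_comm_entropy_beta. Tensor shapes and the helper
# names (linear_decay, ib_message_loss) are assumptions for illustration only.
import torch as th
import torch.distributions as D


def linear_decay(value, target, start, end, t):
    # Linearly anneal `value` towards `target` between timesteps `start` and `end`.
    if t <= start:
        return value
    frac = min(1.0, (t - start) / float(end - start))
    return value + frac * (target - value)


def ib_message_loss(logits, mu, sigma, m_sample, label_actions, mask,
                    s_mu, s_sigma, comm_beta, comm_entropy_beta):
    # logits: (bs, T, n_agents, n_actions); label_actions: (bs, T, n_agents, 1)
    label_prob = th.gather(logits, 3, label_actions).squeeze(3)
    expressiveness = (-th.log(label_prob + 1e-6)).sum() / mask.sum()
    compactness = D.kl_divergence(D.Normal(mu, sigma),
                                  D.Normal(s_mu, s_sigma)).sum() / mask.sum()
    entropy = -D.Normal(s_mu, s_sigma).log_prob(m_sample).sum() / mask.sum()
    return expressiveness + comm_beta * compactness + comm_entropy_beta * entropy
# -----------------------------------------------------------------------------------
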
class QLearner_3s_vs_4z:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger
        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        self.params += list(self.mac.msg_rnn.parameters())
        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)
        self.target_mac = copy.deepcopy(mac)
        self.log_stats_t = -self.args.learner_log_interval - 1

        self.loss_weight = [0.5, 1, 1.5]  # the beta weights from Algorithm 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []

        previous_msg_list = []
        smooth_loss_list = []
        regularisation_smooth = 1.5
        regularisation_robust = 0.3

        self.mac.init_hidden(batch.batch_size)
        # NOTE: batch size (32), n_agents (3) and message dim (10) are hard-coded throughout this method
        smooth_loss = th.zeros((32 * 3)).cuda()
        for t in range(batch.max_seq_length):
            agent_local_outputs, input_hidden_states, vi = self.mac.forward(
                batch, t=t)
            input_hidden_states = input_hidden_states.view(-1, 64)
            self.mac.hidden_states_msg, dummy = self.mac.msg_rnn(
                self.mac.hidden_states_msg, input_hidden_states)
            ss = min(len(previous_msg_list), 3)
            # record the L2 difference over the message window
            for i in range(ss):
                smooth_loss += self.loss_weight[i] * ((
                    (dummy - previous_msg_list[i])**2).sum(dim=1)) / (
                        (ss * 32 * 3 * 10 * (dummy**2)).sum(dim=1))
            previous_msg_list.append(dummy)
            if len(previous_msg_list) > 3:
                previous_msg_list.pop(0)

            smooth_loss_reshape = smooth_loss.reshape(32, 3, 1).sum(1)  #(32,1)
            smooth_loss_list.append(smooth_loss_reshape)

            # generate the message
            dummy_final = dummy.reshape(32, 3, 10)
            dummy0 = dummy_final[:, 0, :]
            dummy1 = dummy_final[:, 1, :]
            dummy2 = dummy_final[:, 2, :]

            agent0 = (dummy1 + dummy2) / 2.0
            agent1 = (dummy0 + dummy2) / 2.0
            agent2 = (dummy0 + dummy1) / 2.0

            agent_global_outputs = th.cat((agent0.view(
                (32, 1, 10)), agent1.view((32, 1, 10)), agent2.view(
                    (32, 1, 10))), 1)
            agent_outs = agent_local_outputs + agent_global_outputs
            mac_out.append(agent_outs)

        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Compute the robustness loss
        robust_loss = th.topk(mac_out, 2)[0][:, :, :, 0] - th.topk(
            mac_out, 2)[0][:, :, :, 1]
        robust_loss = th.exp(-25.0 * robust_loss).sum(
            dim=2)[:, :-1].unsqueeze(2) / (32 * 6)  #(32,38)

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(
                                            3)  # Remove the last dim
        # Calculate the Q-Values necessary for the target
        target_mac_out = []

        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_local_outputs, target_input_hidden_states, tvi = self.target_mac.forward(
                batch, t=t)
            target_input_hidden_states = target_input_hidden_states.view(
                -1, 64)
            self.target_mac.hidden_states_msg, target_dummy = self.target_mac.msg_rnn(
                self.target_mac.hidden_states_msg, target_input_hidden_states)

            target_dummy_final = target_dummy.reshape(32, 3, 10)
            dummy0 = target_dummy_final[:, 0, :]
            dummy1 = target_dummy_final[:, 1, :]
            dummy2 = target_dummy_final[:, 2, :]

            target_agent0 = (dummy1 + dummy2) / 2.0
            target_agent1 = (dummy0 + dummy2) / 2.0
            target_agent2 = (dummy0 + dummy1) / 2.0

            target_agent_global_outputs = th.cat((target_agent0.view(
                (32, 1, 10)), target_agent1.view(
                    (32, 1, 10)), target_agent2.view((32, 1, 10))), 1)
            target_agent_outs = target_agent_local_outputs + target_agent_global_outputs
            target_mac_out.append(target_agent_outs)

        # We don't need the first timestep's Q-value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:],
                                  dim=1)  # Concat across time

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out[avail_actions == 0] = -9999999
            cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        # Mix
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"][:, 1:])

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 -
                                               terminated) * target_max_qvals

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Compute the smoothness and robustness losses
        smooth_loss = th.stack(smooth_loss_list[0:-1], dim=1)
        smooth_loss = (smooth_loss * mask).sum() / mask.sum()
        robust_loss = (robust_loss * mask).sum() / mask.sum()

        # Normal L2 loss, take mean over actual data
        loss = ((masked_td_error**2).sum() / mask.sum()
                + regularisation_smooth * smooth_loss
                + regularisation_robust * robust_loss)
        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
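
# --- Illustrative sketch (not from the original repository) -----------------------
# A minimal sketch of the two message regularisers used by QLearner_3s_vs_4z above:
# a temporal-smoothness penalty over a sliding window of recent messages, and a
# robustness penalty based on the gap between the two largest Q-values (a small gap
# means the greedy action flips easily under message perturbations). The window
# weights mirror self.loss_weight; the function names and the eps term are
# illustrative assumptions.
import torch as th


def smoothness_penalty(msg, prev_msgs, weights=(0.5, 1.0, 1.5), eps=1e-8):
    # msg: (N, msg_dim); prev_msgs: earlier messages, oldest first, newest last.
    loss = th.zeros(msg.shape[0], device=msg.device)
    window = prev_msgs[-len(weights):]
    for w, prev in zip(weights, window):
        loss = loss + w * ((msg - prev) ** 2).sum(dim=1) / ((msg ** 2).sum(dim=1) + eps)
    return loss / max(len(window), 1)


def robustness_penalty(q_values, temperature=25.0):
    # q_values: (..., n_actions); exp(-temperature * gap) between the top-2 actions.
    top2 = th.topk(q_values, 2, dim=-1)[0]
    gap = top2[..., 0] - top2[..., 1]
    return th.exp(-temperature * gap)
# -----------------------------------------------------------------------------------
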
Example #7
class MAXQLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.mac_params = list(mac.parameters())
        self.params = list(self.mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        assert args.mixer is not None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.mixer_params = list(self.mixer.parameters())
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        # Central Q
        # TODO: Clean this mess up!
        self.central_mac = None
        if self.args.central_mixer in ["ff", "atten"]:
            if self.args.central_loss == 0:
                self.central_mixer = self.mixer
                self.central_mac = self.mac
                self.target_central_mac = self.target_mac
            else:
                if self.args.central_mixer == "ff":
                    self.central_mixer = QMixerCentralFF(
                        args
                    )  # Feedforward network that takes state and agent utils as input
                # elif self.args.central_mixer == "atten":
                # self.central_mixer = QMixerCentralAtten(args)
                else:
                    raise Exception("Error with central_mixer")

                assert args.central_mac == "basic_central_mac"
                self.central_mac = mac_REGISTRY[args.central_mac](
                    scheme, args
                )  # Groups aren't used in the CentralBasicController. Little hacky
                self.target_central_mac = copy.deepcopy(self.central_mac)
                self.params += list(self.central_mac.parameters())
        else:
            raise Exception("Error with qCentral")
        self.params += list(self.central_mixer.parameters())
        self.target_central_mixer = copy.deepcopy(self.central_mixer)

        print('Mixer Size: ')
        print(
            get_parameters_num(
                list(self.mixer.parameters()) +
                list(self.central_mixer.parameters())))

        self.optimiser = Adam(params=self.params, lr=args.lr)

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.grad_norm = 1
        self.mixer_norm = 1
        self.mixer_norms = deque([1], maxlen=100)

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals_agents = th.gather(mac_out[:, :-1],
                                               dim=3,
                                               index=actions).squeeze(
                                                   3)  # Remove the last dim
        chosen_action_qvals = chosen_action_qvals_agents

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # Keep all timesteps here: the TD(lambda) targets below use the full sequence
        target_mac_out = th.stack(target_mac_out[:],
                                  dim=1)  # Concat across time

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, :] == 0] = -9999999  # From OG deepmarl

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_action_targets, cur_max_actions = mac_out_detach[:, :].max(
                dim=3, keepdim=True)
            target_max_agent_qvals = th.gather(
                target_mac_out[:, :], 3, cur_max_actions[:, :]).squeeze(3)
        else:
            raise Exception("Use double q")

        # Central MAC stuff
        central_mac_out = []
        self.central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.central_mac.forward(batch, t=t)
            central_mac_out.append(agent_outs)
        central_mac_out = th.stack(central_mac_out, dim=1)  # Concat over time
        central_chosen_action_qvals_agents = th.gather(
            central_mac_out[:, :-1],
            dim=3,
            index=actions.unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(
                    3)  # Remove the last dim

        central_target_mac_out = []
        self.target_central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_central_mac.forward(batch, t=t)
            central_target_mac_out.append(target_agent_outs)
        central_target_mac_out = th.stack(central_target_mac_out[:],
                                          dim=1)  # Concat across time
        # Mask out unavailable actions
        central_target_mac_out[avail_actions[:, :] ==
                               0] = -9999999  # From OG deepmarl
        # Use the Qmix max actions
        central_target_max_agent_qvals = th.gather(
            central_target_mac_out[:, :], 3,
            cur_max_actions[:, :].unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(3)
        # ---

        # Mix
        chosen_action_qvals = self.mixer(chosen_action_qvals,
                                         batch["state"][:, :-1])
        target_max_qvals = self.target_central_mixer(
            central_target_max_agent_qvals, batch["state"])

        # Reuse the SARSA(lambda) target computation to approximate Q*(lambda)
        targets = build_td_lambda_targets(rewards, terminated, mask,
                                          target_max_qvals, self.args.n_agents,
                                          self.args.gamma, self.args.td_lambda)

        # Td-error
        td_error = (chosen_action_qvals - (targets.detach()))

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Training central Q
        central_chosen_action_qvals = self.central_mixer(
            central_chosen_action_qvals_agents, batch["state"][:, :-1])
        central_td_error = (central_chosen_action_qvals - targets.detach())
        central_mask = mask.expand_as(central_td_error)
        central_masked_td_error = central_td_error * central_mask
        central_loss = 0.5 * (central_masked_td_error**2).sum() / mask.sum()

        # QMIX loss with weighting
        ws = th.ones_like(td_error) * self.args.w
        if self.args.hysteretic_qmix:  # OW-QMIX
            ws = th.where(td_error < 0,
                          th.ones_like(td_error) * 1,
                          ws)  # Target is greater than current max
            w_to_use = ws.mean().item()  # For logging
        else:  # CW-QMIX
            is_max_action = (actions == cur_max_actions[:, :-1]).min(dim=2)[0]
            max_action_qtot = self.target_central_mixer(
                central_target_max_agent_qvals[:, :-1], batch["state"][:, :-1])
            qtot_larger = targets > max_action_qtot
            ws = th.where(is_max_action | qtot_larger,
                          th.ones_like(td_error) * 1,
                          ws)  # Target is greater than current max
            w_to_use = ws.mean().item()  # Average of ws for logging

        qmix_loss = (ws.detach() * (masked_td_error**2)).sum() / mask.sum()

        # The weightings for the different losses aren't used (they are always set to 1)
        loss = self.args.qmix_loss * qmix_loss + self.args.central_loss * central_loss

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()

        # Logging
        agent_norm = 0
        for p in self.mac_params:
            param_norm = p.grad.data.norm(2)
            agent_norm += param_norm.item()**2
        agent_norm = agent_norm**(1. / 2)

        mixer_norm = 0
        for p in self.mixer_params:
            param_norm = p.grad.data.norm(2)
            mixer_norm += param_norm.item()**2
        mixer_norm = mixer_norm**(1. / 2)
        self.mixer_norm = mixer_norm
        self.mixer_norms.append(mixer_norm)

        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.grad_norm = grad_norm

        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("qmix_loss", qmix_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            self.logger.log_stat("mixer_norm", mixer_norm, t_env)
            self.logger.log_stat("agent_norm", agent_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("central_loss", central_loss.item(), t_env)
            self.logger.log_stat("w_to_use", w_to_use, t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        if self.central_mac is not None:
            self.target_central_mac.load_state(self.central_mac)
        self.target_central_mixer.load_state_dict(
            self.central_mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
        if self.central_mac is not None:
            self.central_mac.cuda()
            self.target_central_mac.cuda()
        self.central_mixer.cuda()
        self.target_central_mixer.cuda()

    # TODO: Model saving/loading is out of date!
    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
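
# --- Illustrative sketch (not from the original repository) -----------------------
# A minimal sketch of the TD-error weighting used by MAXQLearner (weighted QMIX).
# OW-QMIX keeps full weight only where the target exceeds the current estimate
# (td_error < 0), otherwise down-weights by w; CW-QMIX keeps full weight where the
# taken joint action is the current greedy one or the target beats the greedy joint
# Q. The function name and argument layout are assumptions for illustration.
import torch as th


def wqmix_weights(td_error, w, hysteretic, is_max_action=None, qtot_larger=None):
    # td_error: (bs, T-1, 1); is_max_action / qtot_larger: boolean tensors that
    # broadcast against td_error (only needed for the CW-QMIX branch).
    ws = th.ones_like(td_error) * w
    if hysteretic:  # OW-QMIX
        ws = th.where(td_error < 0, th.ones_like(td_error), ws)
    else:           # CW-QMIX
        ws = th.where(is_max_action | qtot_larger, th.ones_like(td_error), ws)
    return ws.detach()
# -----------------------------------------------------------------------------------
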
Example #8
class LatentQLearner(QLearner):
    def __init__(self, mac, scheme, logger, args):
        super(LatentQLearner, self).__init__(mac, scheme, logger, args)
        self.args = args
        self.mac = mac
        self.logger = logger

        self.params = list(mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1
        self.role_save = 0
        self.role_save_interval = 10

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []

        self.mac.init_hidden(batch.batch_size)
        indicator, latent, latent_vae = self.mac.init_latent(batch.batch_size)

        reg_loss = 0
        dis_loss = 0
        ce_loss = 0
        for t in range(batch.max_seq_length):
            agent_outs, loss_, dis_loss_, ce_loss_ = self.mac.forward(
                batch, t=t, t_glob=t_env,
                train_mode=True)  # (bs,n,n_actions),(bs,n,latent_dim)
            reg_loss += loss_
            dis_loss += dis_loss_
            ce_loss += ce_loss_
            # loss_cs=self.args.gamma*loss_cs + _loss
            mac_out.append(agent_outs)  # [t,(bs,n,n_actions)]
            # mac_out_latent.append((agent_outs_latent)) #[t,(bs,n,latent_dim)]

        reg_loss /= batch.max_seq_length
        dis_loss /= batch.max_seq_length
        ce_loss /= batch.max_seq_length

        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        # (bs,t,n,n_actions), Q values of n_actions

        # mac_out_latent=th.stack(mac_out_latent,dim=1)
        # (bs,t,n,latent_dim)
        # mac_out_latent=mac_out_latent.reshape(-1,self.args.latent_dim)

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3,
                                        index=actions).squeeze(
                                            3)  # Remove the last dim
        # (bs,t,n) Q value of an action

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)  # (bs,n,hidden_size)
        self.target_mac.init_latent(batch.batch_size)  # (bs,n,latent_size)

        for t in range(batch.max_seq_length):
            target_agent_outs, loss_cs_target, _, _ = self.target_mac.forward(
                batch, t=t)  # (bs,n,n_actions), (bs,n,latent_dim)
            target_mac_out.append(target_agent_outs)  # [t,(bs,n,n_actions)]

        # We don't need the first timestep's Q-value estimate for calculating targets
        target_mac_out = th.stack(
            target_mac_out[1:],
            dim=1)  # Concat across time, dim=1 is time index
        # (bs,t,n,n_actions)

        # Mask out unavailable actions
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999  # Q values

        # Max over target Q-Values
        if self.args.double_q:  # True for QMix
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach(
            )  # return a new Tensor, detached from the current graph
            mac_out_detach[avail_actions == 0] = -9999999
            # (bs,t,n,n_actions), discard t=0
            cur_max_actions = mac_out_detach[:, 1:].max(
                dim=3, keepdim=True)[1]  # indices instead of values
            # (bs,t,n,1)
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)
            # (bs,t,n,n_actions) ==> (bs,t,n,1) ==> (bs,t,n) max target-Q
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        # Mix
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"][:, 1:])
            # (bs,t,1)

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 -
                                               terminated) * target_max_qvals

        # Td-error
        td_error = (chosen_action_qvals - targets.detach()
                    )  # no gradient through target net
        # (bs,t,1)

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum()

        # entropy loss
        # mac_out_latent_norm=th.sqrt(th.sum(mac_out_latent*mac_out_latent,dim=1))
        # mac_out_latent=mac_out_latent/mac_out_latent_norm[:,None]
        # loss+=(th.norm(mac_out_latent)/mac_out_latent.size(0))*self.args.entropy_loss_weight
        loss += reg_loss

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(
            self.params, self.args.grad_norm_clip)  # max_norm
        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            #    if self.role_save % self.role_save_interval == 0:
            #        self.role_save = 0
            #        if self.args.latent_dim in [2, 3]:

            # fig = plt.figure()
            # ax = fig.add_subplot(111, projection='3d')
            #           print(self.mac.agent.latent[:, :self.args.latent_dim],
            #                  self.mac.agent.latent[:, -self.args.latent_dim:])

            #    self.role_save += 1

            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("loss_reg", reg_loss.item(), t_env)
            self.logger.log_stat("loss_dis", dis_loss.item(), t_env)
            self.logger.log_stat("loss_ce", ce_loss.item(), t_env)

            #indicator=[var_mean,mi.max(),mi.min(),mi.mean(),mi.std(),di.max(),di.min(),di.mean(),di.std()]
            self.logger.log_stat("var_mean", indicator[0].item(), t_env)
            self.logger.log_stat("mi_max", indicator[1].item(), t_env)
            self.logger.log_stat("mi_min", indicator[2].item(), t_env)
            self.logger.log_stat("mi_mean", indicator[3].item(), t_env)
            self.logger.log_stat("mi_std", indicator[4].item(), t_env)
            self.logger.log_stat("di_max", indicator[5].item(), t_env)
            self.logger.log_stat("di_min", indicator[6].item(), t_env)
            self.logger.log_stat("di_mean", indicator[7].item(), t_env)
            self.logger.log_stat("di_std", indicator[8].item(), t_env)

            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)

            if self.args.use_tensorboard:
                # log_vec(self,mat,metadata,label_img,global_step,tag)
                self.logger.log_vec(latent, list(range(self.args.n_agents)),
                                    t_env, "latent")
                self.logger.log_vec(latent_vae,
                                    list(range(self.args.n_agents)), t_env,
                                    "latent-VAE")
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
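
# --- Illustrative sketch (not from the original repository) -----------------------
# A minimal sketch of the masked double-Q target used throughout these learners:
# greedy actions are chosen by the online network (with unavailable actions masked
# out) and evaluated by the target network, optionally passed through a target
# mixer, then combined with rewards via a 1-step bootstrap. Shapes and the optional
# mixer call are assumptions for illustration.
import torch as th


def double_q_targets(mac_out, target_mac_out, avail_actions, rewards,
                     terminated, gamma, target_mixer=None, states=None):
    # mac_out: (bs, T, n_agents, n_actions) from the online network.
    # target_mac_out: (bs, T-1, n_agents, n_actions) for timesteps 1..T-1, with
    # unavailable actions already masked to a large negative value.
    mac_out_detach = mac_out.clone().detach()
    mac_out_detach[avail_actions == 0] = -9999999
    cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
    target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
    if target_mixer is not None:
        target_max_qvals = target_mixer(target_max_qvals, states[:, 1:])
    return rewards + gamma * (1 - terminated) * target_max_qvals
# -----------------------------------------------------------------------------------
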
class SACQLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.mac_params = list(mac.parameters())
        self.params = list(self.mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        assert args.mixer is not None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.mixer_params = list(self.mixer.parameters())
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        # Central Q
        # TODO: Clean this mess up!
        self.central_mac = None
        assert self.args.central_mixer == "ff"
        self.central_mixer = QMixerCentralFF(args)
        assert args.central_mac == "basic_central_mac"
        self.central_mac = mac_REGISTRY[args.central_mac](
            scheme, args
        )  # Groups aren't used in the CentralBasicController. Little hacky
        self.target_central_mac = copy.deepcopy(self.central_mac)
        self.params += list(self.central_mac.parameters())
        self.params += list(self.central_mixer.parameters())
        self.target_central_mixer = copy.deepcopy(self.central_mixer)

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Current policies
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Mask out unavailable actions, renormalise (as in action selection)
        mac_out[avail_actions == 0] = 0
        mac_out = mac_out / mac_out.sum(dim=-1, keepdim=True)
        mac_out[avail_actions == 0] = 0
        mac_out[(mac_out.sum(dim=-1, keepdim=True) == 0).expand_as(
            mac_out
        )] = 1  # Set any all 0 probability vectors to all 1s. They will be masked out later, but still need to be sampled.

        # Target policies
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)
        target_mac_out = th.stack(target_mac_out, dim=1)  # Concat across time

        # Mask out unavailable actions, renormalise (as in action selection)
        target_mac_out[avail_actions == 0] = 0
        target_mac_out = target_mac_out / target_mac_out.sum(dim=-1,
                                                             keepdim=True)
        target_mac_out[avail_actions == 0] = 0
        target_mac_out[(
            target_mac_out.sum(dim=-1, keepdim=True) == 0
        ).expand_as(
            target_mac_out
        )] = 1  # Set any all 0 probability vectors to all 1s. They will be masked out later, but still need to be sampled.

        # Sample actions
        sampled_actions = Categorical(mac_out).sample().long()
        sampled_target_actions = Categorical(target_mac_out).sample().long()

        # Central MAC stuff
        central_mac_out = []
        self.central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.central_mac.forward(batch, t=t)
            central_mac_out.append(agent_outs)
        central_mac_out = th.stack(central_mac_out, dim=1)  # Concat over time
        # Actions chosen from replay buffer
        central_chosen_action_qvals_agents = th.gather(
            central_mac_out[:, :-1],
            dim=3,
            index=actions.unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(
                    3)  # Remove the last dim

        central_target_mac_out = []
        self.target_central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_central_mac.forward(batch, t=t)
            central_target_mac_out.append(target_agent_outs)
        central_target_mac_out = th.stack(central_target_mac_out[:],
                                          dim=1)  # Concat across time
        central_target_action_qvals_agents = th.gather(central_target_mac_out[:,:], 3, \
                                                       sampled_target_actions[:,:].unsqueeze(3).unsqueeze(4)\
                                                        .repeat(1,1,1,1,self.args.central_action_embed)).squeeze(3)
        # ---

        critic_bootstrap_qvals = self.target_central_mixer(
            central_target_action_qvals_agents[:, 1:], batch["state"][:, 1:])

        target_chosen_action_probs = th.gather(
            target_mac_out, dim=3,
            index=sampled_target_actions.unsqueeze(3)).squeeze(dim=3)
        target_policy_logs = th.log(target_chosen_action_probs).sum(
            dim=2, keepdim=True)  # Sum across agents
        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * \
                  (critic_bootstrap_qvals - self.args.entropy_temp * target_policy_logs[:,1:])

        # Training Critic
        central_chosen_action_qvals = self.central_mixer(
            central_chosen_action_qvals_agents, batch["state"][:, :-1])
        central_td_error = (central_chosen_action_qvals - targets.detach())
        central_mask = mask.expand_as(central_td_error)
        central_masked_td_error = central_td_error * central_mask
        central_loss = (central_masked_td_error**2).sum() / mask.sum()

        # Actor Loss
        central_sampled_action_qvals_agents = th.gather(central_mac_out[:, :-1], 3, \
                                                        sampled_actions[:, :-1].unsqueeze(3).unsqueeze(4) \
                                                        .repeat(1, 1, 1, 1, self.args.central_action_embed)).squeeze(3)
        central_sampled_action_qvals = self.central_mixer(
            central_sampled_action_qvals_agents,
            batch["state"][:, :-1]).repeat(1, 1, self.args.n_agents)
        sampled_action_probs = th.gather(
            mac_out, dim=3, index=sampled_actions.unsqueeze(3)).squeeze(3)
        policy_logs = th.log(sampled_action_probs)[:, :-1]
        actor_mask = mask.expand_as(policy_logs)
        actor_loss = (
            (policy_logs *
             (self.args.entropy_temp *
              (policy_logs + 1) - central_sampled_action_qvals).detach()) *
            actor_mask).sum() / actor_mask.sum()

        loss = self.args.actor_loss * actor_loss + self.args.central_loss * central_loss

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()

        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.grad_norm = grad_norm

        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("actor_loss", actor_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("central_loss", central_loss.item(), t_env)
            ps = mac_out[:, :-1] * avail_actions[:, :-1]
            log_ps = th.log(mac_out[:, :-1] + 0.00001) * avail_actions[:, :-1]
            actor_entropy = -((
                (ps * log_ps).sum(dim=3) * mask).sum() / mask.sum())
            self.logger.log_stat("actor_entropy", actor_entropy.item(), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        if self.central_mac is not None:
            self.target_central_mac.load_state(self.central_mac)
        self.target_central_mixer.load_state_dict(
            self.central_mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
        if self.central_mac is not None:
            self.central_mac.cuda()
            self.target_central_mac.cuda()
        self.central_mixer.cuda()
        self.target_central_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
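
# --- Illustrative sketch (not from the original repository) -----------------------
# A minimal sketch of the discrete soft-actor-critic pieces in SACQLearner above:
# an entropy-regularised bootstrap target for the central critic, and an actor loss
# that pushes the policy towards actions with high central Q while penalising their
# log-probabilities. The single-critic simplification, shapes and function names are
# assumptions for illustration.


def soft_targets(rewards, terminated, gamma, boot_q, target_log_pi, entropy_temp):
    # boot_q / target_log_pi: critic values and log-probs under actions sampled from
    # the target policy, aligned with timesteps 1..T-1.
    return rewards + gamma * (1 - terminated) * (boot_q - entropy_temp * target_log_pi)


def sac_actor_loss(log_pi, sampled_q, entropy_temp, mask):
    # log_pi: log-probabilities of the sampled actions; sampled_q: central Q for them.
    objective = log_pi * (entropy_temp * (log_pi + 1) - sampled_q).detach()
    return (objective * mask).sum() / mask.sum()
# -----------------------------------------------------------------------------------
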
Example #10
class PGLearner_v2:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.mac = mac
        self.logger = logger

        self.last_target_update_step = 0
        self.critic_training_steps = 0

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.target_mac = copy.deepcopy(mac)
        self.params = list(self.mac.parameters())

        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())

        if self.args.optim == 'adam':
            self.optimiser = Adam(params=self.params, lr=args.lr)
        else:
            self.optimiser = RMSprop(params=self.params, lr=args.lr)

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        bs = batch.batch_size
        max_t = batch.max_seq_length
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :]

        critic_mask = mask.clone()
        mask = mask.repeat(1, 1, self.n_agents).view(-1)

        advantages, td_error, targets_taken, log_pi_taken, entropy = self._calculate_advs(batch, rewards, terminated, actions, avail_actions,
                                                        critic_mask, bs, max_t)

        pg_loss = - ((advantages.detach() * log_pi_taken) * mask).sum() / mask.sum()
        vf_loss = ((td_error ** 2) * mask).sum() / mask.sum()
        entropy[mask == 0] = 0
        entropy_loss = (entropy * mask).sum() / mask.sum()

        coma_loss = pg_loss + self.args.vf_coef * vf_loss
        if self.args.ent_coef:
            coma_loss -= self.args.ent_coef * entropy_loss

        # Optimise agents
        self.optimiser.zero_grad()
        coma_loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()


        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("critic_loss", ((td_error ** 2) * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("td_error_abs", (td_error.abs() * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("q_taken_mean", (targets_taken * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("target_mean", ((targets_taken + advantages) * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("pg_loss", - ((advantages.detach() * log_pi_taken) * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("advantage_mean", (advantages * mask).sum().item() / mask.sum().item(), t_env)
            self.logger.log_stat("coma_loss", coma_loss.item(), t_env)
            self.logger.log_stat("entropy_loss", entropy_loss.item(), t_env)
            self.logger.log_stat("agent_grad_norm", grad_norm, t_env)
            # self.logger.log_stat("pi_max", (pi.max(dim=1)[0] * mask).sum().item() / mask.sum().item(), t_env)
            self.log_stats_t = t_env

    def _calculate_advs(self, batch, rewards, terminated, actions, avail_actions, mask, bs, max_t):
        mac_out = []
        q_outs = []
        # Roll out experiences
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_out, q_out = self.mac.forward(batch, t=t)
            mac_out.append(agent_out)
            q_outs.append(q_out)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        q_outs = th.stack(q_outs, dim=1)  # Concat over time

        # Mask out unavailable actions, renormalise (as in action selection)
        # mac_out[avail_actions == 0] = 0
        # mac_out = mac_out/(mac_out.sum(dim=-1, keepdim=True) + 1e-5)

        # Calculated baseline
        pi = mac_out[:, :-1]  #[bs, t, n_agents, n_actions]
        pi_taken = th.gather(pi, dim=-1, index=actions[:, :-1]).squeeze(-1)    #[bs, t, n_agents]
        action_mask = mask.repeat(1, 1, self.n_agents)
        pi_taken[action_mask == 0] = 1.0
        log_pi_taken = th.log(pi_taken).reshape(-1)

        # Calculate entropy
        entropy = categorical_entropy(pi).reshape(-1)  #[bs, t, n_agents, 1]

        # Calculate q targets
        targets_taken = q_outs.squeeze(-1)   #[bs, t, n_agents]
        if self.args.mixer:
            targets_taken = self.mixer(targets_taken, batch["state"][:, :]) #[bs, t, 1]

        # Calculate td-lambda targets
        targets = build_td_lambda_targets(rewards, terminated, mask, targets_taken, self.n_agents, self.args.gamma, self.args.td_lambda)

        advantages = targets - targets_taken[:, :-1]
        advantages = advantages.unsqueeze(2).repeat(1, 1, self.n_agents, 1).reshape(-1)

        td_error = targets_taken[:, :-1] - targets.detach()
        td_error = td_error.unsqueeze(2).repeat(1, 1, self.n_agents, 1).reshape(-1)


        return advantages, td_error, targets_taken[:, :-1].unsqueeze(2).repeat(1, 1, self.n_agents, 1).reshape(-1), log_pi_taken, entropy


    def cuda(self):
        self.mac.cuda()
        if self.args.mixer:
            self.mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.args.mixer:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        if self.args.mixer:
            self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
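
# --- Illustrative sketch (not from the original repository) -----------------------
# A minimal re-derivation of the TD(lambda) target recursion behind
# build_td_lambda_targets, as used by PGLearner_v2 (and MAXQLearner): a backward
# pass that blends 1-step bootstraps with longer returns. Argument shapes follow
# the learners above; the function name is an assumption for illustration.
import torch as th


def td_lambda_targets(rewards, terminated, mask, target_qs, gamma, td_lambda):
    # rewards / terminated / mask: (bs, T-1, 1); target_qs: (bs, T, 1)
    ret = target_qs.new_zeros(*target_qs.shape)
    ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
    for t in range(ret.shape[1] - 2, -1, -1):
        ret[:, t] = td_lambda * gamma * ret[:, t + 1] + mask[:, t] * (
            rewards[:, t]
            + (1 - td_lambda) * gamma * target_qs[:, t + 1] * (1 - terminated[:, t]))
    # Targets for timesteps 0..T-2
    return ret[:, :-1]
# -----------------------------------------------------------------------------------
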
Example #11
class SCLearner:
    def __init__(self, mac, scheme, logger, args):
        torch.autograd.set_detect_anomaly(True)
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.mac = mac
        self.logger = logger

        self.last_target_update_step = 0
        self.critic_training_steps = 0

        self.log_stats_t = -self.args.learner_log_interval - 1

        self.control_critic = SCControlCritic(scheme, args)
        self.execution_critic = SCExecutionCritic(scheme, args)
        self.target_control_critic = copy.deepcopy(self.control_critic)
        self.target_execution_critic = copy.deepcopy(self.execution_critic)

        self.control_actor_params = list(self.mac.agent_dlstm_parameters()) + list(self.mac.latent_state_encoder_parameters())
        self.execution_actor_params = list(self.mac.agent_lstm_parameters())
        self.control_critic_params = list(self.control_critic.parameters())
        self.execution_critic_params = list(self.execution_critic.parameters())

        self.control_mixer = None
        if args.control_mixer is not None:
            if args.control_mixer == "vdn":
                self.control_mixer = VDNMixer()
            elif args.control_mixer == "qmix":
                self.control_mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.control_mixer))
            self.target_control_mixer = copy.deepcopy(self.control_mixer)
            self.control_critic_params += list(self.control_mixer.parameters())

        self.execution_mixer = None
        if args.execution_mixer is not None:
            if args.execution_mixer == "vdn":
                self.execution_mixer = VDNMixer()
            elif args.execution_mixer == "qmix":
                self.execution_mixer = DirMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.execution_mixer))
            self.target_execution_mixer = copy.deepcopy(self.execution_mixer)
            self.execution_critic_params += list(self.execution_mixer.parameters())     

        self.control_actor_optimiser = RMSprop(params=self.control_actor_params, lr=args.control_actor_lr, alpha=args.optim_alpha, eps=args.optim_eps)
        self.control_critic_optimiser = RMSprop(params=self.control_critic_params, lr=args.control_critic_lr, alpha=args.optim_alpha, eps=args.optim_eps)
        self.execution_actor_optimiser = RMSprop(params=self.execution_actor_params, lr=args.execution_actor_lr, alpha=args.optim_alpha, eps=args.optim_eps)
        self.execution_critic_optimiser = RMSprop(params=self.execution_critic_params, lr=args.execution_critic_lr, alpha=args.optim_alpha, eps=args.optim_eps)

        # self.target_mac = copy.deepcopy(mac)
        # FIXME: implement double mac

    # control:
    #   - decentralized actor pi(.|.): the dLSTM that proposes goals/rules
    #   - decentralized critic q(.|.) with mixer: estimates environment rewards

    # execution:
    #   - decentralized actor pi(.|.): the LSTM that selects primitive actions
    #   - decentralized critic q(.|.) with mixer: estimates directional contributions in latent-state space

    def train(self, batch, t_env, episode_num):
        bs = batch.batch_size
        max_t = batch.max_seq_length
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"][:, :-1]

        dirs_vals, execution_critic_train_stats = self._train_execution_critic(batch, terminated, mask)
        # [bs, seq_len, n_agents, latent_state_dim]
        q_vals, control_critic_train_stats = self._train_control_critic(batch, rewards, terminated, mask)
        # [bs, seq_len, n_agents]
        self.critic_training_steps += 1

        actions = actions[:, :-1]

        lstm_out = []
        dlstm_query_out = []
        dlstm_key_out = []
        dlstm_rule_out = []
        lstm_r = []
        self.mac.init_hidden(bs)
        self.mac.init_goals(bs)
        for t in range(batch["reward"].shape[1] - 1):
            # skip inferring latent state
            lstm_output, dlstm_output = self.mac.forward(batch, t)
            lstm_out.append(lstm_output)  # [batch_num][n_agents][n_actions]
            dlstm_query_out.append(dlstm_output[0])
            dlstm_key_out.append(dlstm_output[1])
            dlstm_rule_out.append(dlstm_output[2])
            intr_r = self._get_intrinsic_r(dirs_vals, t)
            lstm_r.append(self.args.instr_r_rate * intr_r + q_vals[:, t])
        lstm_out = torch.stack(lstm_out, dim=1)  # [batch_num][seq][n_agents][n_actions]
        dlstm_query_out = torch.stack(dlstm_query_out, dim=1)
        dlstm_key_out = torch.stack(dlstm_key_out, dim=1)
        dlstm_rule_out = torch.stack(dlstm_rule_out, dim=1)
        lstm_r = torch.stack(lstm_r, dim=1)

        dlstm_loss_partial = []
        for t in range(batch["reward"].shape[1] - self.args.horizon-1):  # FIXEME: can implement slice instead of t iterations
            dlstm_loss_partial_t = self._get_dlistm_partial(dirs_vals, dlstm_query_out, dlstm_key_out, dlstm_rule_out,
                                                              t)
            dlstm_loss_partial.append(dlstm_loss_partial_t)
        dlstm_loss_partial = torch.stack(dlstm_loss_partial, dim=1)  # [bs, seq_len-c, n_agents]

        # Mask out unavailable actions, renormalise (as in action selection)
        lstm_out[avail_actions == 0] = 0  # [batch_num][seq][n_agents][n_actions]
        lstm_out = lstm_out/lstm_out.sum(dim=-1, keepdim=True)
        lstm_out[avail_actions == 0] = 0

        # FIXME: implement q baseline
        q_vals = q_vals[:, :-self.args.horizon]
        q_vals = q_vals.reshape(-1, 1).squeeze(1)
        lstm_r = lstm_r.reshape(-1, 1).squeeze(1)
        pi = lstm_out.reshape(-1, self.n_actions)

        mask_dlstm = mask.clone().repeat(1, 1, self.n_agents)[:, :-self.args.horizon].reshape(-1, 1).squeeze(1)
        mask = mask.repeat(1, 1, self.n_agents).view(-1)
        # print("mask_dlstm shape:{}".format(mask_dlstm.shape))

        pi_taken = torch.gather(pi, dim=1, index=actions.reshape(-1, 1)).squeeze(1)
        pi_taken[mask == 0] = 1.0
        log_pi_taken = torch.log(pi_taken)

        dlstm_loss_partial = dlstm_loss_partial.reshape(-1, 1).squeeze(1)
        dlstm_loss_partial[mask_dlstm == 0] = 0.0

        dlstm_loss = ((dlstm_loss_partial * q_vals.detach()) * mask_dlstm).sum() / mask_dlstm.sum()
        dlstm_loss += self.mac.compute_lat_state_kl_div()
        lstm_loss = ((log_pi_taken * lstm_r) * mask).sum() / mask.sum()

        self.control_actor_optimiser.zero_grad()
        dlstm_loss.backward(retain_graph=True)
        dlstm_grad_norm = torch.nn.utils.clip_grad_norm_(self.control_actor_params, self.args.grad_norm_clip)
        self.control_actor_optimiser.step()

        self.execution_actor_optimiser.zero_grad()
        lstm_loss.backward()
        lstm_grad_norm = torch.nn.utils.clip_grad_norm_(self.execution_actor_params, self.args.grad_norm_clip)
        self.execution_actor_optimiser.step()

        if (self.critic_training_steps - self.last_target_update_step) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_step = self.critic_training_steps

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            ts_logged = len(control_critic_train_stats["control_critic_td_loss"])
            for key in ["control_critic_td_loss", "control_critic_grad_norm"]:
                self.logger.log_stat(key, sum(control_critic_train_stats[key])/ts_logged, t_env)

            ts_logged = len(execution_critic_train_stats["execution_critic_td_loss"])
            for key in ["execution_critic_td_loss", "execution_critic_grad_norm"]:
                self.logger.log_stat(key, sum(execution_critic_train_stats[key]) / ts_logged, t_env)

            self.logger.log_stat("control_actor_loss", dlstm_loss.item(), t_env)
            self.logger.log_stat("execution_actor_loss", lstm_loss.item(), t_env)
            self.logger.log_stat("control_grad_norm", dlstm_grad_norm, t_env)
            self.logger.log_stat("execution_grad_norm", lstm_grad_norm, t_env)
            self.log_stats_t = t_env

    def _get_intrinsic_r(self, dirs_val, t_ep):
        r = 0
        for t in range(0, min(self.args.horizon, t_ep)):
            idx = t+1
            dec_lat_states = dirs_val[:, t_ep-idx]  # [bs][n_agents][lat_dim]
            goals = self.mac.get_prev_goal(idx)  # [batch_num][n_agents][rule_dim]
            # calculate the cosine similarity between agent's estimated contribution and its goal
            r += F.cosine_similarity(dec_lat_states, goals, dim=2)
            # penalize if agent has no goal
            r -= self.args.cum_goal_zeros_penalty_rate * F.cosine_similarity(torch.zeros_like(goals), goals, dim=2)
        return r/self.args.horizon # [bs][n_agents]

    def _get_dlstm_partial(self, dirs_vals, queries, keys, rules, t):
        # calculate d_cos(z_{t+c} - z_t, g_t(theta)) in a multi-agent context

        # print("dirs_vals shape: {}".format(dirs_vals.shape))
        dir_vals_t = dirs_vals[:, t]
        dir_vals_tc = dirs_vals[:, t+self.args.horizon]
        query = queries[:, t]
        key = keys[:, t]
        rule = rules[:, t]

        dlstm_partial = []
        for i in range(self.n_agents):
            # calc decomposed latent's projection on rule at t
            p_i = [torch.einsum('ij,ij->i', dir_vals_t[:, j], rule[:, i]) for j in range(self.n_agents)]
            # rule is already normalized
            p_i = torch.stack(p_i, dim=1)  # [bs, n_agents]
            p_t = [torch.einsum('i,ij->ij', p_i[:, j], rule[:, i]) for j in range(self.n_agents)]
            p_t = torch.stack(p_t, dim=1)  # [bs, n_agents, latent_state_dim]

            # calc decomposed latent's projection on rule at t+c
            p_i_c = [torch.einsum('ij,ij->i', dir_vals_tc[:, j], rule[:, i]) for j in range(self.n_agents)]
            # rule is already normalized
            p_i_c = torch.stack(p_i_c, dim=1)  # [bs, n_agents]
            p_tc = [torch.einsum('i,ij->ij', p_i_c[:, j], rule[:, i]) for j in range(self.n_agents)]
            p_tc = torch.stack(p_tc, dim=1)  # [bs, n_agents, latent_state_dim]

            p_diff = p_t - p_tc   # [bs, n_agents, latent_state_dim]

            qk_i = [torch.einsum('ij,ij->i', query[:, i], key[:, j])  # 2d torch.dot
                    / self.args.attention_noramlization_squared_dk for j in range(self.n_agents)]
            qk_i_t = torch.stack(qk_i, dim=1)  # [bs, n_agents]
            a_i = torch.nn.functional.softmax(qk_i_t, dim=1)
            # eq 2 in TarMac  # FIXME: only save attention and use attention directly

            dlstm_partial_i = [torch.einsum("i,ij->ij", 1/a_i[:, j], p_diff[:, j]) for j in range(self.n_agents)]
            dlstm_partial_i = torch.stack(dlstm_partial_i, dim=1).sum(dim=1)  # [bs, latent_state_dim]
            dlstm_partial_i = F.cosine_similarity(dlstm_partial_i, rule[:, i], dim=1)

            dlstm_partial.append(dlstm_partial_i)

        return torch.stack(dlstm_partial, dim=1)  # [bs, n_agents]

    def _train_control_critic(self, batch, rewards, terminated, mask):  # FIXME: terminated?
        bs = batch.batch_size
        qs_vals = []

        running_log = {
            "control_critic_td_loss": [],
            "control_critic_grad_norm": [],
        }

        for t in range(batch["reward"].shape[1]-1):
            # print(mask.shape)
            mask_t = mask[:, t]
            if mask_t.sum() == 0:
                continue

            qs_t = self.control_critic(batch, t)  # [bs, n_agents]
            qs_vals.append(qs_t.clone())  # list over t of [bs, n_agents]
            qs_tot = self.control_mixer(qs_t.unsqueeze(1), batch["latent_state"][:, t].unsqueeze(1))
            
            target_qs_t = self.target_control_critic(batch, t+1)
            target_qs_tot = self.target_control_mixer(target_qs_t.unsqueeze(1), batch["latent_state"][:, t+1].unsqueeze(1))  

            td_loss = qs_tot - (rewards[:, t] + self.args.control_discount*(1 - terminated[:,t])*target_qs_tot.detach())
            td_loss = (td_loss**2).sum()

            self.control_critic_optimiser.zero_grad()
            td_loss.backward(retain_graph=True)
            grad_norm = torch.nn.utils.clip_grad_norm_(self.control_critic_params, self.args.grad_norm_clip)
            self.control_critic_optimiser.step()

            running_log["control_critic_td_loss"].append(td_loss.item())
            running_log["control_critic_grad_norm"].append(grad_norm)
        
        return torch.stack(qs_vals, dim=1), running_log

    def _train_execution_critic(self, batch, terminated, mask):
        bs = batch.batch_size
        dirs_tot_vals = []

        running_log = {
            "execution_critic_td_loss": [],
            "execution_critic_grad_norm": [],
        }

        for t in range(batch["reward"].shape[1]-1):
            # print(mask.shape)
            mask_t = mask[:, t]
            if mask_t.sum() == 0:
                continue

            # distance between latent states
            lat_state_target_dis = torch.sub(batch["latent_state"][:, t+1], batch["latent_state"][:, t])
            # [bs_size, latent_state_dim]

            dirs_t = self.execution_critic(batch, t)  # [bs, n_agents, latent_state_dim]
            dirs_tot_vals.append(dirs_t.clone())  # list over t of [bs, n_agents, latent_state_dim]
            dirs_tot = self.execution_mixer(dirs_t.unsqueeze(1), batch["latent_state"][:, t].unsqueeze(1))

            target_dirs_t = self.target_execution_critic(batch, t+1)
            target_dirs_tot = self.target_execution_mixer(target_dirs_t.unsqueeze(1), batch["latent_state"][:, t+1].unsqueeze(1))  

            td_loss = dirs_tot - (lat_state_target_dis + self.args.execution_discount*(1 - terminated[:, t])*target_dirs_tot.detach())
            td_loss = (td_loss ** 2).sum()

            self.execution_critic_optimiser.zero_grad()
            td_loss.backward(retain_graph=True)
            grad_norm = torch.nn.utils.clip_grad_norm_(self.execution_critic_params, self.args.grad_norm_clip)
            self.execution_critic_optimiser.step()

            running_log["execution_critic_td_loss"].append(td_loss.item())
            running_log["execution_critic_grad_norm"].append(grad_norm)
        
        return torch.stack(dirs_tot_vals, dim=1), running_log

    def _update_targets(self):
        self.target_control_critic.load_state_dict(self.control_critic.state_dict())
        self.target_control_mixer.load_state_dict(self.control_mixer.state_dict())
        self.target_execution_critic.load_state_dict(self.execution_critic.state_dict())
        self.target_execution_mixer.load_state_dict(self.execution_mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.control_critic.cuda()
        self.execution_critic.cuda()
        self.target_control_critic.cuda()
        self.target_execution_critic.cuda()
        self.control_mixer.cuda()
        self.execution_mixer.cuda()
        self.target_control_mixer.cuda()
        self.target_execution_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        torch.save(self.control_critic.state_dict(), "{}/control_critic.th".format(path))
        torch.save(self.execution_critic.state_dict(), "{}/execution_critic.th".format(path))
        torch.save(self.control_mixer.state_dict(), "{}/control_mixer.th".format(path))
        torch.save(self.execution_mixer.state_dict(), "{}/execution_mixer.th".format(path))
        torch.save(self.control_critic_optimiser.state_dict(), "{}/control_critic_opt.th".format(path))
        torch.save(self.control_actor_optimiser.state_dict(), "{}/control_actor_opt.th".format(path))
        torch.save(self.execution_critic_optimiser.state_dict(), "{}/execution_critic_opt.th".format(path))
        torch.save(self.execution_actor_optimiser.state_dict(), "{}/execution_actor_opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        self.control_critic.load_state_dict(torch.load(
            "{}/control_critic.th".format(path), map_location=lambda storage, loc: storage))
        # Not quite right but I don't want to save target networks
        self.execution_critic.load_state_dict(torch.load(
            "{}/execution_critic.th".format(path), map_location=lambda storage, loc: storage))
        self.control_mixer.load_state_dict(
            torch.load("{}/control_mixer.th".format(path), map_location=lambda storage, loc: storage))
        self.execution_mixer.load_state_dict(
            torch.load("{}/execution_mixer.th".format(path), map_location=lambda storage, loc: storage))
        self.control_actor_optimiser.load_state_dict(
            torch.load("{}/control_actor_opt.th".format(path), map_location=lambda storage, loc: storage))
        self.execution_actor_optimiser.load_state_dict(
            torch.load("{}/execution_actor_opt.th".format(path), map_location=lambda storage, loc: storage))
        self.control_critic_optimiser.load_state_dict(
            torch.load("{}/control_critic_opt.th".format(path), map_location=lambda storage, loc: storage))
        self.execution_critic_optimiser.load_state_dict(
            torch.load("{}/execution_critic_opt.th".format(path), map_location=lambda storage, loc: storage))
Exemple #12
0
class QLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.params = list(mac.parameters())  # Register the agents' network parameters.

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(
                self.mixer.parameters())  # Also register the mixing network's parameters.
            self.target_mixer = copy.deepcopy(self.mixer)

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Gather the quantities needed for training.
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Compute each agent's individual Q-values.
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-values for the actions each agent actually took.
        chosen_action_qvals = th.gather(
            mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim

        # Compute each agent's Q-values from the target network.
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # The target network's Q-values for the first timestep are not needed.
        target_mac_out = th.stack(target_mac_out[1:],
                                  dim=1)  # Concat over time

        # Mask out unavailable actions.
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999

        if self.args.double_q:
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3,
                                         cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        # Compute Q_tot and the target mixing network's Q_tot.
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"][:, 1:])

        # Compute the TD loss for the mixing network.
        targets = rewards + self.args.gamma * (1 -
                                               terminated) * target_max_qvals
        td_error = (chosen_action_qvals - targets.detach())
        mask = mask.expand_as(td_error)
        masked_td_error = td_error * mask
        loss = (masked_td_error**2).sum() / mask.sum()

        # Backpropagate to update the weights of both the mixing network and the agents.
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        # Update the target networks at a fixed interval.
        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)

            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs",
                (masked_td_error.abs().sum().item() / mask_elems), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
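A minimal sketch of how a learner like this is usually driven in a PyMARL-style run loop; runner, buffer, args and the episode counter are assumed to exist outside this snippet:

episode_batch = runner.run(test_mode=False)
buffer.insert_episode_batch(episode_batch)

if buffer.can_sample(args.batch_size):
    episode_sample = buffer.sample(args.batch_size)
    # Truncate the batch to the longest filled timestep before training
    max_ep_t = episode_sample.max_t_filled()
    episode_sample = episode_sample[:, :max_ep_t]
    if episode_sample.device != args.device:
        episode_sample.to(args.device)
    learner.train(episode_sample, runner.t_env, episode)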
Exemple #13
0
class FacMADDPGLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.n_agents = args.n_agents
        self.n_actions = args.n_actions
        self.logger = logger

        self.mac = mac
        self.target_mac = copy.deepcopy(self.mac)
        self.agent_params = list(mac.parameters())

        self.critic = FacMADDPGCritic(scheme, args)
        self.target_critic = copy.deepcopy(self.critic)
        self.critic_params = list(self.critic.parameters())

        self.mixer = None
        if args.mixer is not None and self.args.n_agents > 1:  # if just 1 agent do not mix anything
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "vdn-s":
                self.mixer = VDNState(args)
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.critic_params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        if getattr(self.args, "optimizer", "rmsprop") == "rmsprop":
            self.agent_optimiser = RMSprop(params=self.agent_params,
                                           lr=args.lr,
                                           alpha=args.optim_alpha,
                                           eps=args.optim_eps)
        elif getattr(self.args, "optimizer", "rmsprop") == "adam":
            self.agent_optimiser = Adam(params=self.agent_params,
                                        lr=args.lr,
                                        eps=getattr(args, "optimizer_epsilon",
                                                    10E-8))
        else:
            raise Exception("unknown optimizer {}".format(
                getattr(self.args, "optimizer", "rmsprop")))

        if getattr(self.args, "optimizer", "rmsprop") == "rmsprop":
            self.critic_optimiser = RMSprop(params=self.critic_params,
                                            lr=args.critic_lr,
                                            alpha=args.optim_alpha,
                                            eps=args.optim_eps)
        elif getattr(self.args, "optimizer", "rmsprop") == "adam":
            self.critic_optimiser = Adam(params=self.critic_params,
                                         lr=args.critic_lr,
                                         eps=getattr(args, "optimizer_epsilon",
                                                     10E-8))
        else:
            raise Exception("unknown optimizer {}".format(
                getattr(self.args, "optimizer", "rmsprop")))

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = 1 - terminated

        # Train the critic
        inputs = self._build_inputs(batch, t=0)

        q_taken, _ = self.critic(inputs, actions.detach())
        q_taken = q_taken.view(batch.batch_size, -1, 1)

        if self.mixer is not None:
            q_taken = self.mixer(q_taken, batch["state"][:, :-1])

        # Use the target actor and target critic network to compute the target q
        target_actions = []
        for t in range(1, batch.max_seq_length):
            agent_target_outs = self.target_mac.select_actions(batch,
                                                               t_ep=t,
                                                               t_env=None,
                                                               test_mode=True)
            target_actions.append(agent_target_outs)
        target_actions = th.stack(target_actions, dim=1)  # Concat over time

        target_inputs = self._build_inputs(batch, t=1)
        target_vals, _ = self.target_critic(target_inputs,
                                            target_actions.detach())
        target_vals = target_vals.view(batch.batch_size, -1, 1)

        if self.mixer is not None:
            target_vals = self.target_mixer(target_vals, batch["state"][:, 1:])

        targets = rewards.expand_as(target_vals) + self.args.gamma * (
            1 - terminated.expand_as(target_vals)) * target_vals
        td_error = (q_taken - targets.detach())
        masked_td_error = td_error
        loss = (masked_td_error**2).mean()

        # Optimise the critic
        self.critic_optimiser.zero_grad()
        loss.backward()
        critic_grad_norm = th.nn.utils.clip_grad_norm_(
            self.critic_params, self.args.grad_norm_clip)
        self.critic_optimiser.step()

        # Train the actor
        pi = self.mac.forward(batch, t=0, select_actions=True)["actions"]
        q, _ = self.critic(self._build_inputs(batch, t=0), pi)
        q = q.view(batch.batch_size, -1, 1)

        # Use the joint Q to update the actor
        if self.mixer is not None:
            q = self.mixer(q, batch["state"][:, :-1])

        pg_loss = -q.mean() + (pi**2).mean() * 1e-3

        # Optimise the agents
        self.agent_optimiser.zero_grad()
        th.autograd.set_detect_anomaly(True)  # DBG
        pg_loss.backward()
        agent_grad_norm = th.nn.utils.clip_grad_norm_(self.agent_params,
                                                      self.args.grad_norm_clip)
        self.agent_optimiser.step()

        if getattr(self.args, "target_update_mode", "hard") == "hard":
            self._update_targets()
        elif getattr(self.args, "target_update_mode",
                     "hard") in ["soft", "exponential_moving_average"]:
            self._update_targets_soft(
                tau=getattr(self.args, "target_update_tau", 0.001))
        else:
            raise Exception("unknown target update mode: {}!".format(
                getattr(self.args, "target_update_mode", "hard")))

    def _update_targets_soft(self, tau):
        for target_param, param in zip(self.target_mac.parameters(),
                                       self.mac.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - tau) +
                                    param.data * tau)

        for target_param, param in zip(self.target_critic.parameters(),
                                       self.critic.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - tau) +
                                    param.data * tau)

        if self.mixer is not None:
            for target_param, param in zip(self.target_mixer.parameters(),
                                           self.mixer.parameters()):
                target_param.data.copy_(target_param.data * (1.0 - tau) +
                                        param.data * tau)

        if self.args.verbose:
            self.logger.console_logger.info(
                "Updated all target networks (soft update tau={})".format(tau))

    def _build_inputs(self, batch, t):
        bs = batch.batch_size
        inputs = []
        inputs.append(batch["obs"][:, t])

        if self.args.obs_last_action:
            if t == 0:
                inputs.append(th.zeros_like(batch["actions"][:, t]))
            else:
                inputs.append(batch["actions"][:, t - 1])
        if self.args.obs_agent_id:
            inputs.append(
                th.eye(self.n_agents,
                       device=batch.device).unsqueeze(0).expand(bs, -1, -1))

        inputs = th.cat([x.reshape(bs * self.n_agents, -1) for x in inputs],
                        dim=1)
        return inputs

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        self.target_critic.load_state_dict(self.critic.state_dict())
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated all target networks")

    def cuda(self, device="cuda:0"):
        self.mac.cuda(device=device)
        self.target_mac.cuda(device=device)
        self.critic.cuda(device=device)
        self.target_critic.cuda(device=device)
        if self.mixer is not None:
            self.mixer.cuda(device=device)
            self.target_mixer.cuda(device=device)

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.agent_optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.agent_optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
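The obs_agent_id branch of _build_inputs above appends a one-hot agent identifier to each agent's observation so that a shared network can tell agents apart. A tiny illustration of the tensor it builds (the sizes here are arbitrary):

import torch as th

bs, n_agents = 3, 2
agent_ids = th.eye(n_agents).unsqueeze(0).expand(bs, -1, -1)  # [bs, n_agents, n_agents]
# agent_ids[:, i] is the one-hot id of agent i, repeated across the batch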
Exemple #14
0
class DDPGQLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.mac_params = list(mac.parameters())
        self.params = list(self.mac.parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        assert args.mixer is not None
        if args.mixer is not None:
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.mixer_params = list(self.mixer.parameters())
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        # Central Q
        # TODO: Clean this mess up!
        self.central_mac = None
        assert self.args.central_mixer == "ff"
        self.central_mixer = QMixerCentralFF(args)
        assert args.central_mac == "basic_central_mac"
        self.central_mac = mac_REGISTRY[args.central_mac](
            scheme, args
        )  # Groups aren't used in the CentralBasicController. Little hacky
        self.target_central_mac = copy.deepcopy(self.central_mac)
        self.params += list(self.central_mac.parameters())
        self.params += list(self.central_mixer.parameters())
        self.target_central_mixer = copy.deepcopy(self.central_mixer)

        self.optimiser = RMSprop(params=self.params,
                                 lr=args.lr,
                                 alpha=args.optim_alpha,
                                 eps=args.optim_eps)

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals_agents = th.gather(mac_out[:, :-1],
                                               dim=3,
                                               index=actions).squeeze(
                                                   3)  # Remove the last dim
        chosen_action_qvals = chosen_action_qvals_agents

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # We don't need the first timesteps Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[:],
                                  dim=1)  # Concat across time

        # Max over target Q-Values
        if self.args.double_q:
            raise Exception("No double q for DDPG")
        else:
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            _, cur_max_actions = mac_out_detach[:, :].max(dim=3, keepdim=True)
            target_mac_out_detach = target_mac_out.clone().detach()
            target_mac_out_detach[avail_actions == 0] = -9999999
            _, tar_max_actions = target_mac_out_detach[:, :].max(dim=3,
                                                                 keepdim=True)

        # Central MAC stuff
        central_mac_out = []
        self.central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.central_mac.forward(batch, t=t)
            central_mac_out.append(agent_outs)
        central_mac_out = th.stack(central_mac_out, dim=1)  # Concat over time
        central_chosen_action_qvals_agents = th.gather(
            central_mac_out[:, :-1],
            dim=3,
            index=actions.unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(
                    3)  # Remove the last dim

        central_target_mac_out = []
        self.target_central_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_central_mac.forward(batch, t=t)
            central_target_mac_out.append(target_agent_outs)
        central_target_mac_out = th.stack(central_target_mac_out[:],
                                          dim=1)  # Concat across time
        # Use the Qmix max actions
        central_target_max_agent_qvals = th.gather(
            central_target_mac_out[:, :], 3,
            cur_max_actions[:, :].unsqueeze(4).repeat(
                1, 1, 1, 1, self.args.central_action_embed)).squeeze(3)
        # central_target_max_agent_qvals = th.gather(central_target_mac_out[:,:], 3, tar_max_actions[:,:].unsqueeze(4).repeat(1,1,1,1,self.args.central_action_embed)).squeeze(3)
        # ---

        # Mix
        target_max_qvals = self.target_central_mixer(
            central_target_max_agent_qvals[:, 1:], batch["state"][:, 1:])

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 -
                                               terminated) * target_max_qvals

        # Bad naming, it's not a qmix_actor
        qmix_actor_loss = 0
        for agent in range(self.args.n_agents):
            target_chosen_qvals = central_target_max_agent_qvals[:, :-1]
            chosen_utils = target_chosen_qvals.detach().clone()
            # For each agent compute Q(u_i, u_{-i}) for each u_i, keeping u_{-i} fixed
            qtots = []
            for action in range(self.args.n_actions):
                chosen_utils[:, :,
                             agent] = central_target_mac_out[:, :-1, agent,
                                                             action]
                new_q_tot = self.target_central_mixer(chosen_utils,
                                                      batch["state"][:, :-1])
                qtots.append(new_q_tot)
            agent_q_tots = th.cat(qtots, dim=2)

            qs_to_use = agent_q_tots

            # Train via ST Gumbel Softmax
            log_agent_policy = F.gumbel_softmax(mac_out[:, :-1, agent],
                                                hard=True,
                                                dim=2,
                                                tau=self.args.policy_temp)
            # Train via expected policy gradient
            # log_agent_policy = F.softmax(mac_out[:, :-1, agent] / self.args.policy_temp, dim=2)
            agent_policy_loss = (
                (log_agent_policy * qs_to_use.detach() *
                 avail_actions[:, :-1, agent].float()).sum(
                     dim=2, keepdim=True) * mask).sum() / mask.sum()

            qmix_actor_loss = qmix_actor_loss + agent_policy_loss

        # Logit entropy
        ps = F.softmax(mac_out[:, :-1], dim=3) * avail_actions[:, :-1]
        log_ps = F.log_softmax(mac_out[:, :-1], dim=3) * avail_actions[:, :-1]
        logit_entropy = -(((ps * log_ps).sum(dim=3) * mask).sum() / mask.sum())

        # Training central Q
        central_chosen_action_qvals = self.central_mixer(
            central_chosen_action_qvals_agents, batch["state"][:, :-1])
        central_td_error = (central_chosen_action_qvals - targets.detach())
        central_mask = mask.expand_as(central_td_error)
        central_masked_td_error = central_td_error * central_mask
        central_loss = (central_masked_td_error**2).sum() / mask.sum()

        loss = -self.args.qmix_loss * qmix_actor_loss + self.args.central_loss * central_loss - self.args.logit_entropy * logit_entropy

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()

        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.grad_norm = grad_norm

        self.optimiser.step()

        if (episode_num - self.last_target_update_episode
            ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("actor_loss", qmix_actor_loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("central_loss", central_loss.item(), t_env)
            self.logger.log_stat("logit_entropy", logit_entropy.item(), t_env)
            self.log_stats_t = t_env

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        if self.central_mac is not None:
            self.target_central_mac.load_state(self.central_mac)
        self.target_central_mixer.load_state_dict(
            self.central_mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
        if self.central_mac is not None:
            self.central_mac.cuda()
            self.target_central_mac.cuda()
        self.central_mixer.cuda()
        self.target_central_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
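The actor term above relies on torch.nn.functional.gumbel_softmax with hard=True, i.e. the straight-through estimator: the forward pass produces a one-hot sample while gradients flow through the underlying soft relaxation. A small standalone illustration (shapes are arbitrary):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 5, requires_grad=True)                 # [batch, n_actions]
sample = F.gumbel_softmax(logits, tau=1.0, hard=True, dim=-1)  # one-hot rows
sample.sum().backward()                                        # gradients still reach the logits
print(logits.grad.shape)                                       # torch.Size([4, 5])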
Exemple #15
0
class CQLearner:
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        self.params = list(mac.parameters())
        self.named_params = dict(mac.named_parameters())

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None and self.args.n_agents > 1:  # if just 1 agent do not mix anything
            if args.mixer == "vdn":
                self.mixer = VDNMixer()
            elif args.mixer == 'vdn-s':
                self.mixer = VDNState(args)
            elif args.mixer == "qmix":
                self.mixer = QMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.named_params.update(dict(self.mixer.named_parameters()))
            self.target_mixer = copy.deepcopy(self.mixer)

        if getattr(self.args, "optimizer", "rmsprop") == "rmsprop":
            self.optimiser = RMSprop(params=self.params,
                                     lr=args.lr,
                                     alpha=args.optim_alpha,
                                     eps=args.optim_eps)
        elif getattr(self.args, "optimizer", "rmsprop") == "adam":
            self.optimiser = Adam(params=self.params,
                                  lr=args.lr,
                                  eps=getattr(args, "optimizer_epsilon",
                                              10E-8))
        else:
            raise Exception("unknown optimizer {}".format(
                getattr(self.args, "optimizer", "rmsprop")))

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        # Get the relevant quantities
        rewards = batch["reward"][:, 0].unsqueeze(-1)
        terminated = batch["terminated"][:, 0].float().unsqueeze(-1)
        mask = 1 - terminated

        t = 0
        chosen_action_qvals, _ = self.mac.forward(
            batch, actions=batch["actions"][:, t:t + 1].detach(), t=t)

        t = 1
        best_target_action = self.target_mac.select_actions(batch,
                                                            t_ep=t,
                                                            t_env=None,
                                                            test_mode=True)
        target_max_qvals, _ = self.target_mac.forward(
            batch, t=t, actions=best_target_action.detach())

        # Mix
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(chosen_action_qvals,
                                             batch["state"][:, :-1])
            target_max_qvals = self.target_mixer(target_max_qvals,
                                                 batch["state"][:, 1:])
            chosen_action_qvals = chosen_action_qvals.squeeze(-2)
            target_max_qvals = target_max_qvals.squeeze(-2)
            rewards = rewards.squeeze(-2)
            terminated = terminated.squeeze(-2)

        # Calculate 1-step Q-Learning targets
        targets = rewards.expand_as(target_max_qvals) + self.args.gamma * (
            1 - terminated.expand_as(target_max_qvals)) * target_max_qvals

        # Td-error
        td_error = (chosen_action_qvals - targets.detach())

        # Normal L2 loss, take mean over actual data
        assert self.args.runner_scope == "transition", "Runner scope HAS to be transition!"
        loss = (td_error**2).mean()

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params,
                                                self.args.grad_norm_clip)
        self.optimiser.step()

        if getattr(self.args, "target_update_mode", "hard") == "hard":
            if (episode_num - self.last_target_update_episode
                ) / self.args.target_update_interval >= 1.0:
                self._update_targets()
                self.last_target_update_episode = episode_num
        elif getattr(self.args, "target_update_mode",
                     "hard") in ["soft", "exponential_moving_average"]:
            self._update_targets_soft(
                tau=getattr(self.args, "target_update_tau", 0.001))
        else:
            raise Exception("unknown target update mode: {}!".format(
                getattr(self.args, "target_update_mode", "hard")))

        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            self.logger.log_stat("weight_norm", (th.sum(
                th.cat([th.sum(p**2).unsqueeze(0)
                        for p in self.params]))**0.5).item(), t_env)
            self.logger.log_stat("q_taken_mean",
                                 (chosen_action_qvals * mask).sum().item() /
                                 (batch.batch_size * self.args.n_agents),
                                 t_env)
            self.logger.log_stat("target_mean", (targets * mask).sum().item() /
                                 (batch.batch_size * self.args.n_agents),
                                 t_env)
            self.log_stats_t = t_env

    def _update_targets_soft(self, tau):
        for target_param, param in zip(self.target_mac.parameters(),
                                       self.mac.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - tau) +
                                    param.data * tau)

        if self.mixer is not None:
            for target_param, param in zip(self.target_mixer.parameters(),
                                           self.mixer.parameters()):
                target_param.data.copy_(target_param.data * (1.0 - tau) +
                                        param.data * tau)

        if self.args.verbose:
            self.logger.console_logger.info(
                "Updated target network (soft update tau={})".format(tau))

    def _update_targets(self):
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")

    def cuda(self):
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()

    def save_models(self, path):
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))

    def load_models(self, path):
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load("{}/mixer.th".format(path),
                        map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(
            th.load("{}/opt.th".format(path),
                    map_location=lambda storage, loc: storage))
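The soft update used by _update_targets_soft above is the standard Polyak / exponential-moving-average rule theta_target <- tau * theta + (1 - tau) * theta_target. A generic sketch of the same idea as a standalone helper (illustrative only, not part of the class above):

import torch.nn as nn

def soft_update(target_net: nn.Module, source_net: nn.Module, tau: float):
    # Blend the source parameters into the target parameters in place
    for target_param, param in zip(target_net.parameters(), source_net.parameters()):
        target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)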