Example #1
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__()
        self.explore_noise = 0.1  # standard deviation of explore noise
        self.policy_noise = 0.2  # standard deviation of policy noise
        self.update_freq = 2  # delay update frequency, for soft target update

        self.act = Actor(net_dim, state_dim, action_dim).to(self.device)
        self.act_target = Actor(net_dim, state_dim, action_dim).to(self.device)
        self.act_target.load_state_dict(self.act.state_dict())

        self.cri = CriticTwin(net_dim, state_dim, action_dim).to(self.device)
        # target critic must be the same class as self.cri (CriticTwin),
        # otherwise load_state_dict below would fail
        self.cri_target = CriticTwin(net_dim, state_dim,
                                     action_dim).to(self.device)
        self.cri_target.load_state_dict(self.cri.state_dict())

        self.criterion = nn.MSELoss()
        self.optimizer = torch.optim.Adam(
            [{'params': self.act.parameters(), 'lr': learning_rate},
             {'params': self.cri.parameters(), 'lr': learning_rate}],
            lr=learning_rate)
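
The constructor above assumes Actor and CriticTwin network classes that are not shown in these snippets. Below is a minimal sketch of the interface the examples rely on (forward, get_action, get__q1_q2); the two-hidden-layer MLP layout and the tanh output range are assumptions, not taken from the snippets.

# Minimal sketch of the network classes assumed by the constructor above.
# Only the method names used in these examples are taken as given; the
# layer sizes and activations are assumptions chosen for illustration.
import torch
import torch.nn as nn


class Actor(nn.Module):
    def __init__(self, net_dim, state_dim, action_dim):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(state_dim, net_dim), nn.ReLU(),
                                 nn.Linear(net_dim, net_dim), nn.ReLU(),
                                 nn.Linear(net_dim, action_dim), nn.Tanh())

    def forward(self, state):
        return self.net(state)  # deterministic action in [-1, 1]

    def get_action(self, state, noise_std):
        action = self.net(state)
        noise = torch.randn_like(action) * noise_std  # exploration noise
        return (action + noise).clamp(-1.0, 1.0)


class CriticTwin(nn.Module):
    def __init__(self, net_dim, state_dim, action_dim):
        super().__init__()

        def build_q_net():
            return nn.Sequential(nn.Linear(state_dim + action_dim, net_dim),
                                 nn.ReLU(), nn.Linear(net_dim, net_dim),
                                 nn.ReLU(), nn.Linear(net_dim, 1))

        self.q1 = build_q_net()
        self.q2 = build_q_net()

    def forward(self, state, action):
        return self.q1(torch.cat((state, action), dim=1))  # single Q value

    def get__q1_q2(self, state, action):
        state_action = torch.cat((state, action), dim=1)
        return self.q1(state_action), self.q2(state_action)  # twin Q values
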
Example #2
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__()
        self.target_entropy = np.log(action_dim)
        self.alpha_log = torch.tensor((-np.log(action_dim) * np.e, ),
                                      requires_grad=True,
                                      dtype=torch.float32,
                                      device=self.device)

        self.act = ActorSAC(net_dim, state_dim, action_dim).to(self.device)
        self.act_target = ActorSAC(net_dim, state_dim,
                                   action_dim).to(self.device)
        self.act_target.load_state_dict(self.act.state_dict())

        self.cri = CriticTwin(
            net_dim,
            state_dim,
            action_dim,
        ).to(self.device)
        self.cri_target = CriticTwin(
            net_dim,
            state_dim,
            action_dim,
        ).to(self.device)
        self.cri_target.load_state_dict(self.cri.state_dict())

        self.criterion = nn.SmoothL1Loss()
        self.optimizer = torch.optim.Adam(
            [{'params': self.act.parameters(), 'lr': learning_rate},
             {'params': self.cri.parameters(), 'lr': learning_rate},
             {'params': (self.alpha_log, ), 'lr': learning_rate}],
            lr=learning_rate)
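
Here alpha_log stores the logarithm of the SAC entropy temperature, so alpha = exp(alpha_log), and target_entropy = log(action_dim) is the entropy level the temperature loss steers toward. A quick numeric check of the initial values, using a hypothetical action_dim of 4:

# Numeric check of the initial entropy temperature implied by the code above.
# action_dim = 4 is a hypothetical value chosen only for illustration.
import numpy as np

action_dim = 4
alpha_log_init = -np.log(action_dim) * np.e  # about -3.77
alpha_init = np.exp(alpha_log_init)          # alpha = exp(alpha_log), about 0.023
target_entropy = np.log(action_dim)          # about 1.39, used in the alpha loss
print(alpha_log_init, alpha_init, target_entropy)
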
Example #3
class AgentTD3(AgentDDPG):
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__(net_dim, state_dim, action_dim, learning_rate)
        self.explore_noise = 0.1  # standard deviation of explore noise
        self.policy_noise = 0.2  # standard deviation of policy noise
        self.update_freq = 2  # delay update frequency, for soft target update

        self.cri = CriticTwin(net_dim, state_dim, action_dim).to(self.device)
        self.cri_target = CriticTwin(net_dim, state_dim,
                                     action_dim).to(self.device)
        self.cri_target.load_state_dict(self.cri.state_dict())

        self.optimizer = torch.optim.Adam([{
            'params': self.act.parameters(),
            'lr': learning_rate
        }, {
            'params': self.cri.parameters(),
            'lr': learning_rate
        }])

    def update_policy(self, buffer, max_step, batch_size, repeat_times):
        buffer.update__now_len__before_sample()

        critic_obj = actor_obj = None
        for i in range(int(max_step * repeat_times)):
            with torch.no_grad():
                reward, mask, state, action, next_s = buffer.random_sample(
                    batch_size)

                next_a = self.act_target.get_action(
                    next_s, self.policy_noise)  # policy noise
                next_q = torch.min(*self.cri_target.get__q1_q2(
                    next_s, next_a))  # twin critics
                q_label = reward + mask * next_q

            q1, q2 = self.cri.get__q1_q2(state, action)
            critic_obj = self.criterion(q1, q_label) + self.criterion(
                q2, q_label)  # twin critics

            q_value_pg = self.act(state)  # policy gradient
            actor_obj = -self.cri_target(state, q_value_pg).mean()

            united_obj = actor_obj + critic_obj  # objective
            self.optimizer.zero_grad()
            united_obj.backward()
            self.optimizer.step()

            if i % self.update_freq == 0:  # delay update
                soft_target_update(self.cri_target, self.cri)
                soft_target_update(self.act_target, self.act)

        self.obj_a = actor_obj.item()
        self.obj_c = critic_obj.item()
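
update_policy calls a soft_target_update helper that is not shown in these snippets. It performs standard Polyak averaging of the online network into the target network; a plausible sketch is below, where the default tau of 5e-3 is an assumption rather than a value taken from the examples.

def soft_target_update(target, online, tau=5e-3):
    # Polyak averaging: target <- tau * online + (1 - tau) * target.
    # The default tau is an assumption, not read from the snippets above.
    for tar_p, cur_p in zip(target.parameters(), online.parameters()):
        tar_p.data.copy_(cur_p.data * tau + tar_p.data * (1.0 - tau))
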
Example #4
class AgentSAC(AgentBase):
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__()
        self.target_entropy = np.log(action_dim)
        self.alpha_log = torch.tensor((-np.log(action_dim) * np.e, ),
                                      requires_grad=True,
                                      dtype=torch.float32,
                                      device=self.device)

        self.act = ActorSAC(net_dim, state_dim, action_dim).to(self.device)
        self.act_target = ActorSAC(net_dim, state_dim,
                                   action_dim).to(self.device)
        self.act_target.load_state_dict(self.act.state_dict())

        self.cri = CriticTwin(
            net_dim,
            state_dim,
            action_dim,
        ).to(self.device)
        self.cri_target = CriticTwin(
            net_dim,
            state_dim,
            action_dim,
        ).to(self.device)
        self.cri_target.load_state_dict(self.cri.state_dict())

        self.criterion = nn.MSELoss()
        self.optimizer = torch.optim.Adam([{
            'params': self.act.parameters(),
            'lr': learning_rate
        }, {
            'params': self.cri.parameters(),
            'lr': learning_rate
        }, {
            'params': (self.alpha_log, ),
            'lr': learning_rate
        }])

    def select_actions(self, states):  # states = (state, ...)
        states = torch.as_tensor(states,
                                 dtype=torch.float32,
                                 device=self.device)
        actions = self.act.get_action(states)
        return actions.detach().cpu().numpy()

    def update_policy(self, buffer, max_step, batch_size, repeat_times):
        buffer.update__now_len__before_sample()

        alpha = self.alpha_log.exp().detach()
        actor_obj = critic_obj = None
        for _ in range(int(max_step * repeat_times)):
            with torch.no_grad():
                reward, mask, state, action, next_s = buffer.random_sample(
                    batch_size)

                next_action, next_log_prob = self.act_target.get__action__log_prob(
                    next_s)
                next_q = torch.min(
                    *self.cri_target.get__q1_q2(next_s, next_action))
                q_label = reward + mask * (next_q + next_log_prob * alpha)

            q1, q2 = self.cri.get__q1_q2(state, action)
            critic_obj = self.criterion(q1, q_label) + self.criterion(
                q2, q_label)

            a_noise_pg, log_prob = self.act.get__action__log_prob(
                state)  # policy gradient
            alpha_obj = (self.alpha_log *
                         (log_prob - self.target_entropy).detach()).mean()
            with torch.no_grad():
                self.alpha_log[:] = self.alpha_log.clamp(-16, 2)

            alpha = self.alpha_log.exp().detach()
            actor_obj = -(
                torch.min(*self.cri_target.get__q1_q2(state, a_noise_pg)) +
                log_prob * alpha).mean()
            self.obj_a = 0.995 * self.obj_a + 0.005 * q_label.mean().item()

            united_obj = critic_obj + alpha_obj + actor_obj
            self.optimizer.zero_grad()
            united_obj.backward()
            self.optimizer.step()

            soft_target_update(self.cri_target, self.cri)
            soft_target_update(self.act_target, self.act)

        self.obj_a = actor_obj.item()
        self.obj_c = critic_obj.item()
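
In the target computation above, mask conventionally folds the discount and the terminal flag into one factor (mask = gamma * (1 - done)), so q_label = reward + mask * (next_q + next_log_prob * alpha) is the soft Bellman target, with the entropy term following the sign convention of get__action__log_prob. A standalone illustration with dummy tensors (gamma = 0.99 and the three transitions are made-up values):

# Standalone illustration of the soft Bellman target used above.
# gamma = 0.99 and the batch of three transitions are made-up values.
import torch

gamma = 0.99
reward = torch.tensor([[1.0], [0.5], [0.0]])
done = torch.tensor([[0.0], [0.0], [1.0]])    # last transition is terminal
mask = gamma * (1.0 - done)                   # how mask is usually built
next_q = torch.tensor([[2.0], [1.5], [3.0]])  # min of the twin target critics
next_log_prob = torch.tensor([[0.9], [1.1], [0.7]])  # entropy term, library sign
alpha = 0.02                                  # current entropy temperature

q_label = reward + mask * (next_q + next_log_prob * alpha)
print(q_label)  # zero bootstrap on the terminal transition
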
Example #5
class AgentModSAC(AgentBase):
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__()
        self.target_entropy = np.log(action_dim)
        self.alpha_log = torch.tensor((-np.log(action_dim) * np.e, ),
                                      requires_grad=True,
                                      dtype=torch.float32,
                                      device=self.device)

        self.act = ActorSAC(net_dim, state_dim, action_dim).to(self.device)
        self.act_target = ActorSAC(net_dim, state_dim,
                                   action_dim).to(self.device)
        self.act_target.load_state_dict(self.act.state_dict())

        self.cri = CriticTwin(
            net_dim,
            state_dim,
            action_dim,
        ).to(self.device)
        self.cri_target = CriticTwin(
            net_dim,
            state_dim,
            action_dim,
        ).to(self.device)
        self.cri_target.load_state_dict(self.cri.state_dict())

        self.criterion = nn.SmoothL1Loss()
        self.optimizer = torch.optim.Adam(
            [{'params': self.act.parameters(), 'lr': learning_rate},
             {'params': self.cri.parameters(), 'lr': learning_rate},
             {'params': (self.alpha_log, ), 'lr': learning_rate}],
            lr=learning_rate)

    def select_actions(self, states):
        states = torch.as_tensor(states,
                                 dtype=torch.float32,
                                 device=self.device)
        actions = self.act.get_action(states)
        return actions.detach().cpu().numpy()

    def update_policy(self, buffer, max_step, batch_size, repeat_times):
        buffer.update__now_len__before_sample()

        k = 1.0 + buffer.now_len / buffer.max_len
        batch_size_ = int(batch_size * k)
        train_steps = int(max_step * k * repeat_times)

        alpha = self.alpha_log.exp().detach()
        update_a = 0
        for update_c in range(1, train_steps):
            with torch.no_grad():
                reward, mask, state, action, next_s = buffer.random_sample(
                    batch_size_)

                next_action, next_log_prob = self.act_target.get__action__log_prob(
                    next_s)
                q_label = reward + mask * (torch.min(
                    *self.cri_target.get__q1_q2(next_s, next_action)) +
                                           next_log_prob * alpha)

            q1, q2 = self.cri.get__q1_q2(state, action)
            cri_obj = self.criterion(q1, q_label) + self.criterion(q2, q_label)
            self.obj_c = 0.995 * self.obj_c + 0.0025 * cri_obj.item()

            a_noise_pg, log_prob = self.act.get__action__log_prob(
                state)  # policy gradient
            alpha_obj = (self.alpha_log *
                         (log_prob - self.target_entropy).detach()).mean()
            with torch.no_grad():
                self.alpha_log[:] = self.alpha_log.clamp(-16, 2)

            lamb = np.exp(-self.obj_c**2)
            if_update_a = update_a / update_c < 1 / (2 - lamb)
            if if_update_a:  # auto TTUR
                update_a += 1

                alpha = self.alpha_log.exp().detach()
                act_obj = -(
                    torch.min(*self.cri_target.get__q1_q2(state, a_noise_pg)) +
                    log_prob * alpha).mean()
                self.obj_a = 0.995 * self.obj_a + 0.005 * q_label.mean().item()

                united_obj = cri_obj + alpha_obj + act_obj
            else:
                united_obj = cri_obj + alpha_obj

            self.optimizer.zero_grad()
            united_obj.backward()
            self.optimizer.step()

            soft_target_update(self.cri_target, self.cri)
            if if_update_a:
                soft_target_update(self.act_target, self.act)
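
The lamb = np.exp(-self.obj_c ** 2) gate above adapts the actor update frequency to the smoothed critic loss: a small obj_c pushes lamb toward 1 and the threshold 1 / (2 - lamb) toward 1, so the actor is updated almost every step, while a large obj_c pushes the threshold toward 1/2 and roughly halves the actor update rate. A quick numeric check with hypothetical obj_c values:

# Numeric check of the adaptive actor-update ratio used in AgentModSAC above.
# The obj_c values are hypothetical, chosen to show the two extremes.
import numpy as np

for obj_c in (0.05, 0.5, 2.0):
    lamb = np.exp(-obj_c ** 2)
    ratio = 1.0 / (2.0 - lamb)  # upper bound on actor updates per critic update
    print(f"obj_c={obj_c:<4} lamb={lamb:.3f} actor/critic ratio < {ratio:.2f}")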