class AgentTD3(AgentDDPG):
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__(net_dim, state_dim, action_dim, learning_rate)
        self.explore_noise = 0.1  # standard deviation of exploration noise
        self.policy_noise = 0.2  # standard deviation of policy smoothing noise
        self.update_freq = 2  # delayed frequency of the soft target update

        self.cri = CriticTwin(net_dim, state_dim, action_dim).to(self.device)
        self.cri_target = deepcopy(self.cri)
        self.optimizer = torch.optim.Adam(
            [{'params': self.act.parameters(), 'lr': learning_rate},
             {'params': self.cri.parameters(), 'lr': learning_rate}])

    def update_policy(self, buffer, max_step, batch_size, repeat_times):
        buffer.update__now_len__before_sample()

        obj_critic = obj_actor = None
        for i in range(int(max_step * repeat_times)):
            with torch.no_grad():
                reward, mask, action, state, next_s = buffer.random_sample(batch_size)
                next_a = self.act_target.get_action(next_s, self.policy_noise)  # target policy smoothing
                next_q = torch.min(*self.cri_target.get__q1_q2(next_s, next_a))  # twin critics
                q_label = reward + mask * next_q

            q1, q2 = self.cri.get__q1_q2(state, action)
            obj_critic = self.criterion(q1, q_label) + self.criterion(q2, q_label)  # twin critic losses

            action_pg = self.act(state)  # action for the policy gradient
            obj_actor = -self.cri_target(state, action_pg).mean()

            obj_united = obj_actor + obj_critic  # joint objective for one optimizer step
            self.optimizer.zero_grad()
            obj_united.backward()
            self.optimizer.step()

            if i % self.update_freq == 0:  # delayed soft target update
                soft_target_update(self.cri_target, self.cri)
                soft_target_update(self.act_target, self.act)
        return obj_actor.item(), obj_critic.item() / 2  # halve to average the twin losses
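# AgentTD3 above (and AgentSAC below) rely on a soft_target_update helper
# defined elsewhere in this codebase. A minimal sketch of the conventional
# Polyak averaging it presumably performs is given here; the parameter name
# `tau` and its default value are assumptions, not the actual signature.
def soft_target_update(target_net, current_net, tau=5e-3):
    # target <- tau * current + (1 - tau) * target, parameter by parameter
    for tar, cur in zip(target_net.parameters(), current_net.parameters()):
        tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))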
class AgentSAC(AgentBase):
    def __init__(self, net_dim, state_dim, action_dim, learning_rate=1e-4):
        super().__init__()
        self.target_entropy = np.log(action_dim)
        self.alpha_log = torch.tensor((-np.log(action_dim) * np.e,),
                                      dtype=torch.float32, requires_grad=True,
                                      device=self.device)  # trainable log of the entropy temperature

        self.act = ActorSAC(net_dim, state_dim, action_dim).to(self.device)
        self.act_target = deepcopy(self.act)
        self.cri = CriticTwin(int(net_dim * 1.25), state_dim, action_dim).to(self.device)
        self.cri_target = deepcopy(self.cri)

        self.criterion = torch.nn.SmoothL1Loss()
        self.optimizer = torch.optim.Adam(
            [{'params': self.act.parameters(), 'lr': learning_rate * 0.75},
             {'params': self.cri.parameters(), 'lr': learning_rate * 1.25},
             {'params': (self.alpha_log,), 'lr': learning_rate}])

    def select_actions(self, states):  # states = (state, ...)
        states = torch.as_tensor(states, dtype=torch.float32, device=self.device)
        actions = self.act.get_action(states)  # stochastic action for exploration
        return actions.detach().cpu().numpy()

    def update_policy(self, buffer, max_step, batch_size, repeat_times):
        buffer.update__now_len__before_sample()

        alpha = self.alpha_log.exp().detach()
        obj_actor = obj_critic = None
        for _ in range(int(max_step * repeat_times)):
            with torch.no_grad():
                reward, mask, action, state, next_s = buffer.random_sample(batch_size)
                next_a, next_log_prob = self.act_target.get__action__log_prob(next_s)
                next_q = torch.min(*self.cri_target.get__q1_q2(next_s, next_a))  # twin critics
                q_label = reward + mask * (next_q + next_log_prob * alpha)

            q1, q2 = self.cri.get__q1_q2(state, action)
            obj_critic = self.criterion(q1, q_label) + self.criterion(q2, q_label)

            action_pg, log_prob = self.act.get__action__log_prob(state)  # policy gradient

            obj_alpha = (self.alpha_log * (log_prob - self.target_entropy).detach()).mean()
            alpha = self.alpha_log.exp().detach()
            with torch.no_grad():
                self.alpha_log[:] = self.alpha_log.clamp(-16, 2)  # keep log alpha in a safe range

            obj_actor = -(torch.min(*self.cri_target.get__q1_q2(state, action_pg))
                          + log_prob * alpha).mean()

            obj_united = obj_critic + obj_alpha + obj_actor
            self.optimizer.zero_grad()
            obj_united.backward()
            self.optimizer.step()

            soft_target_update(self.cri_target, self.cri)
            soft_target_update(self.act_target, self.act)

        # return obj_actor.item(), obj_critic.item()
        return alpha.item(), obj_critic.item()
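# Both update_policy methods above assume a replay buffer that exposes
# update__now_len__before_sample() and random_sample(batch_size), the latter
# returning (reward, mask, action, state, next_state) tensors. A minimal,
# self-contained sketch of such a buffer follows; the flat storage layout and
# the append() signature are assumptions, not the actual implementation.
import torch


class ReplayBufferSketch:
    def __init__(self, max_len, state_dim, action_dim, device='cpu'):
        self.device = torch.device(device)
        self.max_len = max_len
        self.now_len = 0
        self.next_idx = 0
        self.is_full = False
        # each row stores (reward, mask, action..., state..., next_state...)
        self.buf = torch.empty((max_len, 2 + action_dim + state_dim * 2),
                               dtype=torch.float32, device=self.device)
        self.action_dim = action_dim
        self.state_dim = state_dim

    def append(self, state, action, reward, mask, next_state):
        self.buf[self.next_idx] = torch.as_tensor(
            [reward, mask, *action, *state, *next_state],
            dtype=torch.float32, device=self.device)
        self.next_idx = (self.next_idx + 1) % self.max_len
        self.is_full = self.is_full or self.next_idx == 0

    def update__now_len__before_sample(self):
        self.now_len = self.max_len if self.is_full else self.next_idx

    def random_sample(self, batch_size):
        idx = torch.randint(self.now_len, size=(batch_size,), device=self.device)
        batch = self.buf[idx]
        a_end = 2 + self.action_dim
        s_end = a_end + self.state_dim
        return (batch[:, 0:1], batch[:, 1:2],  # reward, mask
                batch[:, 2:a_end],             # action
                batch[:, a_end:s_end],         # state
                batch[:, s_end:])              # next_state

# Example usage with hypothetical dimensions (the mask presumably folds the
# discount factor gamma and the terminal flag together, as q_label expects):
#   buf = ReplayBufferSketch(max_len=2 ** 17, state_dim=3, action_dim=1)
#   buf.append(state, action, reward, 0.0 if done else 0.99, next_state)
#   buf.update__now_len__before_sample()
#   reward, mask, action, state, next_s = buf.random_sample(256)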