class DQN:
    def __init__(self, state_dim, action_dim, cfg):
        self.action_dim = action_dim
        self.state_dim = state_dim
        self.loss = 0
        self.gamma = cfg.gamma
        self.frame_idx = 0  # counter used for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.device = cfg.device
        self.policy_net = MLP(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_net = MLP(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)

    def choose_action(self, state):
        '''policy_net interacts with the environment and stores the resulting transitions in the replay buffer.
        The Q-values used for training are recomputed later from sampled transitions, so no gradients are needed here.'''
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():  # no gradient bookkeeping for action selection
                state = torch.tensor([state], device=self.device, dtype=torch.float32)
                q_value = self.policy_net(state)
                action = q_value.max(1)[1].item()  # tensor.max(1)[1] returns the index of the max value, i.e. the action
        else:
            action = random.randrange(self.action_dim)
        return action

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        # convert to tensors
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        # Q-values of the current (s_t, a); these are being trained, so gradients are required.
        # Alternatively, the Q-value could be stored in the buffer at choose_action time so this step is not needed.
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
        # max Q-value of s_{t+1} under target_net; detached so the target does not receive gradients
        next_q_values = self.target_net(next_state_batch).max(1)[0].detach()
        # for terminal states done=1, so the expected Q-value reduces to the reward
        expected_q_values = reward_batch + self.gamma * next_q_values * (1 - done_batch)
        self.loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))
        self.optimizer.zero_grad()
        self.loss.backward()
        self.optimizer.step()

    def save(self, path):
        torch.save(self.target_net.state_dict(), path + 'DQN_CheckPoint.pth')

    def load(self, path):
        self.target_net.load_state_dict(torch.load(path + 'DQN_CheckPoint.pth'))
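All of the agents in this section push transitions to and sample batches from a ReplayBuffer that is not shown here. A minimal sketch consistent with the push/sample/__len__ calls used by the DQN-style agents (the TD3 agent further down uses a slightly different add/sample interface, and the original project's buffer may differ in detail) is:

import random
from collections import deque

class ReplayBuffer:
    '''Minimal sketch of the buffer interface assumed by the agents in this section.'''
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)  # oldest transitions are dropped automatically

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        # transpose a list of transitions into batched fields
        state, action, reward, next_state, done = zip(*batch)
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)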
class DQN:
    def __init__(self, n_states, n_actions, cfg):
        self.n_actions = n_actions  # total number of actions
        self.device = cfg.device  # device, cpu or gpu
        self.gamma = cfg.gamma  # reward discount factor
        # epsilon-greedy parameters
        self.sample_count = 0  # counter used for epsilon decay
        self.epsilon = 0
        self.epsilon_start = cfg.epsilon_start
        self.epsilon_end = cfg.epsilon_end
        self.epsilon_decay = cfg.epsilon_decay
        self.batch_size = cfg.batch_size
        self.policy_net = MLP2(n_states, n_actions, hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP2(n_states, n_actions, hidden_dim=cfg.hidden_dim).to(self.device)
        # target_net starts as an exact copy of policy_net
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # disable BatchNormalization and Dropout
        # see the difference between parameters() and state_dict(): the former has requires_grad=True
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.loss = 0
        self.memory = ReplayBuffer(cfg.memory_capacity)

    def choose_action(self, state, train=True):
        '''Select an action.'''
        if train:
            self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
                math.exp(-1. * self.sample_count / self.epsilon_decay)
            self.sample_count += 1
            if random.random() > self.epsilon:
                with torch.no_grad():
                    # convert to a tensor first; the state elements are originally float64
                    # note that torch.tensor(state).unsqueeze(0) is equivalent to torch.tensor([state])
                    state = torch.tensor([state], device=self.device, dtype=torch.float32)
                    q_value = self.policy_net(state)
                    # tensor.max(1) returns the max value of each row and its index,
                    # e.g. torch.return_types.max(values=tensor([10.3587]), indices=tensor([0])),
                    # so tensor.max(1)[1] is the index of the max value, i.e. the action
                    action = q_value.max(1)[1].item()
            else:
                action = random.randrange(self.n_actions)
            return action
        else:
            with torch.no_grad():  # no gradient bookkeeping at evaluation time
                state = torch.tensor([state], device=self.device, dtype=torch.float32)
                q_value = self.target_net(state)
                action = q_value.max(1)[1].item()
            return action

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        # sample a batch of transitions from memory
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        # convert to tensors, e.g. tensor([[-4.5543e-02, -2.3910e-01, 1.8344e-02, 2.3158e-01], ...])
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)  # e.g. tensor([[1], ..., [0]])
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)  # e.g. tensor([1., 1., ..., 1.])
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)  # bool -> float tensor
        # compute Q(s_t, a) for the chosen actions
        # torch.gather: for a = torch.Tensor([[1, 2], [3, 4]]), a.gather(1, torch.LongTensor([[0], [1]])) = torch.Tensor([[1], [3]])
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
        # V(s_{t+1}) of all next states: the max Q-value under target_net, detached from the graph
        next_state_values = self.target_net(next_state_batch).max(1)[0].detach()
        # expected Q-value: for terminal states done=1, so it reduces to the reward
        expected_q_values = reward_batch + self.gamma * next_state_values * (1 - done_batch)
        # self.loss = F.smooth_l1_loss(q_values, expected_q_values.unsqueeze(1))  # Huber loss
        self.loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))  # mean squared error loss
        # optimize the model
        self.optimizer.zero_grad()  # clear all stale gradients from the last step
        # loss.backward() backpropagates the loss w.r.t. all parameters that require gradients
        self.loss.backward()
        for param in self.policy_net.parameters():  # clip gradients to avoid exploding gradients
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()  # update the model

    def save(self, path):
        torch.save(self.target_net.state_dict(), path + 'dqn_checkpoint.pth')

    def load(self, path):
        self.target_net.load_state_dict(torch.load(path + 'dqn_checkpoint.pth'))
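The MLP / MLP2 networks referenced by these DQN variants are defined elsewhere in the project. A plausible sketch of such a fully connected Q-network (the layer count, hidden size and activation are assumptions, and MLP2 presumably differs only in depth) is:

import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    '''Sketch of a fully connected Q-network: state in, one Q-value per action out.'''
    def __init__(self, state_dim, action_dim, hidden_dim=128):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)  # raw Q-values, no activation on the output layer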
class HierarchicalDQN:
    def __init__(self, state_dim, action_dim, cfg):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = cfg.gamma
        self.device = cfg.device
        self.batch_size = cfg.batch_size
        self.sample_count = 0
        self.epsilon = 0
        self.epsilon_start = cfg.epsilon_start
        self.epsilon_end = cfg.epsilon_end
        self.epsilon_decay = cfg.epsilon_decay
        # the low-level controller sees the state concatenated with the one-hot goal, hence 2 * state_dim inputs
        self.policy_net = MLP(2 * state_dim, action_dim, cfg.hidden_dim).to(self.device)
        self.target_net = MLP(2 * state_dim, action_dim, cfg.hidden_dim).to(self.device)
        # the meta (high-level) policy outputs a goal, so its output space matches the state space
        self.meta_policy_net = MLP(state_dim, state_dim, cfg.hidden_dim).to(self.device)
        self.meta_target_net = MLP(state_dim, state_dim, cfg.hidden_dim).to(self.device)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.meta_optimizer = optim.Adam(self.meta_policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.meta_memory = ReplayBuffer(cfg.memory_capacity)

    def to_onehot(self, x):
        oh = np.zeros(self.state_dim)
        oh[x - 1] = 1.
        return oh

    def set_goal(self, meta_state):
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.sample_count / self.epsilon_decay)
        self.sample_count += 1
        if random.random() > self.epsilon:
            with torch.no_grad():
                meta_state = torch.tensor([meta_state], device=self.device, dtype=torch.float32)
                q_value = self.meta_policy_net(meta_state)
                goal = q_value.max(1)[1].item()
        else:
            goal = random.randrange(self.state_dim)
        onehot_goal = self.to_onehot(goal)
        return onehot_goal

    def choose_action(self, state):
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.sample_count / self.epsilon_decay)
        self.sample_count += 1
        if random.random() > self.epsilon:
            with torch.no_grad():
                state = torch.tensor([state], device=self.device, dtype=torch.float32)
                q_value = self.policy_net(state)
                action = q_value.max(1)[1].item()
        else:
            action = random.randrange(self.action_dim)
        return action

    def update(self):
        # low-level (controller) update
        if len(self.memory) >= self.batch_size:
            state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
            state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
            action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
            reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
            next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
            done_batch = torch.tensor(np.float32(done_batch), device=self.device)
            q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
            next_state_values = self.target_net(next_state_batch).max(1)[0].detach()
            expected_q_values = reward_batch + self.gamma * next_state_values * (1 - done_batch)
            loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))
            self.optimizer.zero_grad()
            loss.backward()
            for param in self.policy_net.parameters():  # clip gradients to avoid exploding gradients
                param.grad.data.clamp_(-1, 1)
            self.optimizer.step()
        # high-level (meta controller) update
        if len(self.meta_memory) >= self.batch_size:
            meta_state_batch, meta_action_batch, meta_reward_batch, next_meta_state_batch, meta_done_batch = \
                self.meta_memory.sample(self.batch_size)
            meta_state_batch = torch.tensor(meta_state_batch, device=self.device, dtype=torch.float)
            meta_action_batch = torch.tensor(meta_action_batch, device=self.device).unsqueeze(1)
            meta_reward_batch = torch.tensor(meta_reward_batch, device=self.device, dtype=torch.float)
            next_meta_state_batch = torch.tensor(next_meta_state_batch, device=self.device, dtype=torch.float)
            meta_done_batch = torch.tensor(np.float32(meta_done_batch), device=self.device)
            meta_q_values = self.meta_policy_net(meta_state_batch).gather(dim=1, index=meta_action_batch)
            next_meta_state_values = self.meta_target_net(next_meta_state_batch).max(1)[0].detach()
            expected_meta_q_values = meta_reward_batch + self.gamma * next_meta_state_values * (1 - meta_done_batch)
            meta_loss = nn.MSELoss()(meta_q_values, expected_meta_q_values.unsqueeze(1))
            self.meta_optimizer.zero_grad()
            meta_loss.backward()
            for param in self.meta_policy_net.parameters():
                param.grad.data.clamp_(-1, 1)
            self.meta_optimizer.step()
class HierarchicalDQN:
    def __init__(self, state_dim, action_dim, cfg):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = cfg.gamma
        self.device = cfg.device
        self.batch_size = cfg.batch_size
        self.frame_idx = 0
        self.epsilon = lambda frame_idx: cfg.epsilon_end + (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.policy_net = MLP(2 * state_dim, action_dim, cfg.hidden_dim).to(self.device)
        # the meta (high-level) policy produces high-level goals; its output space matches the state space
        self.meta_policy_net = MLP(state_dim, state_dim, cfg.hidden_dim).to(self.device)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.meta_optimizer = optim.Adam(self.meta_policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.meta_memory = ReplayBuffer(cfg.memory_capacity)
        self.loss_numpy = 0
        self.meta_loss_numpy = 0
        self.losses = []
        self.meta_losses = []

    def to_onehot(self, x):
        oh = np.zeros(self.state_dim)
        oh[x - 1] = 1.
        return oh

    def set_goal(self, state):
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(0)
                goal = self.meta_policy_net(state).max(1)[1].item()
        else:
            goal = random.randrange(self.state_dim)
        return goal

    def choose_action(self, state):
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(0)
                q_value = self.policy_net(state)
                action = q_value.max(1)[1].item()
        else:
            action = random.randrange(self.action_dim)
        return action

    def update(self):
        self.update_policy()
        self.update_meta()

    def update_policy(self):
        if self.batch_size > len(self.memory):
            return
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device, dtype=torch.int64).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch).squeeze(1)
        # this variant bootstraps from the policy net itself; there is no separate target net
        next_state_values = self.policy_net(next_state_batch).max(1)[0].detach()
        expected_q_values = reward_batch + self.gamma * next_state_values * (1 - done_batch)
        loss = nn.MSELoss()(q_values, expected_q_values)
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():  # clip gradients to avoid exploding gradients
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
        self.loss_numpy = loss.detach().cpu().numpy()
        self.losses.append(self.loss_numpy)

    def update_meta(self):
        if self.batch_size > len(self.meta_memory):
            return
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.meta_memory.sample(self.batch_size)
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device, dtype=torch.int64).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        q_values = self.meta_policy_net(state_batch).gather(dim=1, index=action_batch).squeeze(1)
        next_state_values = self.meta_policy_net(next_state_batch).max(1)[0].detach()
        expected_q_values = reward_batch + self.gamma * next_state_values * (1 - done_batch)
        meta_loss = nn.MSELoss()(q_values, expected_q_values)
        self.meta_optimizer.zero_grad()
        meta_loss.backward()
        for param in self.meta_policy_net.parameters():  # clip gradients to avoid exploding gradients
            param.grad.data.clamp_(-1, 1)
        self.meta_optimizer.step()
        self.meta_loss_numpy = meta_loss.detach().cpu().numpy()
        self.meta_losses.append(self.meta_loss_numpy)

    def save(self, path):
        torch.save(self.policy_net.state_dict(), path + 'policy_checkpoint.pth')
        torch.save(self.meta_policy_net.state_dict(), path + 'meta_checkpoint.pth')

    def load(self, path):
        self.policy_net.load_state_dict(torch.load(path + 'policy_checkpoint.pth'))
        self.meta_policy_net.load_state_dict(torch.load(path + 'meta_checkpoint.pth'))
class DQN:
    def __init__(self, state_dim, action_dim, cfg):
        self.action_dim = action_dim  # total number of actions
        self.device = cfg.device  # device, cpu or gpu
        self.gamma = cfg.gamma  # reward discount factor
        # epsilon-greedy parameters
        self.frame_idx = 0  # counter used for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            # copy params from policy net
            target_param.data.copy_(param.data)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)

    def choose_action(self, state):
        '''Select an action with the epsilon-greedy policy.'''
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            action = self.predict(state)
        else:
            action = random.randrange(self.action_dim)
        return action

    def predict(self, state):
        with torch.no_grad():
            state = torch.tensor([state], device=self.device, dtype=torch.float32)
            q_values = self.policy_net(state)
            action = q_values.max(1)[1].item()
        return action

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        # sample a batch of transitions from memory
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        # convert to tensors, e.g. tensor([[-4.5543e-02, -2.3910e-01, 1.8344e-02, 2.3158e-01], ...])
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)  # e.g. tensor([[1], ..., [0]])
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)  # e.g. tensor([1., 1., ..., 1.])
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        # compute Q(s_t, a) for the chosen actions
        # torch.gather: for a = torch.Tensor([[1, 2], [3, 4]]), a.gather(1, torch.LongTensor([[0], [1]])) = torch.Tensor([[1], [3]])
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
        # max Q-value of the next states under target_net, detached from the graph
        next_q_values = self.target_net(next_state_batch).max(1)[0].detach()
        # expected Q-value: for terminal states done=1, so it reduces to the reward
        expected_q_values = reward_batch + \
            self.gamma * next_q_values * (1 - done_batch)
        # loss = F.smooth_l1_loss(q_values, expected_q_values.unsqueeze(1))  # Huber loss
        loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))  # mean squared error loss
        # optimize the model
        self.optimizer.zero_grad()  # clear all stale gradients from the last step
        # loss.backward() backpropagates the loss w.r.t. all parameters that require gradients
        loss.backward()
        # for param in self.policy_net.parameters():  # clip gradients to avoid exploding gradients
        #     param.grad.data.clamp_(-1, 1)
        self.optimizer.step()  # update the model

    def save(self, path):
        torch.save(self.target_net.state_dict(), path + 'dqn_checkpoint.pth')

    def load(self, path):
        self.target_net.load_state_dict(torch.load(path + 'dqn_checkpoint.pth'))
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            param.data.copy_(target_param.data)
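A minimal driver loop for the DQN agent above might look as follows. This is a sketch, not part of the original code: env is a gym-style environment with the classic step API, and cfg.train_eps / cfg.target_update are assumed configuration fields.

def train(cfg, env, agent):
    '''Hypothetical training loop for the DQN agent above (cfg field names are assumptions).'''
    rewards = []
    for ep in range(cfg.train_eps):
        state = env.reset()
        ep_reward, done = 0.0, False
        while not done:
            action = agent.choose_action(state)                 # epsilon-greedy action
            next_state, reward, done, _ = env.step(action)      # classic gym step API
            agent.memory.push(state, action, reward, next_state, done)
            agent.update()                                       # one gradient step per environment step
            state = next_state
            ep_reward += reward
        if (ep + 1) % cfg.target_update == 0:                    # periodically sync the target network
            agent.target_net.load_state_dict(agent.policy_net.state_dict())
        rewards.append(ep_reward)
    return rewards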
class DDPG:
    def __init__(self, state_dim, action_dim, cfg):
        self.device = cfg.device
        self.critic = Critic(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.actor = Actor(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_critic = Critic(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_actor = Actor(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
            target_param.data.copy_(param.data)
        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
            target_param.data.copy_(param.data)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.critic_lr)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.batch_size = cfg.batch_size
        self.soft_tau = cfg.soft_tau
        self.gamma = cfg.gamma

    def choose_action(self, state):
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        action = self.actor(state)
        # detach() cuts the tensor out of the autograd graph
        return action.detach().cpu().numpy()[0, 0]

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        # convert everything to tensors
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
        # note that the critic takes (s_t, a) as input
        policy_loss = self.critic(state, self.actor(state))
        policy_loss = -policy_loss.mean()
        next_action = self.target_actor(next_state)
        target_value = self.target_critic(next_state, next_action.detach())
        expected_value = reward + (1.0 - done) * self.gamma * target_value
        expected_value = torch.clamp(expected_value, -np.inf, np.inf)
        value = self.critic(state, action)
        value_loss = nn.MSELoss()(value, expected_value.detach())
        self.actor_optimizer.zero_grad()
        policy_loss.backward()
        self.actor_optimizer.step()
        self.critic_optimizer.zero_grad()
        value_loss.backward()
        self.critic_optimizer.step()
        # soft-update the target networks
        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.soft_tau) + param.data * self.soft_tau)
        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.soft_tau) + param.data * self.soft_tau)

    def save(self, path):
        torch.save(self.actor.state_dict(), path + 'checkpoint.pt')

    def load(self, path):
        self.actor.load_state_dict(torch.load(path + 'checkpoint.pt'))
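DDPG's actor is deterministic, so exploration noise has to be added to the action returned by choose_action before it is executed and stored in the buffer. The noise process is not part of the code above; a common choice is an Ornstein-Uhlenbeck process, sketched here with typical default parameters:

import numpy as np

class OUNoise:
    '''Ornstein-Uhlenbeck process, a common exploration noise for DDPG-style agents.'''
    def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.3):
        self.mu = mu * np.ones(action_dim)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        self.state = np.copy(self.mu)  # restart the process at the mean, typically once per episode

    def sample(self):
        # mean-reverting drift plus Gaussian diffusion
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
        self.state = self.state + dx
        return self.state

In use, something like action = agent.choose_action(state) + ou_noise.sample() would then be clipped to the environment's action range before env.step.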
class DoubleDQN:
    def __init__(self, state_dim, action_dim, cfg):
        self.action_dim = action_dim  # total number of actions
        self.device = cfg.device  # device, cpu or gpu
        self.gamma = cfg.gamma
        # epsilon-greedy parameters
        self.actions_count = 0
        self.epsilon_start = cfg.epsilon_start
        self.epsilon_end = cfg.epsilon_end
        self.epsilon_decay = cfg.epsilon_decay
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        # target_net starts as an exact copy of policy_net
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # disable BatchNormalization and Dropout
        # see the difference between parameters() and state_dict(): the former has requires_grad=True
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.loss = 0
        self.memory = ReplayBuffer(cfg.memory_capacity)

    def predict(self, state):
        with torch.no_grad():
            # convert to a tensor first; the state elements are originally float64
            # note that torch.tensor(state).unsqueeze(0) is equivalent to torch.tensor([state])
            state = torch.tensor([state], device=self.device, dtype=torch.float32)
            q_value = self.policy_net(state)
            # tensor.max(1) returns the max value of each row and its index,
            # so tensor.max(1)[1] is the index of the max value, i.e. the action
            action = q_value.max(1)[1].item()
        return action

    def choose_action(self, state):
        '''Select an action with the epsilon-greedy policy.'''
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.actions_count / self.epsilon_decay)
        self.actions_count += 1
        if random.random() > self.epsilon:
            action = self.predict(state)
        else:
            action = random.randrange(self.action_dim)
        return action

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        # sample a batch of transitions from memory
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        ### convert to tensors ###
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)  # e.g. tensor([[1], ..., [0]])
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)  # e.g. tensor([1., 1., ..., 1.])
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)  # bool -> float tensor
        # compute Q(s_t, a) for the chosen actions
        q_values = self.policy_net(state_batch)
        next_q_values = self.policy_net(next_state_batch)
        # pick out Q(s_t, a=a_t)
        q_value = q_values.gather(dim=1, index=action_batch)
        '''Nature DQN target, kept for comparison:
        # max Q'(s_{t+1}) under the target network Q'
        next_q_state_value = self.target_net(next_state_batch).max(1)[0].detach()
        # for terminal states done=1, so the target reduces to the reward
        q_target = reward_batch + self.gamma * next_q_state_value * (1 - done_batch)
        '''
        '''Double DQN target, slightly different from Nature DQN'''
        next_target_values = self.target_net(next_state_batch)
        # select the greedy action with the policy net and evaluate it with the target net,
        # i.e. Q'(s_{t+1}, argmax_a Q(s_{t+1}, a))
        next_target_q_value = next_target_values.gather(
            1, torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)
        q_target = reward_batch + self.gamma * next_target_q_value * (1 - done_batch)
        self.loss = nn.MSELoss()(q_value, q_target.unsqueeze(1))  # mean squared error loss
        # optimize the model
        self.optimizer.zero_grad()  # clear all stale gradients from the last step
        # loss.backward() backpropagates the loss w.r.t. all parameters that require gradients
        self.loss.backward()
        for param in self.policy_net.parameters():  # clip gradients to avoid exploding gradients
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()  # update the model

    def save(self, path):
        torch.save(self.target_net.state_dict(), path + 'checkpoint.pth')

    def load(self, path):
        self.target_net.load_state_dict(torch.load(path + 'checkpoint.pth'))
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            param.data.copy_(target_param.data)
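The difference between the two targets in the comments above is only where the argmax is taken: Nature DQN both selects and evaluates the next action with the target network, while Double DQN selects with the online network and evaluates with the target network, which reduces overestimation bias. With $Q_\theta$ the policy_net and $Q_{\theta^-}$ the target_net:

$$y_t^{\mathrm{DQN}} = r_t + \gamma\,(1 - d_t)\,\max_a Q_{\theta^-}(s_{t+1}, a),
\qquad
y_t^{\mathrm{Double}} = r_t + \gamma\,(1 - d_t)\,Q_{\theta^-}\!\big(s_{t+1}, \arg\max_a Q_{\theta}(s_{t+1}, a)\big)$$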
class Agent:
    def __init__(self, env, state_dims, action_dims, actor_lr, critic_lr, tau=0.005, gamma=0.99,
                 buffer_size=1000000, fc1_size=400, fc2_size=300, batch_size=100, noise=0.1,
                 update_frequency=2, warmup=1000, seed=0):
        np.random.seed(seed)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.actor = Actor(actor_lr, state_dims, action_dims, fc1_size, fc2_size, seed=seed).to(self.device)
        self.critic_1 = Critic(critic_lr, state_dims, action_dims, fc1_size, fc2_size, seed=seed).to(self.device)
        self.critic_2 = Critic(critic_lr, state_dims, action_dims, fc1_size, fc2_size, seed=seed).to(self.device)
        self.target_actor = Actor(actor_lr, state_dims, action_dims, fc1_size, fc2_size, seed=seed).to(self.device)
        self.target_critic_1 = Critic(critic_lr, state_dims, action_dims, fc1_size, fc2_size, seed=seed).to(self.device)
        self.target_critic_2 = Critic(critic_lr, state_dims, action_dims, fc1_size, fc2_size, seed=seed).to(self.device)
        self.memory = ReplayBuffer(buffer_size, seed=seed)
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau
        self.noise = noise
        self.max_action = env.action_space.high
        self.min_action = env.action_space.low
        self.state_dims = state_dims
        self.action_dims = action_dims
        self.learning_step_counter = 0
        self.warmup = warmup
        self.time_step = 0
        self.update_frequency = update_frequency
        self.soft_update(init=True)

    def soft_update(self, init=False):
        tau = 1 if init else self.tau
        actor_params = dict(self.actor.named_parameters())
        critic1_params = dict(self.critic_1.named_parameters())
        critic2_params = dict(self.critic_2.named_parameters())
        target_actor_params = dict(self.target_actor.named_parameters())
        target_critic1_params = dict(self.target_critic_1.named_parameters())
        target_critic2_params = dict(self.target_critic_2.named_parameters())
        for param in critic1_params:
            critic1_params[param] = tau * critic1_params[param].clone() + \
                (1 - tau) * target_critic1_params[param].clone()
        for param in critic2_params:
            critic2_params[param] = tau * critic2_params[param].clone() + \
                (1 - tau) * target_critic2_params[param].clone()
        for param in actor_params:
            actor_params[param] = tau * actor_params[param].clone() + \
                (1 - tau) * target_actor_params[param].clone()
        self.target_actor.load_state_dict(actor_params)
        self.target_critic_1.load_state_dict(critic1_params)
        self.target_critic_2.load_state_dict(critic2_params)

    def save(self):
        torch.save(self.actor.state_dict(), "data/Actor.td3")
        torch.save(self.critic_1.state_dict(), "data/Critic1.td3")
        torch.save(self.critic_2.state_dict(), "data/Critic2.td3")
        torch.save(self.target_actor.state_dict(), "data/TargetActor.td3")
        torch.save(self.target_critic_1.state_dict(), "data/TargetCritic1.td3")
        torch.save(self.target_critic_2.state_dict(), "data/TargetCritic2.td3")

    def load(self):
        self.actor.load_state_dict(torch.load("data/Actor.td3"))
        self.critic_1.load_state_dict(torch.load("data/Critic1.td3"))
        self.critic_2.load_state_dict(torch.load("data/Critic2.td3"))
        self.target_actor.load_state_dict(torch.load("data/TargetActor.td3"))
        self.target_critic_1.load_state_dict(torch.load("data/TargetCritic1.td3"))
        self.target_critic_2.load_state_dict(torch.load("data/TargetCritic2.td3"))

    def get_action(self, state):
        if self.time_step < self.warmup:
            # pure exploration before the warmup steps are over
            action = torch.tensor(np.random.normal(scale=1, size=self.action_dims))
        else:
            state = torch.tensor(state, dtype=torch.float).to(self.device)
            mu = self.actor.forward(state).to(self.device)
            # add Gaussian exploration noise to the deterministic action
            action = mu + torch.tensor(np.random.normal(scale=self.noise), dtype=torch.float).to(self.device)
        action = torch.clamp(action, self.min_action[0], self.max_action[0])
        self.time_step += 1
        return action.cpu().detach().numpy()

    def step(self, state, action, reward, next_state, done):
        self.memory.add(state, action, reward, next_state, done)
        self.learn()

    def sample_from_buffer(self):
        states, actions, rewards, next_states, dones = self.memory.sample(self.batch_size)
        states = torch.tensor(states, dtype=torch.float).to(self.device)
        actions = torch.tensor(actions, dtype=torch.float).to(self.device)
        rewards = torch.tensor(rewards, dtype=torch.float).to(self.device)
        next_states = torch.tensor(next_states, dtype=torch.float).to(self.device)
        dones = torch.tensor(dones).to(self.device)
        return states, actions, rewards, next_states, dones

    def learn(self):
        if len(self.memory) < self.batch_size:
            return
        states, actions, rewards, next_states, dones = self.sample_from_buffer()
        # target policy smoothing: add clipped noise to the target action
        target_actions = self.target_actor.forward(next_states)
        target_actions = target_actions + torch.clamp(torch.tensor(np.random.normal(scale=0.2)), -0.5, 0.5)
        target_actions = torch.clamp(target_actions, self.min_action[0], self.max_action[0])
        q1_target = self.target_critic_1.forward(next_states, target_actions)
        q2_target = self.target_critic_2.forward(next_states, target_actions)
        q1 = self.critic_1.forward(states, actions)
        q2 = self.critic_2.forward(states, actions)
        q1_target[dones] = 0
        q2_target[dones] = 0
        # clipped double-Q learning: use the smaller of the two target Q-values
        q_target = torch.min(q1_target, q2_target)
        target = rewards.view(self.batch_size, 1) + self.gamma * q_target
        self.critic_1.optimizer.zero_grad()
        self.critic_2.optimizer.zero_grad()
        q1_loss = F.mse_loss(target, q1)
        q2_loss = F.mse_loss(target, q2)
        loss = q1_loss + q2_loss
        loss.backward()
        self.critic_1.optimizer.step()
        self.critic_2.optimizer.step()
        self.learning_step_counter += 1
        # delayed policy updates: update the actor only every update_frequency critic updates
        if self.learning_step_counter % self.update_frequency != 0:
            return
        self.actor.optimizer.zero_grad()
        actor_q1_loss = self.critic_1.forward(states, self.actor.forward(states))
        actor_loss = -torch.mean(actor_q1_loss)
        actor_loss.backward()
        self.actor.optimizer.step()
        self.soft_update()
class DDPG:
    def __init__(self, state_dim, action_dim, cfg):
        self.device = cfg.device
        # networks
        self.critic = Critic(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.actor = Actor(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_critic = Critic(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_actor = Actor(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        # optimizers
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.critic_lr)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.batch_size = cfg.batch_size
        self.soft_tau = cfg.soft_tau  # soft-update rate for the target networks
        self.gamma = cfg.gamma
        # synchronise the target networks
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.target_actor.load_state_dict(self.actor.state_dict())

    def choose_action(self, state):
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        action = self.actor(state)
        # detach: data that goes into the replay buffer must not backpropagate gradients
        return action.detach().cpu().numpy()[0, 0]

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        # sample a batch from the replay buffer
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        # convert everything to tensors
        state = torch.FloatTensor(state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)  # done is numpy bool data, cast via np.float32
        # critic (value) loss
        value = self.critic(state, action)  # the action comes straight from the buffer because only the critic is updated here
        next_action = self.target_actor(next_state)
        target_value = self.target_critic(next_state, next_action.detach())  # the target networks are not updated by gradients
        expected_value = reward + target_value * self.gamma * (1.0 - done)
        expected_value = torch.clamp(expected_value, -np.inf, np.inf)
        value_loss = nn.MSELoss()(value, expected_value.detach())  # detaching here keeps gradients out of the target networks
        # policy (actor) loss: the action must be produced by the actor, because buffer actions would not update the actor,
        # and the stored actions contain exploration noise, so they are no longer what the policy itself would output
        policy_loss = -self.critic(state, self.actor(state))
        # update the actor
        self.actor_optimizer.zero_grad()
        policy_loss.backward(torch.ones_like(policy_loss))  # backward() needs torch.ones_like when the loss is a tensor, not a scalar
        self.actor_optimizer.step()
        # update the critic
        self.critic_optimizer.zero_grad()
        value_loss.backward(torch.ones_like(value_loss))
        self.critic_optimizer.step()
        # soft-update the target networks
        for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.soft_tau) + param.data * self.soft_tau)
        for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.soft_tau) + param.data * self.soft_tau)

    def save(self, path):
        torch.save(self.actor.state_dict(), path + 'checkpoint.pt')

    def load(self, path):
        self.actor.load_state_dict(torch.load(path + 'checkpoint.pt'))
class DoubleDQN:
    def __init__(self, state_dim, action_dim, cfg):
        self.action_dim = action_dim
        self.state_dim = state_dim
        self.gamma = cfg.gamma
        self.policy_net = MLP(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_net = MLP(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # disable BatchNormalization and Dropout
        self.optim = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.device = cfg.device
        self.frame_idx = 0
        self.epsilon = lambda frame_idx: cfg.epsilon_end + (
            cfg.epsilon_start - cfg.epsilon_end) * math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.batch_size = cfg.batch_size
        self.loss = 0

    def choose_action(self, state):
        self.frame_idx += 1
        state = torch.tensor([state], device=self.device, dtype=torch.float32)
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():  # no gradient bookkeeping for action selection
                q_values = self.policy_net(state)
                action = q_values.max(1)[1].item()
        else:
            action = random.randrange(self.action_dim)
        return action

    def update(self):
        if len(self.memory) < self.batch_size:
            return
        # sample a batch of transitions
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
        # convert to tensors and move them to the configured device
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float32)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float32)
        done_batch = torch.tensor(done_batch, device=self.device)
        # Q-values for the (s_t, a) pairs in the batch
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
        # compute next_q_values: the key step of Double DQN is that the action is selected by policy_net ...
        next_action_batch = self.policy_net(next_state_batch).max(1)[1].unsqueeze(1)
        # ... while its value is taken from target_net
        next_q_values = self.target_net(next_state_batch).gather(
            dim=1, index=next_action_batch).detach().squeeze(1)
        expected_q_values = reward_batch + self.gamma * next_q_values * (~done_batch)
        self.loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))
        self.optim.zero_grad()
        self.loss.backward()
        for param in self.policy_net.parameters():  # clip gradients to avoid exploding gradients
            param.grad.data.clamp_(-1, 1)
        self.optim.step()

    def save(self, path):
        torch.save(self.target_net.state_dict(), path + 'DQN_CheckPoint.pth')

    def load(self, path):
        self.target_net.load_state_dict(torch.load(path + 'DQN_CheckPoint.pth'))
class SAC:
    def __init__(self, state_dim, action_dim, cfg) -> None:
        self.batch_size = cfg.batch_size
        self.memory = ReplayBuffer(cfg.capacity)
        self.device = cfg.device
        self.value_net = ValueNet(state_dim, cfg.hidden_dim).to(self.device)
        self.target_value_net = ValueNet(state_dim, cfg.hidden_dim).to(self.device)
        self.soft_q_net = SoftQNet(state_dim, action_dim, cfg.hidden_dim).to(self.device)
        self.policy_net = PolicyNet(state_dim, action_dim, cfg.hidden_dim).to(self.device)
        self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=cfg.value_lr)
        self.soft_q_optimizer = optim.Adam(self.soft_q_net.parameters(), lr=cfg.soft_q_lr)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.policy_lr)
        for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(param.data)
        self.value_criterion = nn.MSELoss()
        self.soft_q_criterion = nn.MSELoss()

    def update(self, gamma=0.99, mean_lambda=1e-3, std_lambda=1e-3, z_lambda=0.0, soft_tau=1e-2):
        if len(self.memory) < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        state = torch.FloatTensor(state).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        action = torch.FloatTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
        expected_q_value = self.soft_q_net(state, action)
        expected_value = self.value_net(state)
        new_action, log_prob, z, mean, log_std = self.policy_net.evaluate(state)
        # soft Q-function loss
        target_value = self.target_value_net(next_state)
        next_q_value = reward + (1 - done) * gamma * target_value
        q_value_loss = self.soft_q_criterion(expected_q_value, next_q_value.detach())
        # value-function loss
        expected_new_q_value = self.soft_q_net(state, new_action)
        next_value = expected_new_q_value - log_prob
        value_loss = self.value_criterion(expected_value, next_value.detach())
        # policy loss plus regularization of the Gaussian policy parameters
        log_prob_target = expected_new_q_value - expected_value
        policy_loss = (log_prob * (log_prob - log_prob_target).detach()).mean()
        mean_loss = mean_lambda * mean.pow(2).mean()
        std_loss = std_lambda * log_std.pow(2).mean()
        z_loss = z_lambda * z.pow(2).sum(1).mean()
        policy_loss += mean_loss + std_loss + z_loss
        self.soft_q_optimizer.zero_grad()
        q_value_loss.backward()
        self.soft_q_optimizer.step()
        self.value_optimizer.zero_grad()
        value_loss.backward()
        self.value_optimizer.step()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()
        # soft-update the target value network
        for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - soft_tau) + param.data * soft_tau)

    def save(self, path):
        torch.save(self.value_net.state_dict(), path + "sac_value")
        torch.save(self.value_optimizer.state_dict(), path + "sac_value_optimizer")
        torch.save(self.soft_q_net.state_dict(), path + "sac_soft_q")
        torch.save(self.soft_q_optimizer.state_dict(), path + "sac_soft_q_optimizer")
        torch.save(self.policy_net.state_dict(), path + "sac_policy")
        torch.save(self.policy_optimizer.state_dict(), path + "sac_policy_optimizer")

    def load(self, path):
        self.value_net.load_state_dict(torch.load(path + "sac_value"))
        self.value_optimizer.load_state_dict(torch.load(path + "sac_value_optimizer"))
        self.target_value_net = copy.deepcopy(self.value_net)
        self.soft_q_net.load_state_dict(torch.load(path + "sac_soft_q"))
        self.soft_q_optimizer.load_state_dict(torch.load(path + "sac_soft_q_optimizer"))
        self.policy_net.load_state_dict(torch.load(path + "sac_policy"))
        self.policy_optimizer.load_state_dict(torch.load(path + "sac_policy_optimizer"))
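The PolicyNet.evaluate() call used by SAC.update() above is not shown in this section. A sketch of a squashed Gaussian policy that returns the same five values is given below; the network sizes, clamping bounds and helper names are assumptions, not the project's actual definition:

import torch
import torch.nn as nn
from torch.distributions import Normal

class PolicyNet(nn.Module):
    '''Sketch of the squashed Gaussian policy assumed by SAC.update() above.'''
    def __init__(self, state_dim, action_dim, hidden_dim, log_std_min=-20, log_std_max=2):
        super(PolicyNet, self).__init__()
        self.net = nn.Sequential(nn.Linear(state_dim, hidden_dim), nn.ReLU(),
                                 nn.Linear(hidden_dim, hidden_dim), nn.ReLU())
        self.mean_linear = nn.Linear(hidden_dim, action_dim)
        self.log_std_linear = nn.Linear(hidden_dim, action_dim)
        self.log_std_min, self.log_std_max = log_std_min, log_std_max

    def forward(self, state):
        x = self.net(state)
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x).clamp(self.log_std_min, self.log_std_max)
        return mean, log_std

    def evaluate(self, state, epsilon=1e-6):
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        z = normal.rsample()    # reparameterization trick keeps the sample differentiable
        action = torch.tanh(z)  # squash the action into (-1, 1)
        # change-of-variables correction for the tanh squashing
        log_prob = (normal.log_prob(z) - torch.log(1 - action.pow(2) + epsilon)).sum(-1, keepdim=True)
        return action, log_prob, z, mean, log_std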