Example #1
    def __init__(self, state_dim, action_dim, cfg):
        self.action_dim = action_dim
        self.state_dim = state_dim
        self.loss = 0
        self.gamma = cfg.gamma
        self.frame_idx = 0  # frame counter used for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.device = cfg.device
        self.policy_net = MLP(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.target_net = MLP(state_dim, action_dim, cfg.hidden_dim).to(cfg.device)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
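
The `self.epsilon` lambda implements an exponential decay of the exploration rate from `cfg.epsilon_start` down to `cfg.epsilon_end` with time constant `cfg.epsilon_decay`, driven by `self.frame_idx`. A minimal standalone sketch of that schedule, using assumed hyperparameter values (1.0, 0.01, 500) that are not taken from any of these configs:

import math

epsilon_start, epsilon_end, epsilon_decay = 1.0, 0.01, 500  # assumed values
epsilon = lambda frame_idx: epsilon_end + (epsilon_start - epsilon_end) * \
    math.exp(-1. * frame_idx / epsilon_decay)

for frame_idx in (0, 500, 2000):
    print(f"frame {frame_idx}: epsilon = {epsilon(frame_idx):.3f}")
# frame 0: epsilon = 1.000 (pure exploration)
# frame 500: epsilon ≈ 0.374
# frame 2000: epsilon ≈ 0.028 (mostly greedy)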
Example #2
    def __init__(self, state_dim, action_dim, cfg):

        self.action_dim = action_dim  # total number of actions
        self.device = cfg.device  # device: cpu, gpu, etc.
        self.gamma = cfg.gamma  # discount factor for rewards
        # parameters of the epsilon-greedy policy
        self.frame_idx = 0  # frame counter used for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):  # copy parameters from the policy net
            target_param.data.copy_(param.data)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
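
The `zip`/`copy_` loop above is a hard copy of the policy network's weights into the target network, equivalent to the `load_state_dict` call in Example #3. The same loop generalizes to a soft (Polyak) target update; the sketch below is illustrative, and the mixing coefficient `tau` is an assumed hyperparameter that is not part of the original config:

def hard_update(target_net, policy_net):
    # exact copy, as done once in the constructor above
    target_net.load_state_dict(policy_net.state_dict())

def soft_update(target_net, policy_net, tau=0.005):  # tau: assumed mixing coefficient
    # target <- tau * policy + (1 - tau) * target, applied after each learning step
    for target_param, param in zip(target_net.parameters(), policy_net.parameters()):
        target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)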
Example #3
    def __init__(self, state_dim, action_dim, cfg):

        self.action_dim = action_dim  # total number of actions
        self.device = cfg.device  # device: cpu, gpu, etc.
        self.gamma = cfg.gamma
        # parameters of the epsilon-greedy policy
        self.actions_count = 0
        self.epsilon_start = cfg.epsilon_start
        self.epsilon_end = cfg.epsilon_end
        self.epsilon_decay = cfg.epsilon_decay
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(state_dim, action_dim,
                              hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP(state_dim, action_dim,
                              hidden_dim=cfg.hidden_dim).to(self.device)
        # initialize target_net with an exact copy of policy_net's parameters
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # eval mode disables BatchNorm updates and Dropout
        # see the difference between parameters() and state_dict(): the former yields the trainable tensors (requires_grad=True)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.loss = 0
        self.memory = ReplayBuffer(cfg.memory_capacity)
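
All four constructors assume `math`, `torch.optim as optim`, an `MLP` Q-network, and a `ReplayBuffer` defined elsewhere. The classes below are a minimal, assumed sketch of what those two helpers typically look like in DQN code of this style; the actual implementations may differ:

import random
from collections import deque

import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    """Fully connected Q-network: maps a state to one Q-value per action."""
    def __init__(self, state_dim, action_dim, hidden_dim=128):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

class ReplayBuffer:
    """Fixed-capacity buffer with uniform random sampling."""
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)
        state, action, reward, next_state, done = zip(*batch)
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)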
Example #4
    def __init__(self, state_dim, action_dim, cfg):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = cfg.gamma
        self.device = cfg.device
        self.batch_size = cfg.batch_size
        self.frame_idx = 0
        self.epsilon = lambda frame_idx: cfg.epsilon_end + (
            cfg.epsilon_start - cfg.epsilon_end) * math.exp(-1. * frame_idx /
                                                            cfg.epsilon_decay)
        self.policy_net = MLP(2 * state_dim, action_dim,
                              cfg.hidden_dim).to(cfg.device)
        self.meta_policy_net = MLP(state_dim, state_dim, cfg.hidden_dim).to(
            cfg.device)  # high-level (meta) policy that produces guiding goals; its action space is equivalent to the state space
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
        self.meta_optimizer = optim.Adam(self.meta_policy_net.parameters(),
                                         lr=cfg.lr)
        self.memory = ReplayBuffer(cfg.memory_capacity)
        self.meta_memory = ReplayBuffer(cfg.memory_capacity)
        self.loss_numpy = 0
        self.meta_loss_numpy = 0
        self.losses = []
        self.meta_losses = []
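
Example #4 is a hierarchical (h-DQN-style) agent: `meta_policy_net` chooses a goal in state space, and the low-level `policy_net` acts on the current state concatenated with that goal, which is why its input size is `2 * state_dim`. A minimal sketch of how the low-level action could be selected greedily; the helper name and the purely greedy selection are illustrative assumptions, not part of the original code:

import torch

def choose_low_level_action(agent, state, goal):
    # concatenate observation and goal to build the low-level input of size 2 * state_dim
    state_goal = torch.cat([torch.as_tensor(state, dtype=torch.float32),
                            torch.as_tensor(goal, dtype=torch.float32)])
    state_goal = state_goal.unsqueeze(0).to(agent.device)
    with torch.no_grad():
        q_values = agent.policy_net(state_goal)  # shape: [1, action_dim]
    return q_values.argmax(dim=1).item()  # epsilon-greedy exploration would randomize here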