def _initialize(self):
    """Initialize non-common things."""
    # load demo replay memory
    with open(self.args.demo_path, "rb") as f:
        demo = list(pickle.load(f))

    # HER
    if self.hyper_params.use_her:
        self.her = build_her(self.hyper_params.her)
        print(f"[INFO] Build {str(self.her)}.")

        if self.hyper_params.desired_states_from_demo:
            self.her.fetch_desired_states_from_demo(demo)

        self.transitions_epi: list = list()
        self.desired_state = np.zeros((1,))
        demo = self.her.generate_demo_transitions(demo)

        if not self.her.is_goal_in_state:
            self.state_dim = (self.state_dim[0] * 2,)
    else:
        self.her = None

    if not self.args.test:
        # Replay buffers
        demo_batch_size = self.hyper_params.demo_batch_size
        self.demo_memory = ReplayBuffer(len(demo), demo_batch_size)
        self.demo_memory.extend(demo)

        self.memory = ReplayBuffer(self.hyper_params.buffer_size, demo_batch_size)

        # set hyper parameters
        self.lambda2 = 1.0 / demo_batch_size
def _initialize(self):
    """Initialize non-common things."""
    if not self.is_test:
        # replay memory for a single step
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
        )
        self.memory = PrioritizedBufferWrapper(
            self.memory, alpha=self.hyper_params.per_alpha
        )

        # replay memory for multi-steps
        if self.use_n_step:
            self.memory_n = ReplayBuffer(
                self.hyper_params.buffer_size,
                self.hyper_params.batch_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
            )

    build_args = dict(
        hyper_params=self.hyper_params,
        log_cfg=self.log_cfg,
        env_name=self.env_info.name,
        state_size=self.env_info.observation_space.shape,
        output_size=self.env_info.action_space.n,
        is_test=self.is_test,
        load_from=self.load_from,
    )
    self.learner = build_learner(self.learner_cfg, build_args)
def _initialize(self):
    """Initialize non-common things."""
    self.per_beta = self.hyper_params.per_beta
    self.use_n_step = self.hyper_params.n_step > 1

    if not self.args.test:
        # load demo replay memory
        with open(self.args.demo_path, "rb") as f:
            demos = pickle.load(f)

        if self.use_n_step:
            demos, demos_n_step = common_utils.get_n_step_info_from_demo(
                demos, self.hyper_params.n_step, self.hyper_params.gamma
            )

            # replay memory for multi-steps
            self.memory_n = ReplayBuffer(
                buffer_size=self.hyper_params.buffer_size,
                batch_size=self.hyper_params.batch_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
                demo=demos_n_step,
            )

        # replay memory
        self.memory = PrioritizedReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
            demo=demos,
            alpha=self.hyper_params.per_alpha,
            epsilon_d=self.hyper_params.per_eps_demo,
        )
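# Illustrative sketch only: `common_utils.get_n_step_info_from_demo` is not shown
# in this section. Assuming each demo transition is a (state, action, reward,
# next_state, done) tuple, the n-step split it returns is expected to look roughly
# like the following; this is not the library's actual implementation.
from collections import deque
from typing import Deque, List, Tuple

import numpy as np


def _n_step_info(
    n_step_buffer: Deque[Tuple[np.ndarray, ...]], gamma: float
) -> Tuple[float, np.ndarray, bool]:
    """Aggregate reward, next_state and done over the buffered n transitions."""
    reward, next_state, done = n_step_buffer[-1][-3:]
    for _, _, r, n_s, d in reversed(list(n_step_buffer)[:-1]):
        reward = r + gamma * reward * (1 - d)
        next_state, done = (n_s, d) if d else (next_state, done)
    return reward, next_state, done


def get_n_step_info_from_demo_sketch(
    demo: List[Tuple[np.ndarray, ...]], n_step: int, gamma: float
) -> Tuple[List[Tuple[np.ndarray, ...]], List[Tuple[np.ndarray, ...]]]:
    """Return 1-step and n-step demo transitions (hypothetical helper)."""
    demos_1_step, demos_n_step = [], []
    n_step_buffer: Deque[Tuple[np.ndarray, ...]] = deque(maxlen=n_step)
    for transition in demo:
        n_step_buffer.append(transition)
        if len(n_step_buffer) == n_step:
            state, action = n_step_buffer[0][:2]
            reward, next_state, done = _n_step_info(n_step_buffer, gamma)
            demos_1_step.append(n_step_buffer[0])
            demos_n_step.append((state, action, reward, next_state, done))
    return demos_1_step, demos_n_step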
def _initialize(self):
    """Initialize non-common things."""
    if not self.args.test:
        # load demo replay memory
        demos = self._load_demos()

        if self.use_n_step:
            demos, demos_n_step = common_utils.get_n_step_info_from_demo(
                demos, self.hyper_params.n_step, self.hyper_params.gamma
            )

            self.memory_n = ReplayBuffer(
                buffer_size=self.hyper_params.buffer_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
                demo=demos_n_step,
            )

        # replay memory
        self.memory = PrioritizedReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
            demo=demos,
            alpha=self.hyper_params.per_alpha,
            epsilon_d=self.hyper_params.per_eps_demo,
        )
def _initialize(self):
    """Initialize non-common things."""
    if not self.args.test:
        # replay memory
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size, self.hyper_params.batch_size
        )
def _initialize(self):
    """Initialize non-common things."""
    self.per_beta = self.hyper_params.per_beta
    self.use_n_step = self.hyper_params.n_step > 1

    if not self.args.test:
        # load demo replay memory
        with open(self.args.demo_path, "rb") as f:
            demos = pickle.load(f)

        if self.use_n_step:
            demos, demos_n_step = common_utils.get_n_step_info_from_demo(
                demos, self.hyper_params.n_step, self.hyper_params.gamma
            )

            # replay memory for multi-steps
            self.memory_n = ReplayBuffer(
                max_len=self.hyper_params.buffer_size,
                batch_size=self.hyper_params.batch_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
                demo=demos_n_step,
            )

        # replay memory for a single step
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
        )
        self.memory = PrioritizedBufferWrapper(
            self.memory, alpha=self.hyper_params.per_alpha
        )

    self.learner_cfg.type = "DDPGfDLearner"
    self.learner = build_learner(self.learner_cfg)
def _initialize(self):
    """Initialize non-common things."""
    # load demo replay memory
    with open(self.args.demo_path, "rb") as f:
        demo = list(pickle.load(f))

    # HER
    if self.hyper_params.use_her:
        self.her = build_her(self.hyper_params.her)
        print(f"[INFO] Build {str(self.her)}.")

        if self.hyper_params.desired_states_from_demo:
            self.her.fetch_desired_states_from_demo(demo)

        self.transitions_epi: list = list()
        self.desired_state = np.zeros((1,))
        demo = self.her.generate_demo_transitions(demo)

        if not self.her.is_goal_in_state:
            self.state_dim = (self.state_dim[0] * 2,)
    else:
        self.her = None

    if not self.args.test:
        # Replay buffers
        demo_batch_size = self.hyper_params.demo_batch_size
        self.demo_memory = ReplayBuffer(len(demo), demo_batch_size)
        self.demo_memory.extend(demo)

        self.memory = ReplayBuffer(self.hyper_params.sac_buffer_size, demo_batch_size)

        # set hyper parameters
        self.hyper_params["lambda2"] = 1.0 / demo_batch_size

    self.args.cfg_path = self.args.offer_cfg_path
    self.args.load_from = self.args.load_offer_from

    self.hyper_params.buffer_size = self.hyper_params.sac_buffer_size
    self.hyper_params.batch_size = self.hyper_params.sac_batch_size
    self.learner_cfg.type = "BCSACLearner"
    self.learner_cfg.hyper_params = self.hyper_params
    self.learner = build_learner(self.learner_cfg)
    del self.hyper_params.buffer_size
    del self.hyper_params.batch_size

    # init stack
    self.stack_size = self.args.stack_size
    self.stack_buffer = deque(maxlen=self.args.stack_size)
    self.stack_buffer_2 = deque(maxlen=self.args.stack_size)

    self.scores = list()
    self.utilities = list()
    self.rounds = list()
    self.opp_utilities = list()
def __init__(
    self,
    env: gym.Env,
    args: argparse.Namespace,
    log_cfg: ConfigDict,
    hyper_params: ConfigDict,
    backbone: ConfigDict,
    head: ConfigDict,
    optim_cfg: ConfigDict,
    noise_cfg: ConfigDict,
):
    """Initialize.

    Args:
        env (gym.Env): openAI Gym environment
        args (argparse.Namespace): arguments including hyperparameters and training settings

    """
    Agent.__init__(self, env, args, log_cfg)

    self.curr_state = np.zeros((1,))
    self.total_step = 0
    self.episode_step = 0
    self.update_step = 0
    self.i_episode = 0

    self.hyper_params = hyper_params
    self.noise_cfg = noise_cfg
    self.backbone_cfg = backbone
    self.head_cfg = head
    self.optim_cfg = optim_cfg
    self.state_dim = self.env.observation_space.shape
    self.action_dim = self.env.action_space.shape[0]

    # noise instance to make randomness of action
    self.exploration_noise = GaussianNoise(
        self.action_dim, noise_cfg.exploration_noise, noise_cfg.exploration_noise
    )
    self.target_policy_noise = GaussianNoise(
        self.action_dim,
        noise_cfg.target_policy_noise,
        noise_cfg.target_policy_noise,
    )

    if not self.args.test:
        # replay memory
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size, self.hyper_params.batch_size
        )

    self._init_network()
def __init__(
    self,
    env: gym.Env,
    env_info: ConfigDict,
    args: argparse.Namespace,
    hyper_params: ConfigDict,
    learner_cfg: ConfigDict,
    noise_cfg: ConfigDict,
    log_cfg: ConfigDict,
):
    """Initialize.

    Args:
        env (gym.Env): openAI Gym environment
        args (argparse.Namespace): arguments including hyperparameters and training settings

    """
    Agent.__init__(self, env, env_info, args, log_cfg)

    self.curr_state = np.zeros((1,))
    self.total_step = 0
    self.episode_step = 0
    self.update_step = 0
    self.i_episode = 0

    self.hyper_params = hyper_params
    self.learner_cfg = learner_cfg
    self.learner_cfg.args = self.args
    self.learner_cfg.env_info = self.env_info
    self.learner_cfg.hyper_params = self.hyper_params
    self.learner_cfg.log_cfg = self.log_cfg
    self.learner_cfg.noise_cfg = noise_cfg
    self.learner_cfg.device = device

    # noise instance to make randomness of action
    self.exploration_noise = GaussianNoise(
        self.env_info.action_space.shape[0],
        noise_cfg.exploration_noise,
        noise_cfg.exploration_noise,
    )

    if not self.args.test:
        # replay memory
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size, self.hyper_params.batch_size
        )

    self.learner = build_learner(self.learner_cfg)
def _spawn(self):
    """Initialize distributed worker, learner and centralized replay buffer."""
    replay_buffer = ReplayBuffer(
        self.hyper_params.buffer_size,
        self.hyper_params.batch_size,
    )
    per_buffer = PrioritizedBufferWrapper(
        replay_buffer, alpha=self.hyper_params.per_alpha
    )
    self.global_buffer = ApeXBufferWrapper.remote(
        per_buffer, self.args, self.hyper_params, self.comm_cfg
    )

    learner = build_learner(self.learner_cfg)
    self.learner = ApeXLearnerWrapper.remote(learner, self.comm_cfg)

    state_dict = learner.get_state_dict()
    worker_build_args = dict(args=self.args, state_dict=state_dict)

    self.workers = []
    self.num_workers = self.hyper_params.num_workers
    for rank in range(self.num_workers):
        worker_build_args["rank"] = rank
        worker = build_worker(self.worker_cfg, build_args=worker_build_args)
        apex_worker = ApeXWorkerWrapper.remote(worker, self.args, self.comm_cfg)
        self.workers.append(apex_worker)

    self.logger = build_logger(self.logger_cfg)

    self.processes = self.workers + [self.learner, self.global_buffer, self.logger]
def _initialize(self):
    """Initialize non-common things."""
    # load demo replay memory
    with open(self.hyper_params.demo_path, "rb") as f:
        demo = list(pickle.load(f))

    # HER
    if self.hyper_params.use_her:
        self.her = build_her(self.hyper_params.her)
        print(f"[INFO] Build {str(self.her)}.")

        if self.hyper_params.desired_states_from_demo:
            self.her.fetch_desired_states_from_demo(demo)

        self.transitions_epi: list = list()
        self.desired_state = np.zeros((1,))
        demo = self.her.generate_demo_transitions(demo)

        if not self.her.is_goal_in_state:
            self.env_info.observation_space.shape = (
                self.env_info.observation_space.shape[0] * 2,
            )
    else:
        self.her = None

    if not self.is_test:
        # Replay buffers
        demo_batch_size = self.hyper_params.demo_batch_size
        self.demo_memory = ReplayBuffer(len(demo), demo_batch_size)
        self.demo_memory.extend(demo)

        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size, self.hyper_params.batch_size
        )

        # set hyper parameters
        self.hyper_params["lambda2"] = 1.0 / demo_batch_size

    build_args = dict(
        hyper_params=self.hyper_params,
        log_cfg=self.log_cfg,
        noise_cfg=self.noise_cfg,
        env_name=self.env_info.name,
        state_size=self.env_info.observation_space.shape,
        output_size=self.env_info.action_space.shape[0],
        is_test=self.is_test,
        load_from=self.load_from,
    )
    self.learner = build_learner(self.learner_cfg, build_args)
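# Why the observation size is doubled above: when the goal is not already part of
# the state, HER-style agents feed the policy a concatenation of the raw state and
# the desired goal state (see the `_preprocess_state` method later in this
# section). Minimal illustration, assuming state and goal share the same shape:
import numpy as np

state = np.zeros(4)          # raw observation from the env
desired_state = np.ones(4)   # goal fetched from demos / HER
policy_input = np.concatenate((state, desired_state), axis=-1)
assert policy_input.shape == (8,)  # hence observation_space.shape[0] * 2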
def _initialize(self):
    """Initialize non-common things."""
    if not self.args.test:
        # replay memory for a single step
        self.memory = PrioritizedReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
            alpha=self.hyper_params.per_alpha,
        )

        # replay memory for multi-steps
        if self.use_n_step:
            self.memory_n = ReplayBuffer(
                self.hyper_params.buffer_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
            )
def _initialize(self):
    """Initialize non-common things."""
    self.per_beta = self.hyper_params.per_beta
    self.use_n_step = self.hyper_params.n_step > 1

    if not self.is_test:
        # load demo replay memory
        with open(self.hyper_params.demo_path, "rb") as f:
            demos = pickle.load(f)

        if self.use_n_step:
            demos, demos_n_step = common_utils.get_n_step_info_from_demo(
                demos, self.hyper_params.n_step, self.hyper_params.gamma
            )

            # replay memory for multi-steps
            self.memory_n = ReplayBuffer(
                max_len=self.hyper_params.buffer_size,
                batch_size=self.hyper_params.batch_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
                demo=demos_n_step,
            )

        # replay memory for a single step
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
            demo=demos,
        )
        self.memory = PrioritizedBufferWrapper(
            self.memory,
            alpha=self.hyper_params.per_alpha,
            epsilon_d=self.hyper_params.per_eps_demo,
        )

    build_args = dict(
        hyper_params=self.hyper_params,
        log_cfg=self.log_cfg,
        noise_cfg=self.noise_cfg,
        env_name=self.env_info.name,
        state_size=self.env_info.observation_space.shape,
        output_size=self.env_info.action_space.shape[0],
        is_test=self.is_test,
        load_from=self.load_from,
    )
    self.learner = build_learner(self.learner_cfg, build_args)
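# Context for the per_alpha / per_eps_demo arguments above: a sketch of the
# standard prioritized-replay math such a buffer implements (illustration only,
# not this repo's exact code). Each transition i is drawn with probability
#   P(i) = p_i ** alpha / sum_k p_k ** alpha,
# where p_i = |TD error_i| + per_eps, and demo transitions receive an extra
# constant bonus per_eps_demo so they keep being replayed even once their TD
# error shrinks.
import numpy as np


def sampling_probabilities(td_errors, is_demo, alpha, per_eps, per_eps_demo):
    """Return per-transition sampling probabilities under proportional PER."""
    priorities = np.abs(td_errors) + per_eps + per_eps_demo * np.asarray(is_demo)
    scaled = priorities ** alpha
    return scaled / scaled.sum()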
def _initialize(self):
    """Initialize non-common things."""
    if not self.is_test:
        # replay memory
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size, self.hyper_params.batch_size
        )

    build_args = dict(
        hyper_params=self.hyper_params,
        log_cfg=self.log_cfg,
        noise_cfg=self.noise_cfg,
        env_name=self.env_info.name,
        state_size=self.env_info.observation_space.shape,
        output_size=self.env_info.action_space.shape[0],
        is_test=self.is_test,
        load_from=self.load_from,
    )
    self.learner = build_learner(self.learner_cfg, build_args)
def _initialize(self):
    """Initialize non-common things."""
    if not self.is_test:
        # load demo replay memory
        demos = self._load_demos()

        if self.use_n_step:
            demos, demos_n_step = common_utils.get_n_step_info_from_demo(
                demos, self.hyper_params.n_step, self.hyper_params.gamma
            )

            self.memory_n = ReplayBuffer(
                max_len=self.hyper_params.buffer_size,
                batch_size=self.hyper_params.batch_size,
                n_step=self.hyper_params.n_step,
                gamma=self.hyper_params.gamma,
                demo=demos_n_step,
            )

        # replay memory
        self.memory = ReplayBuffer(
            self.hyper_params.buffer_size,
            self.hyper_params.batch_size,
            demo=demos,
        )
        self.memory = PrioritizedBufferWrapper(
            self.memory,
            alpha=self.hyper_params.per_alpha,
            epsilon_d=self.hyper_params.per_eps_demo,
        )

    build_args = dict(
        hyper_params=self.hyper_params,
        log_cfg=self.log_cfg,
        env_name=self.env_info.name,
        state_size=self.env_info.observation_space.shape,
        output_size=self.env_info.action_space.n,
        is_test=self.is_test,
        load_from=self.load_from,
    )
    self.learner_cfg.type = "DQfDLearner"
    self.learner = build_learner(self.learner_cfg, build_args)
def test_uniform_sample(buffer_length=32, batch_size=8):
    """Test whether transitions are uniformly sampled from replay buffer."""
    n_repeat = 10000

    buffer = ReplayBuffer(max_len=buffer_length, batch_size=batch_size)
    sampled_lst = [0] * buffer.max_len

    # sample indices n_repeat times and accumulate their frequencies
    for _ in range(n_repeat):
        indices = generate_sample_idx(buffer)
        for idx in indices:
            sampled_lst[int(idx)] += 1 / n_repeat

    assert check_uniform(sampled_lst), "This distribution is not uniform."
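# `check_uniform` is used above but not defined in this section. A minimal,
# hypothetical sketch of such a helper: compare every empirical sampling
# frequency against the ideal uniform frequency within a relative tolerance.
import numpy as np


def check_uniform(sampled_lst, rel_tol=0.2):
    """Return True if the sampling frequencies are approximately uniform."""
    freqs = np.asarray(sampled_lst, dtype=float)
    expected = freqs.sum() / len(freqs)  # ideal frequency of each slot
    return bool(np.all(np.abs(freqs - expected) <= rel_tol * expected))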
def generate_prioritized_buffer(
    buffer_length: int, batch_size: int, idx_lst=None, prior_lst=None
) -> Tuple[PrioritizedBufferWrapper, List]:
    """Generate Prioritized Replay Buffer with random Prior."""
    buffer = ReplayBuffer(max_len=buffer_length, batch_size=batch_size)
    prioritized_buffer = PrioritizedBufferWrapper(buffer)

    priority = np.random.randint(10, size=buffer_length)
    for i, j in enumerate(priority):
        prioritized_buffer.sum_tree[i] = j

    if idx_lst:
        for i, j in list(zip(idx_lst, prior_lst)):
            priority[i] = j
            prioritized_buffer.sum_tree[i] = j

    prop_lst = [i / sum(priority) for i in priority]
    return prioritized_buffer, prop_lst
class SACfDAgent(SACAgent): """SAC agent interacting with environment. Attrtibutes: memory (PrioritizedReplayBuffer): replay memory beta (float): beta parameter for prioritized replay buffer use_n_step (bool): whether or not to use n-step returns """ # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" self.per_beta = self.hyper_params.per_beta self.use_n_step = self.hyper_params.n_step > 1 if not self.args.test: # load demo replay memory with open(self.args.demo_path, "rb") as f: demos = pickle.load(f) if self.use_n_step: demos, demos_n_step = common_utils.get_n_step_info_from_demo( demos, self.hyper_params.n_step, self.hyper_params.gamma) # replay memory for multi-steps self.memory_n = ReplayBuffer( buffer_size=self.hyper_params.buffer_size, batch_size=self.hyper_params.batch_size, n_step=self.hyper_params.n_step, gamma=self.hyper_params.gamma, demo=demos_n_step, ) # replay memory self.memory = PrioritizedReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, demo=demos, alpha=self.hyper_params.per_alpha, epsilon_d=self.hyper_params.per_eps_demo, ) def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" # add n-step transition if self.use_n_step: transition = self.memory_n.add(transition) # add a single step transition # if transition is not an empty tuple if transition: self.memory.add(transition) # pylint: disable=too-many-statements def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" self.update_step += 1 experiences = self.memory.sample(self.per_beta) ( states, actions, rewards, next_states, dones, weights, indices, eps_d, ) = experiences new_actions, log_prob, pre_tanh_value, mu, std = self.actor(states) # train alpha if self.hyper_params.auto_entropy_tuning: alpha_loss = torch.mean( (-self.log_alpha * (log_prob + self.target_entropy).detach()) * weights) self.alpha_optim.zero_grad() alpha_loss.backward() self.alpha_optim.step() alpha = self.log_alpha.exp() else: alpha_loss = torch.zeros(1) alpha = self.hyper_params.w_entropy # Q function loss masks = 1 - dones gamma = self.hyper_params.gamma states_actions = torch.cat((states, actions), dim=-1) q_1_pred = self.qf_1(states_actions) q_2_pred = self.qf_2(states_actions) v_target = self.vf_target(next_states) q_target = rewards + self.hyper_params.gamma * v_target * masks qf_1_loss = torch.mean((q_1_pred - q_target.detach()).pow(2) * weights) qf_2_loss = torch.mean((q_2_pred - q_target.detach()).pow(2) * weights) if self.use_n_step: experiences_n = self.memory_n.sample(indices) _, _, rewards, next_states, dones = experiences_n gamma = gamma**self.hyper_params.n_step masks = 1 - dones v_target = self.vf_target(next_states) q_target = rewards + gamma * v_target * masks qf_1_loss_n = torch.mean( (q_1_pred - q_target.detach()).pow(2) * weights) qf_2_loss_n = torch.mean( (q_2_pred - q_target.detach()).pow(2) * weights) # to update loss and priorities qf_1_loss = qf_1_loss + qf_1_loss_n * self.hyper_params.lambda1 qf_2_loss = qf_2_loss + qf_2_loss_n * self.hyper_params.lambda1 # V function loss states_actions = torch.cat((states, new_actions), dim=-1) v_pred = self.vf(states) q_pred = torch.min(self.qf_1(states_actions), self.qf_2(states_actions)) v_target = (q_pred - alpha * log_prob).detach() vf_loss_element_wise = (v_pred - v_target).pow(2) vf_loss = torch.mean(vf_loss_element_wise * weights) # train Q functions self.qf_1_optim.zero_grad() 
qf_1_loss.backward() self.qf_1_optim.step() self.qf_2_optim.zero_grad() qf_2_loss.backward() self.qf_2_optim.step() # train V function self.vf_optim.zero_grad() vf_loss.backward() self.vf_optim.step() if self.update_step % self.hyper_params.policy_update_freq == 0: # actor loss advantage = q_pred - v_pred.detach() actor_loss_element_wise = alpha * log_prob - advantage actor_loss = torch.mean(actor_loss_element_wise * weights) # regularization mean_reg = self.hyper_params.w_mean_reg * mu.pow(2).mean() std_reg = self.hyper_params.w_std_reg * std.pow(2).mean() pre_activation_reg = self.hyper_params.w_pre_activation_reg * ( pre_tanh_value.pow(2).sum(dim=-1).mean()) actor_reg = mean_reg + std_reg + pre_activation_reg # actor loss + regularization actor_loss += actor_reg # train actor self.actor_optim.zero_grad() actor_loss.backward() self.actor_optim.step() # update target networks common_utils.soft_update(self.vf, self.vf_target, self.hyper_params.tau) # update priorities new_priorities = vf_loss_element_wise new_priorities += self.hyper_params.lambda3 * actor_loss_element_wise.pow( 2) new_priorities += self.hyper_params.per_eps new_priorities = new_priorities.data.cpu().numpy().squeeze() new_priorities += eps_d self.memory.update_priorities(indices, new_priorities) # increase beta fraction = min(float(self.i_episode) / self.args.episode_num, 1.0) self.per_beta = self.per_beta + fraction * (1.0 - self.per_beta) else: actor_loss = torch.zeros(1) return ( actor_loss.item(), qf_1_loss.item(), qf_2_loss.item(), vf_loss.item(), alpha_loss.item(), ) def pretrain(self): """Pretraining steps.""" pretrain_loss = list() pretrain_step = self.hyper_params.pretrain_step print("[INFO] Pre-Train %d steps." % pretrain_step) for i_step in range(1, pretrain_step + 1): t_begin = time.time() loss = self.update_model() t_end = time.time() pretrain_loss.append(loss) # for logging # logging if i_step == 1 or i_step % 100 == 0: avg_loss = np.vstack(pretrain_loss).mean(axis=0) pretrain_loss.clear() log_value = ( 0, avg_loss, 0, self.hyper_params.policy_update_freq, t_end - t_begin, ) self.write_log(log_value) print("[INFO] Pre-Train Complete!\n")
class DDPGfDAgent(DDPGAgent): """ActorCritic interacting with environment. Attributes: memory (PrioritizedReplayBuffer): replay memory per_beta (float): beta parameter for prioritized replay buffer use_n_step (bool): whether or not to use n-step returns """ # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" self.per_beta = self.hyper_params.per_beta self.use_n_step = self.hyper_params.n_step > 1 if not self.args.test: # load demo replay memory with open(self.args.demo_path, "rb") as f: demos = pickle.load(f) if self.use_n_step: demos, demos_n_step = common_utils.get_n_step_info_from_demo( demos, self.hyper_params.n_step, self.hyper_params.gamma) # replay memory for multi-steps self.memory_n = ReplayBuffer( max_len=self.hyper_params.buffer_size, batch_size=self.hyper_params.batch_size, n_step=self.hyper_params.n_step, gamma=self.hyper_params.gamma, demo=demos_n_step, ) # replay memory for a single step self.memory = ReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, ) self.memory = PrioritizedBufferWrapper( self.memory, alpha=self.hyper_params.per_alpha) self.learner_cfg.type = "DDPGfDLearner" self.learner = build_learner(self.learner_cfg) def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" # add n-step transition if self.use_n_step: transition = self.memory_n.add(transition) # add a single step transition # if transition is not an empty tuple if transition: self.memory.add(transition) def sample_experience(self) -> Tuple[torch.Tensor, ...]: experience_1 = self.memory.sample(self.per_beta) if self.use_n_step: indices = experience_1[-2] experience_n = self.memory_n.sample(indices) return numpy2floattensor(experience_1), numpy2floattensor( experience_n) return numpy2floattensor(experience_1) def pretrain(self): """Pretraining steps.""" pretrain_loss = list() pretrain_step = self.hyper_params.pretrain_step print("[INFO] Pre-Train %d step." 
% pretrain_step) for i_step in range(1, pretrain_step + 1): t_begin = time.time() experience = self.sample_experience() info = self.learner.update_model(experience) loss = info[0:2] t_end = time.time() pretrain_loss.append(loss) # for logging # logging if i_step == 1 or i_step % 100 == 0: avg_loss = np.vstack(pretrain_loss).mean(axis=0) pretrain_loss.clear() log_value = (0, avg_loss, 0, t_end - t_begin) self.write_log(log_value) print("[INFO] Pre-Train Complete!\n") def train(self): """Train the agent.""" # logger if self.args.log: self.set_wandb() # wandb.watch([self.actor, self.critic], log="parameters") # pre-training if needed self.pretrain() for self.i_episode in range(1, self.args.episode_num + 1): state = self.env.reset() done = False score = 0 self.episode_step = 0 losses = list() t_begin = time.time() while not done: if self.args.render and self.i_episode >= self.args.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 if len(self.memory) >= self.hyper_params.batch_size: for _ in range(self.hyper_params.multiple_update): experience = self.sample_experience() info = self.learner.update_model(experience) loss = info[0:2] indices, new_priorities = info[2:4] losses.append(loss) # for logging self.memory.update_priorities(indices, new_priorities) # increase priority beta fraction = min( float(self.i_episode) / self.args.episode_num, 1.0) self.per_beta = self.per_beta + fraction * (1.0 - self.per_beta) state = next_state score += reward t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step # logging if losses: avg_loss = np.vstack(losses).mean(axis=0) log_value = (self.i_episode, avg_loss, score, avg_time_cost) self.write_log(log_value) losses.clear() if self.i_episode % self.args.save_period == 0: self.learner.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.learner.save_params(self.i_episode) self.interim_test()
class BCSACAgent(SACAgent): """BC with SAC agent interacting with environment. Attrtibutes: her (HER): hinsight experience replay transitions_epi (list): transitions per episode (for HER) desired_state (np.ndarray): desired state of current episode memory (ReplayBuffer): replay memory demo_memory (ReplayBuffer): replay memory for demo lambda2 (float): proportion of BC loss """ # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" # load demo replay memory with open(self.hyper_params.demo_path, "rb") as f: demo = list(pickle.load(f)) # HER if self.hyper_params.use_her: self.her = build_her(self.hyper_params.her) print(f"[INFO] Build {str(self.her)}.") if self.hyper_params.desired_states_from_demo: self.her.fetch_desired_states_from_demo(demo) self.transitions_epi: list = list() self.desired_state = np.zeros((1,)) demo = self.her.generate_demo_transitions(demo) if not self.her.is_goal_in_state: self.state_dim = (self.state_dim[0] * 2,) else: self.her = None if not self.is_test: # Replay buffers demo_batch_size = self.hyper_params.demo_batch_size self.demo_memory = ReplayBuffer(len(demo), demo_batch_size) self.demo_memory.extend(demo) self.memory = ReplayBuffer(self.hyper_params.buffer_size, demo_batch_size) # set hyper parameters self.hyper_params["lambda2"] = 1.0 / demo_batch_size build_args = dict( hyper_params=self.hyper_params, log_cfg=self.log_cfg, env_name=self.env_info.name, state_size=self.env_info.observation_space.shape, output_size=self.env_info.action_space.shape[0], is_test=self.is_test, load_from=self.load_from, ) self.learner = build_learner(self.learner_cfg, build_args) def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" if self.hyper_params.use_her: self.desired_state = self.her.get_desired_state() state = np.concatenate((state, self.desired_state), axis=-1) state = numpy2floattensor(state, self.learner.device) return state def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" if self.hyper_params.use_her: self.transitions_epi.append(transition) done = transition[-1] or self.episode_step == self.max_episode_steps if done: # insert generated transitions if the episode is done transitions = self.her.generate_transitions( self.transitions_epi, self.desired_state, self.hyper_params.success_score, ) self.memory.extend(transitions) self.transitions_epi.clear() else: self.memory.add(transition) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, policy_update_freq, avg_time_cost = log_value total_loss = loss.sum() print( "[INFO] episode %d, episode_step %d, total step %d, total score: %d\n" "total loss: %.3f actor_loss: %.3f qf_1_loss: %.3f qf_2_loss: %.3f " "vf_loss: %.3f alpha_loss: %.3f n_qf_mask: %d (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, total_loss, loss[0] * policy_update_freq, # actor loss loss[1], # qf_1 loss loss[2], # qf_2 loss loss[3], # vf loss loss[4], # alpha loss loss[5], # n_qf_mask avg_time_cost, ) ) if self.is_log: wandb.log( { "score": score, "total loss": total_loss, "actor loss": loss[0] * policy_update_freq, "qf_1 loss": loss[1], "qf_2 loss": loss[2], "vf loss": loss[3], "alpha loss": loss[4], "time per each step": avg_time_cost, } ) def train(self): """Train the agent.""" # logger if self.is_log: self.set_wandb() # wandb.watch([self.actor, self.vf, self.qf_1, self.qf_2], log="parameters") # 
pre-training if needed self.pretrain() for self.i_episode in range(1, self.episode_num + 1): state = self.env.reset() done = False score = 0 self.episode_step = 0 loss_episode = list() t_begin = time.time() while not done: if self.is_render and self.i_episode >= self.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 state = next_state score += reward # training if len(self.memory) >= self.hyper_params.batch_size: for _ in range(self.hyper_params.multiple_update): experience = self.memory.sample() demos = self.demo_memory.sample() experience, demo = ( numpy2floattensor(experience, self.learner.device), numpy2floattensor(demos, self.learner.device), ) loss = self.learner.update_model(experience, demo) loss_episode.append(loss) # for logging t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step # logging if loss_episode: avg_loss = np.vstack(loss_episode).mean(axis=0) log_value = ( self.i_episode, avg_loss, score, self.hyper_params.policy_update_freq, avg_time_cost, ) self.write_log(log_value) if self.i_episode % self.save_period == 0: self.learner.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.learner.save_params(self.i_episode) self.interim_test()
class DQNAgent(Agent): """DQN interacting with environment. Attribute: env (gym.Env): openAI Gym environment hyper_params (ConfigDict): hyper-parameters log_cfg (ConfigDict): configuration for saving log and checkpoint network_cfg (ConfigDict): config of network for training agent optim_cfg (ConfigDict): config of optimizer state_dim (int): state size of env action_dim (int): action size of env memory (PrioritizedReplayBuffer): replay memory curr_state (np.ndarray): temporary storage of the current state total_step (int): total step number episode_step (int): step number of the current episode i_episode (int): current episode number epsilon (float): parameter for epsilon greedy policy n_step_buffer (deque): n-size buffer to calculate n-step returns per_beta (float): beta parameter for prioritized replay buffer use_n_step (bool): whether or not to use n-step returns """ def __init__( self, env: gym.Env, env_info: ConfigDict, hyper_params: ConfigDict, learner_cfg: ConfigDict, log_cfg: ConfigDict, is_test: bool, load_from: str, is_render: bool, render_after: int, is_log: bool, save_period: int, episode_num: int, max_episode_steps: int, interim_test_num: int, ): """Initialize.""" Agent.__init__( self, env, env_info, log_cfg, is_test, load_from, is_render, render_after, is_log, save_period, episode_num, max_episode_steps, interim_test_num, ) self.curr_state = np.zeros(1) self.episode_step = 0 self.i_episode = 0 self.hyper_params = hyper_params self.learner_cfg = learner_cfg self.per_beta = hyper_params.per_beta self.use_n_step = hyper_params.n_step > 1 if self.learner_cfg.head.configs.use_noisy_net: self.max_epsilon = 0.0 self.min_epsilon = 0.0 self.epsilon = 0.0 else: self.max_epsilon = hyper_params.max_epsilon self.min_epsilon = hyper_params.min_epsilon self.epsilon = hyper_params.max_epsilon self._initialize() # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" if not self.is_test: # replay memory for a single step self.memory = ReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, ) self.memory = PrioritizedBufferWrapper( self.memory, alpha=self.hyper_params.per_alpha) # replay memory for multi-steps if self.use_n_step: self.memory_n = ReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, n_step=self.hyper_params.n_step, gamma=self.hyper_params.gamma, ) build_args = dict( hyper_params=self.hyper_params, log_cfg=self.log_cfg, env_name=self.env_info.name, state_size=self.env_info.observation_space.shape, output_size=self.env_info.action_space.n, is_test=self.is_test, load_from=self.load_from, ) self.learner = build_learner(self.learner_cfg, build_args) def select_action(self, state: np.ndarray) -> np.ndarray: """Select an action from the input space.""" self.curr_state = state # epsilon greedy policy if not self.is_test and self.epsilon > np.random.random(): selected_action = np.array(self.env.action_space.sample()) else: with torch.no_grad(): state = self._preprocess_state(state) selected_action = self.learner.dqn(state).argmax() selected_action = selected_action.detach().cpu().numpy() return selected_action # pylint: disable=no-self-use def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" state = numpy2floattensor(state, self.learner.device) return state def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]: """Take an action and return the response of the env.""" next_state, reward, done, info = 
self.env.step(action) if not self.is_test: # if the last state is not a terminal state, store done as false done_bool = False if self.episode_step == self.max_episode_steps else done transition = (self.curr_state, action, reward, next_state, done_bool) self._add_transition_to_memory(transition) return next_state, reward, done, info def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" # add n-step transition if self.use_n_step: transition = self.memory_n.add(transition) # add a single step transition # if transition is not an empty tuple if transition: self.memory.add(transition) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, avg_time_cost = log_value print( "[INFO] episode %d, episode step: %d, total step: %d, total score: %f\n" "epsilon: %f, loss: %f, avg q-value: %f (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, self.epsilon, loss[0], loss[1], avg_time_cost, )) if self.is_log: wandb.log({ "score": score, "epsilon": self.epsilon, "dqn loss": loss[0], "avg q values": loss[1], "time per each step": avg_time_cost, "total_step": self.total_step, }) # pylint: disable=no-self-use, unnecessary-pass def pretrain(self): """Pretraining steps.""" pass def sample_experience(self) -> Tuple[torch.Tensor, ...]: """Sample experience from replay buffer.""" experiences_1 = self.memory.sample(self.per_beta) experiences_1 = ( numpy2floattensor(experiences_1[:6], self.learner.device) + experiences_1[6:]) if self.use_n_step: indices = experiences_1[-2] experiences_n = self.memory_n.sample(indices) return ( experiences_1, numpy2floattensor(experiences_n, self.learner.device), ) return experiences_1 def train(self): """Train the agent.""" # logger if self.is_log: self.set_wandb() # wandb.watch([self.dqn], log="parameters") # pre-training if needed self.pretrain() for self.i_episode in range(1, self.episode_num + 1): state = self.env.reset() self.episode_step = 0 losses = list() done = False score = 0 t_begin = time.time() while not done: if self.is_render and self.i_episode >= self.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 if len(self.memory) >= self.hyper_params.update_starts_from: if self.total_step % self.hyper_params.train_freq == 0: for _ in range(self.hyper_params.multiple_update): experience = self.sample_experience() info = self.learner.update_model(experience) loss = info[0:2] indices, new_priorities = info[2:4] losses.append(loss) # for logging self.memory.update_priorities( indices, new_priorities) # decrease epsilon self.epsilon = max( self.epsilon - (self.max_epsilon - self.min_epsilon) * self.hyper_params.epsilon_decay, self.min_epsilon, ) # increase priority beta fraction = min( float(self.i_episode) / self.episode_num, 1.0) self.per_beta = self.per_beta + fraction * (1.0 - self.per_beta) state = next_state score += reward t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step if losses: avg_loss = np.vstack(losses).mean(axis=0) log_value = (self.i_episode, avg_loss, score, avg_time_cost) self.write_log(log_value) if self.i_episode % self.save_period == 0: self.learner.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.learner.save_params(self.i_episode) self.interim_test()
def generate_sample_idx(buffer: ReplayBuffer) -> np.ndarray:
    """Generate indices to test whether sampled uniformly or not."""
    for i in range(buffer.max_len):
        buffer.add(generate_transition(i))

    # the third field of the sampled batch carries the encoded indices
    _, _, idx, _, _ = buffer.sample()
    return idx
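# `generate_transition` is not shown in this section. Given that
# `generate_sample_idx` reads the sampled indices out of the third field of the
# batch (the reward slot), a plausible sketch is a dummy transition whose reward
# encodes its insertion index (hypothetical helper; shapes are assumptions):
import numpy as np


def generate_transition(idx: int):
    """Return a dummy (state, action, reward, next_state, done) transition."""
    state = np.zeros(4, dtype=np.float32)       # assumed 4-dim dummy state
    action = np.zeros(2, dtype=np.float32)      # assumed 2-dim dummy action
    reward = float(idx)                         # index stored in the reward slot
    next_state = np.zeros(4, dtype=np.float32)
    done = False
    return (state, action, reward, next_state, done)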
class DQfDAgent(DQNAgent): """DQN interacting with environment. Attribute: memory (PrioritizedReplayBuffer): replay memory """ # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" if not self.args.test: # load demo replay memory demos = self._load_demos() if self.use_n_step: demos, demos_n_step = common_utils.get_n_step_info_from_demo( demos, self.hyper_params.n_step, self.hyper_params.gamma) self.memory_n = ReplayBuffer( buffer_size=self.hyper_params.buffer_size, n_step=self.hyper_params.n_step, gamma=self.hyper_params.gamma, demo=demos_n_step, ) # replay memory self.memory = PrioritizedReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, demo=demos, alpha=self.hyper_params.per_alpha, epsilon_d=self.hyper_params.per_eps_demo, ) def _load_demos(self) -> list: """Load expert's demonstrations.""" # load demo replay memory with open(self.args.demo_path, "rb") as f: demos = pickle.load(f) return demos def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" experiences_1 = self.memory.sample() weights, indices, eps_d = experiences_1[-3:] actions = experiences_1[1] # 1 step loss gamma = self.hyper_params.gamma dq_loss_element_wise, q_values = self._get_dqn_loss( experiences_1, gamma) dq_loss = torch.mean(dq_loss_element_wise * weights) # n step loss if self.use_n_step: experiences_n = self.memory_n.sample(indices) gamma = self.hyper_params.gamma**self.hyper_params.n_step dq_loss_n_element_wise, q_values_n = self._get_dqn_loss( experiences_n, gamma) # to update loss and priorities q_values = 0.5 * (q_values + q_values_n) dq_loss_element_wise += dq_loss_n_element_wise * self.hyper_params.lambda1 dq_loss = torch.mean(dq_loss_element_wise * weights) # supervised loss using demo for only demo transitions demo_idxs = np.where(eps_d != 0.0) n_demo = demo_idxs[0].size if n_demo != 0: # if 1 or more demos are sampled # get margin for each demo transition action_idxs = actions[demo_idxs].long() margin = torch.ones(q_values.size()) * self.hyper_params.margin margin[demo_idxs, action_idxs] = 0.0 # demo actions have 0 margins margin = margin.to(device) # calculate supervised loss demo_q_values = q_values[demo_idxs, action_idxs].squeeze() supervised_loss = torch.max(q_values + margin, dim=-1)[0] supervised_loss = supervised_loss[demo_idxs] - demo_q_values supervised_loss = torch.mean( supervised_loss) * self.hyper_params.lambda2 else: # no demo sampled supervised_loss = torch.zeros(1, device=device) # q_value regularization q_regular = torch.norm(q_values, 2).mean() * self.hyper_params.w_q_reg # total loss loss = dq_loss + supervised_loss + q_regular # train dqn self.dqn_optim.zero_grad() loss.backward() clip_grad_norm_(self.dqn.parameters(), self.hyper_params.gradient_clip) self.dqn_optim.step() # update target networks common_utils.soft_update(self.dqn, self.dqn_target, self.hyper_params.tau) # update priorities in PER loss_for_prior = dq_loss_element_wise.detach().cpu().numpy().squeeze() new_priorities = loss_for_prior + self.hyper_params.per_eps new_priorities += eps_d self.memory.update_priorities(indices, new_priorities) # increase beta fraction = min(float(self.i_episode) / self.args.episode_num, 1.0) self.per_beta: float = self.per_beta + fraction * (1.0 - self.per_beta) if self.hyper_params.use_noisy_net: self.dqn.reset_noise() self.dqn_target.reset_noise() return ( loss.item(), dq_loss.item(), supervised_loss.item(), q_values.mean().item(), n_demo, ) def write_log(self, log_value: 
tuple): """Write log about loss and score""" i, avg_loss, score, avg_time_cost = log_value print( "[INFO] episode %d, episode step: %d, total step: %d, total score: %f\n" "epsilon: %f, total loss: %f, dq loss: %f, supervised loss: %f\n" "avg q values: %f, demo num in minibatch: %d (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, self.epsilon, avg_loss[0], avg_loss[1], avg_loss[2], avg_loss[3], avg_loss[4], avg_time_cost, )) if self.args.log: wandb.log({ "score": score, "epsilon": self.epsilon, "total loss": avg_loss[0], "dq loss": avg_loss[1], "supervised loss": avg_loss[2], "avg q values": avg_loss[3], "demo num in minibatch": avg_loss[4], "time per each step": avg_time_cost, }) def pretrain(self): """Pretraining steps.""" pretrain_loss = list() pretrain_step = self.hyper_params.pretrain_step print("[INFO] Pre-Train %d step." % pretrain_step) for i_step in range(1, pretrain_step + 1): t_begin = time.time() loss = self.update_model() t_end = time.time() pretrain_loss.append(loss) # for logging # logging if i_step == 1 or i_step % 100 == 0: avg_loss = np.vstack(pretrain_loss).mean(axis=0) pretrain_loss.clear() log_value = (0, avg_loss, 0.0, t_end - t_begin) self.write_log(log_value) print("[INFO] Pre-Train Complete!\n")
class DDPGfDAgent(DDPGAgent): """ActorCritic interacting with environment. Attributes: memory (PrioritizedReplayBuffer): replay memory per_beta (float): beta parameter for prioritized replay buffer use_n_step (bool): whether or not to use n-step returns """ # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" self.per_beta = self.hyper_params.per_beta self.use_n_step = self.hyper_params.n_step > 1 if not self.args.test: # load demo replay memory with open(self.args.demo_path, "rb") as f: demos = pickle.load(f) if self.use_n_step: demos, demos_n_step = common_utils.get_n_step_info_from_demo( demos, self.hyper_params.n_step, self.hyper_params.gamma ) # replay memory for multi-steps self.memory_n = ReplayBuffer( buffer_size=self.hyper_params.buffer_size, batch_size=self.hyper_params.batch_size, n_step=self.hyper_params.n_step, gamma=self.hyper_params.gamma, demo=demos_n_step, ) # replay memory for a single step self.memory = PrioritizedReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, demo=demos, alpha=self.hyper_params.per_alpha, epsilon_d=self.hyper_params.per_eps_demo, ) def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" # add n-step transition if self.use_n_step: transition = self.memory_n.add(transition) # add a single step transition # if transition is not an empty tuple if transition: self.memory.add(transition) def _get_critic_loss( self, experiences: Tuple[torch.Tensor, ...], gamma: float ) -> torch.Tensor: """Return element-wise critic loss.""" states, actions, rewards, next_states, dones = experiences[:5] # G_t = r + gamma * v(s_{t+1}) if state != Terminal # = r otherwise masks = 1 - dones next_actions = self.actor_target(next_states) next_states_actions = torch.cat((next_states, next_actions), dim=-1) next_values = self.critic_target(next_states_actions) curr_returns = rewards + gamma * next_values * masks curr_returns = curr_returns.to(device).detach() # train critic values = self.critic(torch.cat((states, actions), dim=-1)) critic_loss_element_wise = (values - curr_returns).pow(2) return critic_loss_element_wise def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" experiences_1 = self.memory.sample(self.per_beta) states, actions = experiences_1[:2] weights, indices, eps_d = experiences_1[-3:] gamma = self.hyper_params.gamma # train critic gradient_clip_ac = self.hyper_params.gradient_clip_ac gradient_clip_cr = self.hyper_params.gradient_clip_cr critic_loss_element_wise = self._get_critic_loss(experiences_1, gamma) critic_loss = torch.mean(critic_loss_element_wise * weights) if self.use_n_step: experiences_n = self.memory_n.sample(indices) gamma = gamma ** self.hyper_params.n_step critic_loss_n_element_wise = self._get_critic_loss(experiences_n, gamma) # to update loss and priorities critic_loss_element_wise += ( critic_loss_n_element_wise * self.hyper_params.lambda1 ) critic_loss = torch.mean(critic_loss_element_wise * weights) self.critic_optim.zero_grad() critic_loss.backward() nn.utils.clip_grad_norm_(self.critic.parameters(), gradient_clip_cr) self.critic_optim.step() # train actor actions = self.actor(states) actor_loss_element_wise = -self.critic(torch.cat((states, actions), dim=-1)) actor_loss = torch.mean(actor_loss_element_wise * weights) self.actor_optim.zero_grad() actor_loss.backward() nn.utils.clip_grad_norm_(self.actor.parameters(), gradient_clip_ac) self.actor_optim.step() 
# update target networks common_utils.soft_update(self.actor, self.actor_target, self.hyper_params.tau) common_utils.soft_update(self.critic, self.critic_target, self.hyper_params.tau) # update priorities new_priorities = critic_loss_element_wise new_priorities += self.hyper_params.lambda3 * actor_loss_element_wise.pow(2) new_priorities += self.hyper_params.per_eps new_priorities = new_priorities.data.cpu().numpy().squeeze() new_priorities += eps_d self.memory.update_priorities(indices, new_priorities) # increase beta fraction = min(float(self.i_episode) / self.args.episode_num, 1.0) self.per_beta = self.per_beta + fraction * (1.0 - self.per_beta) return actor_loss.item(), critic_loss.item() def pretrain(self): """Pretraining steps.""" pretrain_loss = list() pretrain_step = self.hyper_params.pretrain_step print("[INFO] Pre-Train %d step." % pretrain_step) for i_step in range(1, pretrain_step + 1): t_begin = time.time() loss = self.update_model() t_end = time.time() pretrain_loss.append(loss) # for logging # logging if i_step == 1 or i_step % 100 == 0: avg_loss = np.vstack(pretrain_loss).mean(axis=0) pretrain_loss.clear() log_value = (0, avg_loss, 0, t_end - t_begin) self.write_log(log_value) print("[INFO] Pre-Train Complete!\n")
class TD3Agent(Agent): """ActorCritic interacting with environment. Attributes: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings hyper_params (ConfigDict): hyper-parameters network_cfg (ConfigDict): config of network for training agent optim_cfg (ConfigDict): config of optimizer state_dim (int): state size of env action_dim (int): action size of env memory (ReplayBuffer): replay memory exploration_noise (GaussianNoise): random noise for exploration target_policy_noise (GaussianNoise): random noise for target values actor (nn.Module): actor model to select actions critic1 (nn.Module): critic model to predict state values critic2 (nn.Module): critic model to predict state values critic_target1 (nn.Module): target critic model to predict state values critic_target2 (nn.Module): target critic model to predict state values actor_target (nn.Module): target actor model to select actions critic_optim (Optimizer): optimizer for training critic actor_optim (Optimizer): optimizer for training actor curr_state (np.ndarray): temporary storage of the current state total_steps (int): total step numbers episode_steps (int): step number of the current episode i_episode (int): current episode number noise_cfg (ConfigDict): config of noise """ def __init__( self, env: gym.Env, args: argparse.Namespace, log_cfg: ConfigDict, hyper_params: ConfigDict, backbone: ConfigDict, head: ConfigDict, optim_cfg: ConfigDict, noise_cfg: ConfigDict, ): """Initialize. Args: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings """ Agent.__init__(self, env, args, log_cfg) self.curr_state = np.zeros((1, )) self.total_step = 0 self.episode_step = 0 self.update_step = 0 self.i_episode = 0 self.hyper_params = hyper_params self.noise_cfg = noise_cfg self.backbone_cfg = backbone self.head_cfg = head self.optim_cfg = optim_cfg self.state_dim = self.env.observation_space.shape self.action_dim = self.env.action_space.shape[0] # noise instance to make randomness of action self.exploration_noise = GaussianNoise(self.action_dim, noise_cfg.exploration_noise, noise_cfg.exploration_noise) self.target_policy_noise = GaussianNoise( self.action_dim, noise_cfg.target_policy_noise, noise_cfg.target_policy_noise, ) if not self.args.test: # replay memory self.memory = ReplayBuffer(self.hyper_params.buffer_size, self.hyper_params.batch_size) self._init_network() def _init_network(self): """Initialize networks and optimizers.""" self.head_cfg.actor.configs.state_size = self.state_dim self.head_cfg.critic.configs.state_size = (self.state_dim[0] + self.action_dim, ) self.head_cfg.actor.configs.output_size = self.action_dim # create actor self.actor = BaseNetwork(self.backbone_cfg.actor, self.head_cfg.actor).to(device) self.actor_target = BaseNetwork(self.backbone_cfg.actor, self.head_cfg.actor).to(device) self.actor_target.load_state_dict(self.actor.state_dict()) # create q_critic self.critic1 = BaseNetwork(self.backbone_cfg.critic, self.head_cfg.critic).to(device) self.critic2 = BaseNetwork(self.backbone_cfg.critic, self.head_cfg.critic).to(device) self.critic_target1 = BaseNetwork(self.backbone_cfg.critic, self.head_cfg.critic).to(device) self.critic_target2 = BaseNetwork(self.backbone_cfg.critic, self.head_cfg.critic).to(device) self.critic_target1.load_state_dict(self.critic1.state_dict()) self.critic_target2.load_state_dict(self.critic2.state_dict()) # concat critic parameters to use one optim 
critic_parameters = list(self.critic1.parameters()) + list( self.critic2.parameters()) # create optimizers self.actor_optim = optim.Adam( self.actor.parameters(), lr=self.optim_cfg.lr_actor, weight_decay=self.optim_cfg.weight_decay, ) self.critic_optim = optim.Adam( critic_parameters, lr=self.optim_cfg.lr_critic, weight_decay=self.optim_cfg.weight_decay, ) # load the optimizer and model parameters if self.args.load_from is not None: self.load_params(self.args.load_from) def select_action(self, state: np.ndarray) -> np.ndarray: """Select an action from the input space.""" # initial training step, try random action for exploration self.curr_state = state if (self.total_step < self.hyper_params.initial_random_action and not self.args.test): return np.array(self.env.action_space.sample()) state = torch.FloatTensor(state).to(device) selected_action = self.actor(state).detach().cpu().numpy() if not self.args.test: noise = self.exploration_noise.sample() selected_action = np.clip(selected_action + noise, -1.0, 1.0) return selected_action def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]: """Take an action and return the response of the env.""" next_state, reward, done, info = self.env.step(action) if not self.args.test: # if last state is not terminal state in episode, done is false done_bool = (False if self.episode_step == self.args.max_episode_steps else done) self.memory.add( (self.curr_state, action, reward, next_state, done_bool)) return next_state, reward, done, info def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" self.update_step += 1 experiences = self.memory.sample() states, actions, rewards, next_states, dones = experiences masks = 1 - dones # get actions with noise noise = torch.FloatTensor(self.target_policy_noise.sample()).to(device) clipped_noise = torch.clamp( noise, -self.noise_cfg.target_policy_noise_clip, self.noise_cfg.target_policy_noise_clip, ) next_actions = (self.actor_target(next_states) + clipped_noise).clamp( -1.0, 1.0) # min (Q_1', Q_2') next_states_actions = torch.cat((next_states, next_actions), dim=-1) next_values1 = self.critic_target1(next_states_actions) next_values2 = self.critic_target2(next_states_actions) next_values = torch.min(next_values1, next_values2) # G_t = r + gamma * v(s_{t+1}) if state != Terminal # = r otherwise curr_returns = rewards + self.hyper_params.gamma * next_values * masks curr_returns = curr_returns.detach() # critic loss state_actions = torch.cat((states, actions), dim=-1) values1 = self.critic1(state_actions) values2 = self.critic2(state_actions) critic1_loss = F.mse_loss(values1, curr_returns) critic2_loss = F.mse_loss(values2, curr_returns) # train critic critic_loss = critic1_loss + critic2_loss self.critic_optim.zero_grad() critic_loss.backward() self.critic_optim.step() if self.update_step % self.hyper_params.policy_update_freq == 0: # policy loss actions = self.actor(states) state_actions = torch.cat((states, actions), dim=-1) actor_loss = -self.critic1(state_actions).mean() # train actor self.actor_optim.zero_grad() actor_loss.backward() self.actor_optim.step() # update target networks tau = self.hyper_params.tau common_utils.soft_update(self.critic1, self.critic_target1, tau) common_utils.soft_update(self.critic2, self.critic_target2, tau) common_utils.soft_update(self.actor, self.actor_target, tau) else: actor_loss = torch.zeros(1) return actor_loss.item(), critic1_loss.item(), critic2_loss.item() def load_params(self, path: str): """Load model and 
optimizer parameters.""" Agent.load_params(self, path) params = torch.load(path) self.critic1.load_state_dict(params["critic1"]) self.critic2.load_state_dict(params["critic2"]) self.critic_target1.load_state_dict(params["critic_target1"]) self.critic_target2.load_state_dict(params["critic_target2"]) self.critic_optim.load_state_dict(params["critic_optim"]) self.actor.load_state_dict(params["actor"]) self.actor_target.load_state_dict(params["actor_target"]) self.actor_optim.load_state_dict(params["actor_optim"]) print("[INFO] loaded the model and optimizer from", path) def save_params(self, n_episode: int): # type: ignore """Save model and optimizer parameters.""" params = { "actor": self.actor.state_dict(), "actor_target": self.actor_target.state_dict(), "actor_optim": self.actor_optim.state_dict(), "critic1": self.critic1.state_dict(), "critic2": self.critic2.state_dict(), "critic_target1": self.critic_target1.state_dict(), "critic_target2": self.critic_target2.state_dict(), "critic_optim": self.critic_optim.state_dict(), } Agent.save_params(self, params, n_episode) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, policy_update_freq, avg_time_cost = log_value total_loss = loss.sum() print( "[INFO] episode %d, episode_step: %d, total_step: %d, total score: %d\n" "total loss: %f actor_loss: %.3f critic1_loss: %.3f critic2_loss: %.3f " "(spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, total_loss, loss[0] * policy_update_freq, # actor loss loss[1], # critic1 loss loss[2], # critic2 loss avg_time_cost, )) if self.args.log: wandb.log({ "score": score, "total loss": total_loss, "actor loss": loss[0] * policy_update_freq, "critic1 loss": loss[1], "critic2 loss": loss[2], "time per each step": avg_time_cost, }) def train(self): """Train the agent.""" # logger if self.args.log: self.set_wandb() # wandb.watch([self.actor, self.critic1, self.critic2], log="parameters") for self.i_episode in range(1, self.args.episode_num + 1): state = self.env.reset() done = False score = 0 loss_episode = list() self.episode_step = 0 t_begin = time.time() while not done: if self.args.render and self.i_episode >= self.args.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 state = next_state score += reward if len(self.memory) >= self.hyper_params.batch_size: loss = self.update_model() loss_episode.append(loss) # for logging t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step # logging if loss_episode: avg_loss = np.vstack(loss_episode).mean(axis=0) log_value = ( self.i_episode, avg_loss, score, self.hyper_params.policy_update_freq, avg_time_cost, ) self.write_log(log_value) if self.i_episode % self.args.save_period == 0: self.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.save_params(self.i_episode) self.interim_test()
class DDPGAgent(Agent): """DDPG interacting with environment. Attributes: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings hyper_params (ConfigDict): hyper-parameters log_cfg (ConfigDict): configuration for saving log and checkpoint network_cfg (ConfigDict): config of network for training agent optim_cfg (ConfigDict): config of optimizer state_dim (int): state size of env action_dim (int): action size of env memory (ReplayBuffer): replay memory noise (OUNoise): random noise for exploration curr_state (np.ndarray): temporary storage of the current state total_step (int): total step numbers episode_step (int): step number of the current episode i_episode (int): current episode number """ def __init__( self, env: gym.Env, env_info: ConfigDict, hyper_params: ConfigDict, learner_cfg: ConfigDict, noise_cfg: ConfigDict, log_cfg: ConfigDict, is_test: bool, load_from: str, is_render: bool, render_after: int, is_log: bool, save_period: int, episode_num: int, max_episode_steps: int, interim_test_num: int, ): """Initialize.""" Agent.__init__( self, env, env_info, log_cfg, is_test, load_from, is_render, render_after, is_log, save_period, episode_num, max_episode_steps, interim_test_num, ) self.curr_state = np.zeros((1, )) self.total_step = 0 self.episode_step = 0 self.i_episode = 0 self.hyper_params = hyper_params self.learner_cfg = learner_cfg self.noise_cfg = noise_cfg # set noise self.noise = OUNoise( env_info.action_space.shape[0], theta=noise_cfg.ou_noise_theta, sigma=noise_cfg.ou_noise_sigma, ) self._initialize() # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" if not self.is_test: # replay memory self.memory = ReplayBuffer(self.hyper_params.buffer_size, self.hyper_params.batch_size) build_args = dict( hyper_params=self.hyper_params, log_cfg=self.log_cfg, noise_cfg=self.noise_cfg, env_name=self.env_info.name, state_size=self.env_info.observation_space.shape, output_size=self.env_info.action_space.shape[0], is_test=self.is_test, load_from=self.load_from, ) self.learner = build_learner(self.learner_cfg, build_args) def select_action(self, state: np.ndarray) -> np.ndarray: """Select an action from the input space.""" self.curr_state = state state = self._preprocess_state(state) # if initial random action should be conducted if (self.total_step < self.hyper_params.initial_random_action and not self.is_test): return np.array(self.env_info.action_space.sample()) with torch.no_grad(): selected_action = self.learner.actor(state).detach().cpu().numpy() if not self.is_test: noise = self.noise.sample() selected_action = np.clip(selected_action + noise, -1.0, 1.0) return selected_action # pylint: disable=no-self-use def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" state = numpy2floattensor(state, self.learner.device) return state def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]: """Take an action and return the response of the env.""" next_state, reward, done, info = self.env.step(action) if not self.is_test: # if the last state is not a terminal state, store done as false done_bool = False if self.episode_step == self.max_episode_steps else done transition = (self.curr_state, action, reward, next_state, done_bool) self._add_transition_to_memory(transition) return next_state, reward, done, info def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 
step and n step transitions to memory.""" self.memory.add(transition) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, avg_time_cost = log_value total_loss = loss.sum() print( "[INFO] episode %d, episode step: %d, total step: %d, total score: %d\n" "total loss: %f actor_loss: %.3f critic_loss: %.3f (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, total_loss, loss[0], loss[1], avg_time_cost, ) # actor loss # critic loss ) if self.is_log: wandb.log({ "score": score, "total loss": total_loss, "actor loss": loss[0], "critic loss": loss[1], "time per each step": avg_time_cost, }) # pylint: disable=no-self-use, unnecessary-pass def pretrain(self): """Pretraining steps.""" pass def train(self): """Train the agent.""" # logger if self.is_log: self.set_wandb() # wandb.watch([self.actor, self.critic], log="parameters") # pre-training if needed self.pretrain() for self.i_episode in range(1, self.episode_num + 1): state = self.env.reset() done = False score = 0 self.episode_step = 0 losses = list() t_begin = time.time() while not done: if self.is_render and self.i_episode >= self.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 if len(self.memory) >= self.hyper_params.batch_size: for _ in range(self.hyper_params.multiple_update): experience = self.memory.sample() experience = numpy2floattensor(experience, self.learner.device) loss = self.learner.update_model(experience) losses.append(loss) # for logging state = next_state score += reward t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step # logging if losses: avg_loss = np.vstack(losses).mean(axis=0) log_value = (self.i_episode, avg_loss, score, avg_time_cost) self.write_log(log_value) losses.clear() if self.i_episode % self.save_period == 0: self.learner.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.learner.save_params(self.i_episode) self.interim_test()
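DDPGAgent.select_action above perturbs the deterministic actor output with Ornstein-Uhlenbeck noise and clips the result to the [-1, 1] action range. A minimal sketch of that exploration scheme, using a simplified stand-in class (SimpleOUNoise) rather than the repo's OUNoise implementation:

import numpy as np

class SimpleOUNoise:
    """dx = theta * (mu - x) + sigma * N(0, 1), integrated once per sample() call."""
    def __init__(self, size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(size)
        self.theta, self.sigma = theta, sigma
        self.state = self.mu.copy()

    def sample(self):
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
        self.state = self.state + dx
        return self.state

# usage: add temporally correlated noise to a deterministic action, then clip
noise = SimpleOUNoise(size=2)
action = np.array([0.3, -0.8])                        # stand-in for the actor's output
noisy_action = np.clip(action + noise.sample(), -1.0, 1.0)
print(noisy_action)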
class DQNAgent(Agent): """DQN interacting with environment. Attribute: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings hyper_params (ConfigDict): hyper-parameters network_cfg (ConfigDict): config of network for training agent optim_cfg (ConfigDict): config of optimizer state_dim (int): state size of env action_dim (int): action size of env memory (PrioritizedReplayBuffer): replay memory dqn (nn.Module): actor model to select actions dqn_target (nn.Module): target actor model to select actions dqn_optim (Optimizer): optimizer for training actor curr_state (np.ndarray): temporary storage of the current state total_step (int): total step number episode_step (int): step number of the current episode i_episode (int): current episode number epsilon (float): parameter for epsilon greedy policy n_step_buffer (deque): n-size buffer to calculate n-step returns per_beta (float): beta parameter for prioritized replay buffer use_conv (bool): whether or not to use convolution layer use_n_step (bool): whether or not to use n-step returns """ def __init__( self, env: gym.Env, args: argparse.Namespace, log_cfg: ConfigDict, hyper_params: ConfigDict, network_cfg: ConfigDict, optim_cfg: ConfigDict, ): """Initialize.""" Agent.__init__(self, env, args, log_cfg) self.curr_state = np.zeros(1) self.episode_step = 0 self.total_step = 0 self.i_episode = 0 self.hyper_params = hyper_params self.network_cfg = network_cfg self.optim_cfg = optim_cfg self.state_dim = self.env.observation_space.shape self.action_dim = self.env.action_space.n self.per_beta = hyper_params.per_beta self.use_conv = len(self.state_dim) > 1 self.use_n_step = hyper_params.n_step > 1 if hyper_params.use_noisy_net: self.max_epsilon = 0.0 self.min_epsilon = 0.0 self.epsilon = 0.0 else: self.max_epsilon = hyper_params.max_epsilon self.min_epsilon = hyper_params.min_epsilon self.epsilon = hyper_params.max_epsilon self._initialize() self._init_network() # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" if not self.args.test: # replay memory for a single step self.memory = PrioritizedReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size, alpha=self.hyper_params.per_alpha, ) # replay memory for multi-steps if self.use_n_step: self.memory_n = ReplayBuffer( self.hyper_params.buffer_size, n_step=self.hyper_params.n_step, gamma=self.hyper_params.gamma, ) # pylint: disable=attribute-defined-outside-init def _init_network(self): """Initialize networks and optimizers.""" if self.use_conv: # create CNN self.dqn = dqn_utils.get_cnn_model(self.hyper_params, self.action_dim, self.state_dim, self.network_cfg) self.dqn_target = dqn_utils.get_cnn_model(self.hyper_params, self.action_dim, self.state_dim, self.network_cfg) else: # create FC fc_input_size = self.state_dim[0] self.dqn = dqn_utils.get_fc_model( self.hyper_params, fc_input_size, self.action_dim, self.network_cfg.hidden_sizes, ) self.dqn_target = dqn_utils.get_fc_model( self.hyper_params, fc_input_size, self.action_dim, self.network_cfg.hidden_sizes, ) self.dqn_target.load_state_dict(self.dqn.state_dict()) # create optimizer self.dqn_optim = optim.Adam( self.dqn.parameters(), lr=self.optim_cfg.lr_dqn, weight_decay=self.optim_cfg.weight_decay, eps=self.optim_cfg.adam_eps, ) # load the optimizer and model parameters if self.args.load_from is not None: self.load_params(self.args.load_from) def select_action(self, state: np.ndarray) -> np.ndarray: """Select an 
action from the input space.""" self.curr_state = state # epsilon greedy policy # pylint: disable=comparison-with-callable if not self.args.test and self.epsilon > np.random.random(): selected_action = np.array(self.env.action_space.sample()) else: state = self._preprocess_state(state) selected_action = self.dqn(state).argmax() selected_action = selected_action.detach().cpu().numpy() return selected_action # pylint: disable=no-self-use def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" state = torch.FloatTensor(state).to(device) return state def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]: """Take an action and return the response of the env.""" next_state, reward, done, info = self.env.step(action) if not self.args.test: # if the last state is not a terminal state, store done as false done_bool = (False if self.episode_step == self.args.max_episode_steps else done) transition = (self.curr_state, action, reward, next_state, done_bool) self._add_transition_to_memory(transition) return next_state, reward, done, info def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" # add n-step transition if self.use_n_step: transition = self.memory_n.add(transition) # add a single step transition # if transition is not an empty tuple if transition: self.memory.add(transition) def _get_dqn_loss(self, experiences: Tuple[torch.Tensor, ...], gamma: float) -> Tuple[torch.Tensor, torch.Tensor]: """Return element-wise dqn loss and Q-values.""" if self.hyper_params.use_dist_q == "IQN": return dqn_utils.calculate_iqn_loss( model=self.dqn, target_model=self.dqn_target, experiences=experiences, gamma=gamma, batch_size=self.hyper_params.batch_size, n_tau_samples=self.hyper_params.n_tau_samples, n_tau_prime_samples=self.hyper_params.n_tau_prime_samples, kappa=self.hyper_params.kappa, ) elif self.hyper_params.use_dist_q == "C51": return dqn_utils.calculate_c51_loss( model=self.dqn, target_model=self.dqn_target, experiences=experiences, gamma=gamma, batch_size=self.hyper_params.batch_size, v_min=self.hyper_params.v_min, v_max=self.hyper_params.v_max, atom_size=self.hyper_params.atoms, ) else: return dqn_utils.calculate_dqn_loss( model=self.dqn, target_model=self.dqn_target, experiences=experiences, gamma=gamma, ) def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" # 1 step loss experiences_1 = self.memory.sample(self.per_beta) weights, indices = experiences_1[-3:-1] gamma = self.hyper_params.gamma dq_loss_element_wise, q_values = self._get_dqn_loss( experiences_1, gamma) dq_loss = torch.mean(dq_loss_element_wise * weights) # n step loss if self.use_n_step: experiences_n = self.memory_n.sample(indices) gamma = self.hyper_params.gamma**self.hyper_params.n_step dq_loss_n_element_wise, q_values_n = self._get_dqn_loss( experiences_n, gamma) # to update loss and priorities q_values = 0.5 * (q_values + q_values_n) dq_loss_element_wise += dq_loss_n_element_wise * self.hyper_params.w_n_step dq_loss = torch.mean(dq_loss_element_wise * weights) # q_value regularization q_regular = torch.norm(q_values, 2).mean() * self.hyper_params.w_q_reg # total loss loss = dq_loss + q_regular self.dqn_optim.zero_grad() loss.backward() clip_grad_norm_(self.dqn.parameters(), self.hyper_params.gradient_clip) self.dqn_optim.step() # update target networks common_utils.soft_update(self.dqn, self.dqn_target, self.hyper_params.tau) # update 
priorities in PER loss_for_prior = dq_loss_element_wise.detach().cpu().numpy() new_priorities = loss_for_prior + self.hyper_params.per_eps self.memory.update_priorities(indices, new_priorities) # increase beta fraction = min(float(self.i_episode) / self.args.episode_num, 1.0) self.per_beta = self.per_beta + fraction * (1.0 - self.per_beta) if self.hyper_params.use_noisy_net: self.dqn.reset_noise() self.dqn_target.reset_noise() return loss.item(), q_values.mean().item() def load_params(self, path: str): """Load model and optimizer parameters.""" Agent.load_params(self, path) params = torch.load(path) self.dqn.load_state_dict(params["dqn_state_dict"]) self.dqn_target.load_state_dict(params["dqn_target_state_dict"]) self.dqn_optim.load_state_dict(params["dqn_optim_state_dict"]) print("[INFO] loaded the model and optimizer from", path) def save_params(self, n_episode: int): # type: ignore """Save model and optimizer parameters.""" params = { "dqn_state_dict": self.dqn.state_dict(), "dqn_target_state_dict": self.dqn_target.state_dict(), "dqn_optim_state_dict": self.dqn_optim.state_dict(), } Agent.save_params(self, params, n_episode) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, avg_time_cost = log_value print( "[INFO] episode %d, episode step: %d, total step: %d, total score: %f\n" "epsilon: %f, loss: %f, avg q-value: %f (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, self.epsilon, loss[0], loss[1], avg_time_cost, )) if self.args.log: wandb.log({ "score": score, "epsilon": self.epsilon, "dqn loss": loss[0], "avg q values": loss[1], "time per each step": avg_time_cost, }) # pylint: disable=no-self-use, unnecessary-pass def pretrain(self): """Pretraining steps.""" pass def train(self): """Train the agent.""" # logger if self.args.log: self.set_wandb() # wandb.watch([self.dqn], log="parameters") # pre-training if needed self.pretrain() for self.i_episode in range(1, self.args.episode_num + 1): state = self.env.reset() self.episode_step = 0 losses = list() done = False score = 0 t_begin = time.time() while not done: if self.args.render and self.i_episode >= self.args.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 if len(self.memory) >= self.hyper_params.update_starts_from: if self.total_step % self.hyper_params.train_freq == 0: for _ in range(self.hyper_params.multiple_update): loss = self.update_model() losses.append(loss) # for logging # decrease epsilon self.epsilon = max( self.epsilon - (self.max_epsilon - self.min_epsilon) * self.hyper_params.epsilon_decay, self.min_epsilon, ) state = next_state score += reward t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step if losses: avg_loss = np.vstack(losses).mean(axis=0) log_value = (self.i_episode, avg_loss, score, avg_time_cost) self.write_log(log_value) if self.i_episode % self.args.save_period == 0: self.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.save_params(self.i_episode) self.interim_test()
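Two pieces of PER bookkeeping in DQNAgent.update_model above are easy to miss: the element-wise TD losses become the new priorities (plus a small per_eps so no transition's sampling probability collapses to zero), and the importance-sampling exponent per_beta is annealed toward 1.0 as training progresses. A minimal numeric sketch (the helper names new_priorities and anneal_beta are illustrative only):

import numpy as np

def new_priorities(elementwise_td_loss, per_eps=1e-6):
    """Per-sample TD losses (already non-negative) plus a small constant."""
    return elementwise_td_loss + per_eps

def anneal_beta(per_beta, i_episode, episode_num):
    """Move the importance-sampling exponent toward 1.0 with training progress."""
    fraction = min(float(i_episode) / episode_num, 1.0)
    return per_beta + fraction * (1.0 - per_beta)

print(new_priorities(np.array([0.2, 0.05, 1.3])))
print(anneal_beta(per_beta=0.4, i_episode=50, episode_num=200))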
class SACAgent(Agent): """SAC agent interacting with environment. Attrtibutes: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings hyper_params (ConfigDict): hyper-parameters network_cfg (ConfigDict): config of network for training agent optim_cfg (ConfigDict): config of optimizer state_dim (int): state size of env action_dim (int): action size of env memory (ReplayBuffer): replay memory actor (nn.Module): actor model to select actions actor_target (nn.Module): target actor model to select actions actor_optim (Optimizer): optimizer for training actor critic_1 (nn.Module): critic model to predict state values critic_2 (nn.Module): critic model to predict state values critic_target1 (nn.Module): target critic model to predict state values critic_target2 (nn.Module): target critic model to predict state values critic_optim1 (Optimizer): optimizer for training critic_1 critic_optim2 (Optimizer): optimizer for training critic_2 curr_state (np.ndarray): temporary storage of the current state total_step (int): total step numbers episode_step (int): step number of the current episode update_step (int): step number of updates i_episode (int): current episode number target_entropy (int): desired entropy used for the inequality constraint log_alpha (torch.Tensor): weight for entropy alpha_optim (Optimizer): optimizer for alpha """ def __init__( self, env: gym.Env, args: argparse.Namespace, log_cfg: ConfigDict, hyper_params: ConfigDict, backbone: ConfigDict, head: ConfigDict, optim_cfg: ConfigDict, ): """Initialize. Args: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings """ Agent.__init__(self, env, args, log_cfg) self.curr_state = np.zeros((1, )) self.total_step = 0 self.episode_step = 0 self.update_step = 0 self.i_episode = 0 self.hyper_params = hyper_params self.backbone_cfg = backbone self.head_cfg = head self.optim_cfg = optim_cfg self.state_dim = self.env.observation_space.shape self.action_dim = self.env.action_space.shape[0] # target entropy target_entropy = -np.prod((self.action_dim, )).item() # heuristic # automatic entropy tuning if hyper_params.auto_entropy_tuning: self.target_entropy = target_entropy self.log_alpha = torch.zeros(1, requires_grad=True, device=device) self.alpha_optim = optim.Adam([self.log_alpha], lr=optim_cfg.lr_entropy) self._initialize() self._init_network() # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" if not self.args.test: # replay memory self.memory = ReplayBuffer(self.hyper_params.buffer_size, self.hyper_params.batch_size) # pylint: disable=attribute-defined-outside-init def _init_network(self): """Initialize networks and optimizers.""" self.head_cfg.actor.configs.state_size = ( self.head_cfg.critic_vf.configs.state_size) = self.state_dim self.head_cfg.critic_qf.configs.state_size = (self.state_dim[0] + self.action_dim, ) self.head_cfg.actor.configs.output_size = self.action_dim # create actor self.actor = BaseNetwork(self.backbone_cfg.actor, self.head_cfg.actor).to(device) # create v_critic self.vf = BaseNetwork(self.backbone_cfg.critic_vf, self.head_cfg.critic_vf).to(device) self.vf_target = BaseNetwork(self.backbone_cfg.critic_vf, self.head_cfg.critic_vf).to(device) self.vf_target.load_state_dict(self.vf.state_dict()) # create q_critic self.qf_1 = BaseNetwork(self.backbone_cfg.critic_qf, self.head_cfg.critic_qf).to(device) self.qf_2 = 
BaseNetwork(self.backbone_cfg.critic_qf, self.head_cfg.critic_qf).to(device) # create optimizers self.actor_optim = optim.Adam( self.actor.parameters(), lr=self.optim_cfg.lr_actor, weight_decay=self.optim_cfg.weight_decay, ) self.vf_optim = optim.Adam( self.vf.parameters(), lr=self.optim_cfg.lr_vf, weight_decay=self.optim_cfg.weight_decay, ) self.qf_1_optim = optim.Adam( self.qf_1.parameters(), lr=self.optim_cfg.lr_qf1, weight_decay=self.optim_cfg.weight_decay, ) self.qf_2_optim = optim.Adam( self.qf_2.parameters(), lr=self.optim_cfg.lr_qf2, weight_decay=self.optim_cfg.weight_decay, ) # load the optimizer and model parameters if self.args.load_from is not None: self.load_params(self.args.load_from) def select_action(self, state: np.ndarray) -> np.ndarray: """Select an action from the input space.""" self.curr_state = state state = self._preprocess_state(state) # if initial random action should be conducted if (self.total_step < self.hyper_params.initial_random_action and not self.args.test): return np.array(self.env.action_space.sample()) if self.args.test: _, _, _, selected_action, _ = self.actor(state) else: selected_action, _, _, _, _ = self.actor(state) return selected_action.detach().cpu().numpy() # pylint: disable=no-self-use def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" state = torch.FloatTensor(state).to(device) return state def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]: """Take an action and return the response of the env.""" next_state, reward, done, info = self.env.step(action) if not self.args.test: # if the last state is not a terminal state, store done as false done_bool = (False if self.episode_step == self.args.max_episode_steps else done) transition = (self.curr_state, action, reward, next_state, done_bool) self._add_transition_to_memory(transition) return next_state, reward, done, info def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" self.memory.add(transition) def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" self.update_step += 1 experiences = self.memory.sample() states, actions, rewards, next_states, dones = experiences new_actions, log_prob, pre_tanh_value, mu, std = self.actor(states) # train alpha if self.hyper_params.auto_entropy_tuning: alpha_loss = (-self.log_alpha * (log_prob + self.target_entropy).detach()).mean() self.alpha_optim.zero_grad() alpha_loss.backward() self.alpha_optim.step() alpha = self.log_alpha.exp() else: alpha_loss = torch.zeros(1) alpha = self.hyper_params.w_entropy # Q function loss masks = 1 - dones states_actions = torch.cat((states, actions), dim=-1) q_1_pred = self.qf_1(states_actions) q_2_pred = self.qf_2(states_actions) v_target = self.vf_target(next_states) q_target = rewards + self.hyper_params.gamma * v_target * masks qf_1_loss = F.mse_loss(q_1_pred, q_target.detach()) qf_2_loss = F.mse_loss(q_2_pred, q_target.detach()) # V function loss states_actions = torch.cat((states, new_actions), dim=-1) v_pred = self.vf(states) q_pred = torch.min(self.qf_1(states_actions), self.qf_2(states_actions)) v_target = q_pred - alpha * log_prob vf_loss = F.mse_loss(v_pred, v_target.detach()) # train Q functions self.qf_1_optim.zero_grad() qf_1_loss.backward() self.qf_1_optim.step() self.qf_2_optim.zero_grad() qf_2_loss.backward() self.qf_2_optim.step() # train V function self.vf_optim.zero_grad() vf_loss.backward() 
self.vf_optim.step() if self.update_step % self.hyper_params.policy_update_freq == 0: # actor loss advantage = q_pred - v_pred.detach() actor_loss = (alpha * log_prob - advantage).mean() # regularization mean_reg = self.hyper_params.w_mean_reg * mu.pow(2).mean() std_reg = self.hyper_params.w_std_reg * std.pow(2).mean() pre_activation_reg = self.hyper_params.w_pre_activation_reg * ( pre_tanh_value.pow(2).sum(dim=-1).mean()) actor_reg = mean_reg + std_reg + pre_activation_reg # actor loss + regularization actor_loss += actor_reg # train actor self.actor_optim.zero_grad() actor_loss.backward() self.actor_optim.step() # update target networks common_utils.soft_update(self.vf, self.vf_target, self.hyper_params.tau) else: actor_loss = torch.zeros(1) return ( actor_loss.item(), qf_1_loss.item(), qf_2_loss.item(), vf_loss.item(), alpha_loss.item(), ) def load_params(self, path: str): """Load model and optimizer parameters.""" Agent.load_params(self, path) params = torch.load(path) self.actor.load_state_dict(params["actor"]) self.qf_1.load_state_dict(params["qf_1"]) self.qf_2.load_state_dict(params["qf_2"]) self.vf.load_state_dict(params["vf"]) self.vf_target.load_state_dict(params["vf_target"]) self.actor_optim.load_state_dict(params["actor_optim"]) self.qf_1_optim.load_state_dict(params["qf_1_optim"]) self.qf_2_optim.load_state_dict(params["qf_2_optim"]) self.vf_optim.load_state_dict(params["vf_optim"]) if self.hyper_params.auto_entropy_tuning: self.alpha_optim.load_state_dict(params["alpha_optim"]) print("[INFO] loaded the model and optimizer from", path) def save_params(self, n_episode: int): # type: ignore """Save model and optimizer parameters.""" params = { "actor": self.actor.state_dict(), "qf_1": self.qf_1.state_dict(), "qf_2": self.qf_2.state_dict(), "vf": self.vf.state_dict(), "vf_target": self.vf_target.state_dict(), "actor_optim": self.actor_optim.state_dict(), "qf_1_optim": self.qf_1_optim.state_dict(), "qf_2_optim": self.qf_2_optim.state_dict(), "vf_optim": self.vf_optim.state_dict(), } if self.hyper_params.auto_entropy_tuning: params["alpha_optim"] = self.alpha_optim.state_dict() Agent.save_params(self, params, n_episode) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, policy_update_freq, avg_time_cost = log_value total_loss = loss.sum() print( "[INFO] episode %d, episode_step %d, total step %d, total score: %d\n" "total loss: %.3f actor_loss: %.3f qf_1_loss: %.3f qf_2_loss: %.3f " "vf_loss: %.3f alpha_loss: %.3f (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, total_loss, loss[0] * policy_update_freq, # actor loss loss[1], # qf_1 loss loss[2], # qf_2 loss loss[3], # vf loss loss[4], # alpha loss avg_time_cost, )) if self.args.log: wandb.log({ "score": score, "total loss": total_loss, "actor loss": loss[0] * policy_update_freq, "qf_1 loss": loss[1], "qf_2 loss": loss[2], "vf loss": loss[3], "alpha loss": loss[4], "time per each step": avg_time_cost, }) # pylint: disable=no-self-use, unnecessary-pass def pretrain(self): """Pretraining steps.""" pass def train(self): """Train the agent.""" # logger if self.args.log: self.set_wandb() # wandb.watch([self.actor, self.vf, self.qf_1, self.qf_2], log="parameters") # pre-training if needed self.pretrain() for self.i_episode in range(1, self.args.episode_num + 1): state = self.env.reset() done = False score = 0 self.episode_step = 0 loss_episode = list() t_begin = time.time() while not done: if self.args.render and self.i_episode >= self.args.render_after: self.env.render() 
action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 state = next_state score += reward # training if len(self.memory) >= self.hyper_params.batch_size: for _ in range(self.hyper_params.multiple_update): loss = self.update_model() loss_episode.append(loss) # for logging t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step # logging if loss_episode: avg_loss = np.vstack(loss_episode).mean(axis=0) log_value = ( self.i_episode, avg_loss, score, self.hyper_params.policy_update_freq, avg_time_cost, ) self.write_log(log_value) if self.i_episode % self.args.save_period == 0: self.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.save_params(self.i_episode) self.interim_test()
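When auto_entropy_tuning is enabled, SACAgent.update_model above learns the temperature by treating log_alpha as a free scalar parameter, minimizing -log_alpha * (log_prob + target_entropy).detach() with the heuristic target entropy -|A|, and using alpha = exp(log_alpha) in the actor and value losses. A minimal standalone sketch with dummy log-probabilities (the batch size and learning rate are placeholders):

import torch
import torch.optim as optim

action_dim = 4
target_entropy = -float(action_dim)                   # heuristic: -|A|
log_alpha = torch.zeros(1, requires_grad=True)
alpha_optim = optim.Adam([log_alpha], lr=3e-4)

log_prob = torch.randn(32, 1) - 1.0                   # stand-in for log pi(a|s) from the actor
alpha_loss = -(log_alpha * (log_prob + target_entropy).detach()).mean()

alpha_optim.zero_grad()
alpha_loss.backward()
alpha_optim.step()

alpha = log_alpha.exp()                               # temperature plugged into the SAC losses
print(alpha.item())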
class BCSACAgent(SACAgent): """BC with SAC agent interacting with environment. Attrtibutes: her (HER): hinsight experience replay transitions_epi (list): transitions per episode (for HER) desired_state (np.ndarray): desired state of current episode memory (ReplayBuffer): replay memory demo_memory (ReplayBuffer): replay memory for demo lambda2 (float): proportion of BC loss """ # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" # load demo replay memory with open(self.args.demo_path, "rb") as f: demo = list(pickle.load(f)) # HER if self.hyper_params.use_her: self.her = build_her(self.hyper_params.her) print(f"[INFO] Build {str(self.her)}.") if self.hyper_params.desired_states_from_demo: self.her.fetch_desired_states_from_demo(demo) self.transitions_epi: list = list() self.desired_state = np.zeros((1, )) demo = self.her.generate_demo_transitions(demo) if not self.her.is_goal_in_state: self.state_dim = (self.state_dim[0] * 2, ) else: self.her = None if not self.args.test: # Replay buffers demo_batch_size = self.hyper_params.demo_batch_size self.demo_memory = ReplayBuffer(len(demo), demo_batch_size) self.demo_memory.extend(demo) self.memory = ReplayBuffer(self.hyper_params.buffer_size, demo_batch_size) # set hyper parameters self.lambda2 = 1.0 / demo_batch_size def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" if self.hyper_params.use_her: self.desired_state = self.her.get_desired_state() state = np.concatenate((state, self.desired_state), axis=-1) state = torch.FloatTensor(state).to(device) return state def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" if self.hyper_params.use_her: self.transitions_epi.append(transition) done = transition[ -1] or self.episode_step == self.args.max_episode_steps if done: # insert generated transitions if the episode is done transitions = self.her.generate_transitions( self.transitions_epi, self.desired_state, self.hyper_params.success_score, ) self.memory.extend(transitions) self.transitions_epi.clear() else: self.memory.add(transition) def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" self.update_step += 1 experiences, demos = self.memory.sample(), self.demo_memory.sample() states, actions, rewards, next_states, dones = experiences demo_states, demo_actions, _, _, _ = demos new_actions, log_prob, pre_tanh_value, mu, std = self.actor(states) pred_actions, _, _, _, _ = self.actor(demo_states) # train alpha if self.hyper_params.auto_entropy_tuning: alpha_loss = (-self.log_alpha * (log_prob + self.target_entropy).detach()).mean() self.alpha_optim.zero_grad() alpha_loss.backward() self.alpha_optim.step() alpha = self.log_alpha.exp() else: alpha_loss = torch.zeros(1) alpha = self.hyper_params.w_entropy # Q function loss masks = 1 - dones states_actions = torch.cat((states, actions), dim=-1) q_1_pred = self.qf_1(states_actions) q_2_pred = self.qf_2(states_actions) v_target = self.vf_target(next_states) q_target = rewards + self.hyper_params.gamma * v_target * masks qf_1_loss = F.mse_loss(q_1_pred, q_target.detach()) qf_2_loss = F.mse_loss(q_2_pred, q_target.detach()) # V function loss states_actions = torch.cat((states, new_actions), dim=-1) v_pred = self.vf(states) q_pred = torch.min(self.qf_1(states_actions), self.qf_2(states_actions)) v_target = q_pred - alpha * log_prob vf_loss = F.mse_loss(v_pred, v_target.detach()) # 
train Q functions self.qf_1_optim.zero_grad() qf_1_loss.backward() self.qf_1_optim.step() self.qf_2_optim.zero_grad() qf_2_loss.backward() self.qf_2_optim.step() # train V function self.vf_optim.zero_grad() vf_loss.backward() self.vf_optim.step() # update actor actor_loss = torch.zeros(1) n_qf_mask = 0 if self.update_step % self.hyper_params.policy_update_freq == 0: # bc loss qf_mask = torch.gt( self.qf_1(torch.cat((demo_states, demo_actions), dim=-1)), self.qf_1(torch.cat((demo_states, pred_actions), dim=-1)), ).to(device) qf_mask = qf_mask.float() n_qf_mask = int(qf_mask.sum().item()) if n_qf_mask == 0: bc_loss = torch.zeros(1, device=device) else: bc_loss = (torch.mul(pred_actions, qf_mask) - torch.mul( demo_actions, qf_mask)).pow(2).sum() / n_qf_mask # actor loss advantage = q_pred - v_pred.detach() actor_loss = (alpha * log_prob - advantage).mean() actor_loss = self.hyper_params.lambda1 * actor_loss + self.lambda2 * bc_loss # regularization mean_reg = self.hyper_params.w_mean_reg * mu.pow(2).mean() std_reg = self.hyper_params.w_std_reg * std.pow(2).mean() pre_activation_reg = self.hyper_params.w_pre_activation_reg * ( pre_tanh_value.pow(2).sum(dim=-1).mean()) actor_reg = mean_reg + std_reg + pre_activation_reg # actor loss + regularization actor_loss += actor_reg # train actor self.actor_optim.zero_grad() actor_loss.backward() self.actor_optim.step() # update target networks common_utils.soft_update(self.vf, self.vf_target, self.hyper_params.tau) return ( actor_loss.item(), qf_1_loss.item(), qf_2_loss.item(), vf_loss.item(), alpha_loss.item(), n_qf_mask, ) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, policy_update_freq, avg_time_cost = log_value total_loss = loss.sum() print( "[INFO] episode %d, episode_step %d, total step %d, total score: %d\n" "total loss: %.3f actor_loss: %.3f qf_1_loss: %.3f qf_2_loss: %.3f " "vf_loss: %.3f alpha_loss: %.3f n_qf_mask: %d (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, total_loss, loss[0] * policy_update_freq, # actor loss loss[1], # qf_1 loss loss[2], # qf_2 loss loss[3], # vf loss loss[4], # alpha loss loss[5], # n_qf_mask avg_time_cost, )) if self.args.log: wandb.log({ "score": score, "total loss": total_loss, "actor loss": loss[0] * policy_update_freq, "qf_1 loss": loss[1], "qf_2 loss": loss[2], "vf loss": loss[3], "alpha loss": loss[4], "time per each step": avg_time_cost, })
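The behavior-cloning term in BCSACAgent.update_model above is Q-filtered: demonstration actions only contribute to the BC loss on states where the critic values them above the policy's own actions, and the result is mixed with the SAC actor loss via lambda1 and lambda2. A minimal sketch with dummy tensors (bc_actor_loss and its default lambda values are illustrative, not the repo's API):

import torch

def bc_actor_loss(actor_loss, q_demo, q_pred, demo_actions, pred_actions,
                  lambda1=1.0, lambda2=0.1):
    """Combine the SAC actor loss with a Q-filtered behavior-cloning loss."""
    qf_mask = torch.gt(q_demo, q_pred).float()        # 1 where the demo action beats the policy
    n_qf_mask = int(qf_mask.sum().item())
    if n_qf_mask == 0:
        bc_loss = torch.zeros(1)
    else:
        bc_loss = (qf_mask * (pred_actions - demo_actions)).pow(2).sum() / n_qf_mask
    return lambda1 * actor_loss + lambda2 * bc_loss

# usage with dummy critic values and actions
actor_loss = torch.tensor(0.5)
q_demo, q_pred = torch.rand(8, 1), torch.rand(8, 1)
demo_a, pred_a = torch.rand(8, 2), torch.rand(8, 2)
print(bc_actor_loss(actor_loss, q_demo, q_pred, demo_a, pred_a))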
class DDPGAgent(Agent): """ActorCritic interacting with environment. Attributes: env (gym.Env): openAI Gym environment args (argparse.Namespace): arguments including hyperparameters and training settings hyper_params (ConfigDict): hyper-parameters network_cfg (ConfigDict): config of network for training agent optim_cfg (ConfigDict): config of optimizer state_dim (int): state size of env action_dim (int): action size of env memory (ReplayBuffer): replay memory noise (OUNoise): random noise for exploration actor (nn.Module): actor model to select actions actor_target (nn.Module): target actor model to select actions critic (nn.Module): critic model to predict state values critic_target (nn.Module): target critic model to predict state values actor_optim (Optimizer): optimizer for training actor critic_optim (Optimizer): optimizer for training critic curr_state (np.ndarray): temporary storage of the current state total_step (int): total step numbers episode_step (int): step number of the current episode i_episode (int): current episode number """ def __init__( self, env: gym.Env, args: argparse.Namespace, log_cfg: ConfigDict, hyper_params: ConfigDict, backbone: ConfigDict, head: ConfigDict, optim_cfg: ConfigDict, noise_cfg: ConfigDict, ): """Initialize.""" Agent.__init__(self, env, args, log_cfg) self.curr_state = np.zeros((1,)) self.total_step = 0 self.episode_step = 0 self.i_episode = 0 self.hyper_params = hyper_params self.backbone_cfg = backbone self.head_cfg = head self.optim_cfg = optim_cfg self.state_dim = self.env.observation_space.shape self.action_dim = self.env.action_space.shape[0] # set noise self.noise = OUNoise( self.action_dim, theta=noise_cfg.ou_noise_theta, sigma=noise_cfg.ou_noise_sigma, ) self._initialize() self._init_network() # pylint: disable=attribute-defined-outside-init def _initialize(self): """Initialize non-common things.""" if not self.args.test: # replay memory self.memory = ReplayBuffer( self.hyper_params.buffer_size, self.hyper_params.batch_size ) # pylint: disable=attribute-defined-outside-init def _init_network(self): """Initialize networks and optimizers.""" self.head_cfg.actor.configs.state_size = self.state_dim # ddpg critic gets state & action as input, # and make the type to tuple to conform the gym action_space type. 
self.head_cfg.critic.configs.state_size = (self.state_dim[0] + self.action_dim,) self.head_cfg.actor.configs.output_size = self.action_dim # create actor self.actor = BaseNetwork(self.backbone_cfg.actor, self.head_cfg.actor).to( device ) self.actor_target = BaseNetwork( self.backbone_cfg.actor, self.head_cfg.actor ).to(device) self.actor_target.load_state_dict(self.actor.state_dict()) # create critic self.critic = BaseNetwork(self.backbone_cfg.critic, self.head_cfg.critic).to( device ) self.critic_target = BaseNetwork( self.backbone_cfg.critic, self.head_cfg.critic ).to(device) self.critic_target.load_state_dict(self.critic.state_dict()) # create optimizer self.actor_optim = optim.Adam( self.actor.parameters(), lr=self.optim_cfg.lr_actor, weight_decay=self.optim_cfg.weight_decay, ) self.critic_optim = optim.Adam( self.critic.parameters(), lr=self.optim_cfg.lr_critic, weight_decay=self.optim_cfg.weight_decay, ) # load the optimizer and model parameters if self.args.load_from is not None: self.load_params(self.args.load_from) def select_action(self, state: np.ndarray) -> np.ndarray: """Select an action from the input space.""" self.curr_state = state state = self._preprocess_state(state) # if initial random action should be conducted if ( self.total_step < self.hyper_params.initial_random_action and not self.args.test ): return np.array(self.env.action_space.sample()) selected_action = self.actor(state).detach().cpu().numpy() if not self.args.test: noise = self.noise.sample() selected_action = np.clip(selected_action + noise, -1.0, 1.0) return selected_action # pylint: disable=no-self-use def _preprocess_state(self, state: np.ndarray) -> torch.Tensor: """Preprocess state so that actor selects an action.""" state = torch.FloatTensor(state).to(device) return state def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]: """Take an action and return the response of the env.""" next_state, reward, done, info = self.env.step(action) if not self.args.test: # if the last state is not a terminal state, store done as false done_bool = ( False if self.episode_step == self.args.max_episode_steps else done ) transition = (self.curr_state, action, reward, next_state, done_bool) self._add_transition_to_memory(transition) return next_state, reward, done, info def _add_transition_to_memory(self, transition: Tuple[np.ndarray, ...]): """Add 1 step and n step transitions to memory.""" self.memory.add(transition) def update_model(self) -> Tuple[torch.Tensor, ...]: """Train the model after each episode.""" experiences = self.memory.sample() states, actions, rewards, next_states, dones = experiences # G_t = r + gamma * v(s_{t+1}) if state != Terminal # = r otherwise masks = 1 - dones next_actions = self.actor_target(next_states) next_values = self.critic_target(torch.cat((next_states, next_actions), dim=-1)) curr_returns = rewards + self.hyper_params.gamma * next_values * masks curr_returns = curr_returns.to(device) # train critic gradient_clip_ac = self.hyper_params.gradient_clip_ac gradient_clip_cr = self.hyper_params.gradient_clip_cr values = self.critic(torch.cat((states, actions), dim=-1)) critic_loss = F.mse_loss(values, curr_returns) self.critic_optim.zero_grad() critic_loss.backward() nn.utils.clip_grad_norm_(self.critic.parameters(), gradient_clip_cr) self.critic_optim.step() # train actor actions = self.actor(states) actor_loss = -self.critic(torch.cat((states, actions), dim=-1)).mean() self.actor_optim.zero_grad() actor_loss.backward() 
nn.utils.clip_grad_norm_(self.actor.parameters(), gradient_clip_ac) self.actor_optim.step() # update target networks common_utils.soft_update(self.actor, self.actor_target, self.hyper_params.tau) common_utils.soft_update(self.critic, self.critic_target, self.hyper_params.tau) return actor_loss.item(), critic_loss.item() def load_params(self, path: str): """Load model and optimizer parameters.""" Agent.load_params(self, path) params = torch.load(path) self.actor.load_state_dict(params["actor_state_dict"]) self.actor_target.load_state_dict(params["actor_target_state_dict"]) self.critic.load_state_dict(params["critic_state_dict"]) self.critic_target.load_state_dict(params["critic_target_state_dict"]) self.actor_optim.load_state_dict(params["actor_optim_state_dict"]) self.critic_optim.load_state_dict(params["critic_optim_state_dict"]) print("[INFO] loaded the model and optimizer from", path) def save_params(self, n_episode: int): """Save model and optimizer parameters.""" params = { "actor_state_dict": self.actor.state_dict(), "actor_target_state_dict": self.actor_target.state_dict(), "critic_state_dict": self.critic.state_dict(), "critic_target_state_dict": self.critic_target.state_dict(), "actor_optim_state_dict": self.actor_optim.state_dict(), "critic_optim_state_dict": self.critic_optim.state_dict(), } Agent._save_params(self, params, n_episode) def write_log(self, log_value: tuple): """Write log about loss and score""" i, loss, score, avg_time_cost = log_value total_loss = loss.sum() print( "[INFO] episode %d, episode step: %d, total step: %d, total score: %d\n" "total loss: %f actor_loss: %.3f critic_loss: %.3f (spent %.6f sec/step)\n" % ( i, self.episode_step, self.total_step, score, total_loss, loss[0], loss[1], avg_time_cost, ) # actor loss # critic loss ) if self.args.log: wandb.log( { "score": score, "total loss": total_loss, "actor loss": loss[0], "critic loss": loss[1], "time per each step": avg_time_cost, } ) # pylint: disable=no-self-use, unnecessary-pass def pretrain(self): """Pretraining steps.""" pass def train(self): """Train the agent.""" # logger if self.args.log: self.set_wandb() # wandb.watch([self.actor, self.critic], log="parameters") # pre-training if needed self.pretrain() for self.i_episode in range(1, self.args.episode_num + 1): state = self.env.reset() done = False score = 0 self.episode_step = 0 losses = list() t_begin = time.time() while not done: if self.args.render and self.i_episode >= self.args.render_after: self.env.render() action = self.select_action(state) next_state, reward, done, _ = self.step(action) self.total_step += 1 self.episode_step += 1 if len(self.memory) >= self.hyper_params.batch_size: for _ in range(self.hyper_params.multiple_update): loss = self.update_model() losses.append(loss) # for logging state = next_state score += reward t_end = time.time() avg_time_cost = (t_end - t_begin) / self.episode_step # logging if losses: avg_loss = np.vstack(losses).mean(axis=0) log_value = (self.i_episode, avg_loss, score, avg_time_cost) self.write_log(log_value) losses.clear() if self.i_episode % self.args.save_period == 0: self.save_params(self.i_episode) self.interim_test() # termination self.env.close() self.save_params(self.i_episode) self.interim_test()
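Both DDPG variants in this section (and the SAC and DQN agents above) rely on common_utils.soft_update to keep their target networks trailing the online networks. A minimal stand-in for that helper, assuming the usual Polyak-averaging definition since the utility itself is not shown in this document:

import torch
import torch.nn as nn

def soft_update(local: nn.Module, target: nn.Module, tau: float):
    """target <- tau * local + (1 - tau) * target, applied parameter-wise in place."""
    for t_param, l_param in zip(target.parameters(), local.parameters()):
        t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)

# usage on two tiny networks
local_net, target_net = nn.Linear(3, 1), nn.Linear(3, 1)
target_net.load_state_dict(local_net.state_dict())   # hard copy at initialization
soft_update(local_net, target_net, tau=0.005)         # small tau -> slowly moving target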