class RL_AGENT_ONE():
    """
    RL agent class
    """
    def __init__(self, memory_size, batch_size, learn_start_time, learn_fre,
                 lr, replay_iters, eps_T, eps_t_init, gamma, update_period,
                 board, device, model_path, r_memory_Fname, o_model_name,
                 model_load=False):
        self.step_now = 0                    # record the current step
        self.reward_num = 0
        self.reward_accumulated = 0          # delayed reward
        self.final_tem = 10                  # just for now
        self.step_last_update = 0            # record the last target-update step
        self.update_period = update_period   # period of the target-network update
        self.learn_start_time = learn_start_time
        self.gamma = gamma
        self.batch_size = batch_size
        self.memory_size = memory_size
        self.alpha = 0.6
        self.beta = 0.4
        self.replay_beta_iters = replay_iters
        self.replay_eps = 1e-6
        self.memory_min_num = 1000           # the min number of stored transitions before learning
        self.step_last_learn = 0             # record the last learn step
        self.learn_fre = learn_fre           # step frequency to learn
        self.e_greedy = 1                    # record the e-greedy value
        self.eps_T = eps_T                   # time constant of the eps decay (roughly 800,000 steps)
        self.eps_t_init = eps_t_init         # initial step offset of the eps decay
        self.device = device
        self.model_path = model_path
        self.mode_enjoy = model_load

        if model_load == False:
            self.policy_net = DQN(board[0], board[1], action_num).to(device)
            self.target_net = DQN(board[0], board[1], action_num).to(device)
            self.optimizer = optim.Adagrad(self.policy_net.parameters(), lr=lr)
            self.loss_fn = nn.functional.mse_loss   # use the MSE loss
            self.memory = PrioritizedReplayBuffer(memory_size, self.alpha)
            self.beta_schedule = LinearSchedule(self.replay_beta_iters, self.beta, 1.0)
        else:
            self.load(o_model_name)
        # self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=lr)

        self.obs_new = None
        self.obs_old = None
        self.action = None
        self.action_old = None
        self.dqn_direct_flag = False         # show whether the dqn action has been done
        self.model_save_flag = False

    def reset(self):
        """
        reset the flag, state and reward for a new half or game
        """
        self.obs_new = None
        self.obs_old = None
        self.action = None
        self.dqn_direct_flag = False

    def load(self, old_model):
        """
        load the trained model
        par:
        |old_model: str, the name of the old model
        """
        model_path_t = self.model_path + 't' + old_model
        self.target_net = torch.load(model_path_t, map_location=self.device)
        self.target_net.eval()
        print('target net par', self.target_net.state_dict())

    def save(self):
        """
        save the trained model
        """
        t = time.strftime('%m%d%H%M%S')
        self.model_path_p = self.model_path + 'p' + t + '.pt'
        self.model_path_t = self.model_path + 't' + t + '.pt'
        print('policy net par is', self.policy_net.state_dict())
        torch.save(self.policy_net, self.model_path_p)
        torch.save(self.target_net, self.model_path_t)

    def learn(self, env, step_now, obs_old, action, obs_new, reward, done):
        """
        This func is used to train the agent
        par:
        |step_now: int, the global step of training
        |env: class-Environment, currently unused
        |obs_old/obs_new: instance obs
        |action, reward: the action taken at obs_old and the reward received
        |done: bool, whether the game is over
        """
        """ check if we should update the target net """
        if step_now - self.step_last_update == self.update_period:
            self.step_last_update = step_now
            self.target_net.load_state_dict(self.policy_net.state_dict())

        """ build the feature states of the current transition """
        state_new = self.feature_combine(obs_new)   # get the feature state
        state_old = self.feature_combine(obs_old)   # get the feature state
        transition_now = (state_old, action, reward, state_new)

        """ augment rewarded transitions before adding them to the memory """
        if reward > 0:
            self.memory.add(*self.data_augment(transition_now), done)
        self.memory.add(state_old, action, reward, state_new, done)

        """ sample a batch from the memory to update the network """
        step_diff = step_now - self.step_last_learn
        if step_now > self.learn_start_time and \
           step_diff >= self.learn_fre and \
           self.memory.__len__() > self.memory_min_num:
            self.step_last_learn = step_now      # update the last-learn step
            batch_data = self.memory.sample(self.batch_size,
                                            beta=self.beta_schedule.value(step_now))
            s_o_set, actions, rewards, s_n_set, dones, weights, idx_set = batch_data
            loss_list = []
            batch_idx_list = []
            reward_not_zero_cnt = 0              # count how many non-zero-reward samples are learned
            actions = [torch.tensor(a, device=self.device) for a in actions]

            """ double-DQN style target: the policy net picks the next action,
                the target net evaluates it """
            actions_new = [self.policy_net(s_n).detach().max(0)[1] for s_n in s_n_set]
            target_values = [self.gamma * self.target_net(s_n).gather(0, actions_new[idx])
                             for idx, s_n in enumerate(s_n_set)]
            target_values = [t_ * (1 - d_) + r_
                             for t_, d_, r_ in zip(target_values, dones, rewards)]
            policy_values = [self.policy_net(s).gather(0, a)
                             for s, a in zip(s_o_set, actions)]
            loss = [self.loss_fn(p_v, t_v) + self.replay_eps
                    for p_v, t_v in zip(policy_values, target_values)]
            loss_back = sum(loss) / self.batch_size

            """ update the parameters """
            self.optimizer.zero_grad()
            loss_back.backward()
            self.optimizer.step()
            self.memory.update_priorities(idx_set, torch.tensor(loss).detach().numpy())

        """ check if we should save the model """
        if self.model_save_flag == True:
            self.save()

    def select_egreedy(self, q_value, step_now):
        """
        select the action by the e-greedy policy
        arg:
        |q_value: the greedy criterion
        """
        self.e_greedy = np.exp((self.eps_t_init - step_now) / self.eps_T)
        if self.e_greedy < 0.3:
            self.e_greedy = 0.3

        """ if we are in enjoy mode """
        if self.mode_enjoy == True:
            print('q_value is', q_value)
            self.e_greedy = 0.3

        """ select the action by e-greedy """
        if np.random.random() > self.e_greedy:
            action = action_list[np.where(q_value == np.max(q_value))[0][0]]
        else:
            action = action_list[np.random.randint(action_num)]
        return action

    def feature_combine(self, obs):
        """
        This func extracts features from the obs layers and
        combines them into a new feature layer
        """
        """ combine all the layers """
        feature_c = obs.copy()
        feature_c = feature_c.astype(np.float32)
        feature_c = torch.tensor(feature_c, dtype=torch.float32, device=self.device)
        size = feature_c.shape
        feature_c = feature_c.resize_(1, 1, size[0], size[1])
        return feature_c

    def data_augment(self, transition):
        """
        use this func to flip the feature, to boost the experience and
        ease the problem of sparse reward
        par:
        |transition: tuple of (feature_o, action, reward, feature_n)
        """
        flip_ver_dim = 2
        feature_old = transition[0]
        action = transition[1]
        reward = transition[2]
        feature_new = transition[3]

        """ vertical flip of the features """
        feature_o_aug = feature_old.flip([flip_ver_dim])
        feature_n_aug = feature_new.flip([flip_ver_dim])

        """ vertical flip of the action """
        if action == 0:
            action = 1
        elif action == 1:
            action = 0
        return feature_o_aug, action, reward, feature_n_aug

    def act(self, map, step_now):
        """
        this func interacts with the competition func
        """
        dqn_action = -1                              # reset
        state_old = self.feature_combine(map)        # get the feature
        q_values = self.policy_net(state_old)
        action = self.select_egreedy(
            q_values.cpu().detach().numpy(), step_now)   # feed the features to the model
        return action

    def act_enjoy(self, map):
        """
        this func interacts with the competition func
        """
        dqn_action = -1                              # reset
        step_now = self.eps_T
        state_old = self.feature_combine(map)        # get the feature
        q_values = self.target_net(state_old)
        action = self.select_egreedy(
            q_values.cpu().detach().numpy(), step_now)   # feed the features to the model
        return action
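# --- Illustrative sketch (not part of the original agent) ---------------------
# A minimal, self-contained sketch of the exploration schedule that
# select_egreedy() above implements: the random-action probability decays
# exponentially with the global step, eps = exp((eps_t_init - step) / eps_T),
# and is floored at 0.3 so some exploration always remains. The concrete
# numbers used here (eps_t_init=0, eps_T=800000) are illustrative assumptions,
# not values taken from the training config.
import numpy as np


def eps_schedule_sketch(step_now, eps_t_init=0, eps_T=800000, eps_min=0.3):
    """Return the epsilon used for e-greedy action selection at `step_now`."""
    eps = np.exp((eps_t_init - step_now) / eps_T)
    return max(eps, eps_min)


# Example: epsilon starts at 1.0 and bottoms out at the 0.3 floor.
# eps_schedule_sketch(0)        -> 1.0
# eps_schedule_sketch(500000)   -> ~0.54
# eps_schedule_sketch(2000000)  -> 0.3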
def learn(env,
          q_func,
          lr=1e-2,
          max_timesteps=1000000,
          buffer_size=50000,
          exploration_fraction=1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress.
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version
        at the end of the training set this variable to None.
    checkpoint_path: str
        directory used for checkpointing; a temporary directory is used if None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        if True parameter-space noise is used for exploration instead of epsilon-greedy.
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    sess = tf.Session()
    sess.__enter__()

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    def make_obs_ph(name):
        return ObservationInput(env.observation_space, name=name)

    act, train, update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
    # exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
    #                              initial_p=0.7,
    #                              final_p=0.15)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file = os.path.join(td, "model")
        model_saved = False
        if tf.train.latest_checkpoint(td) is not None:
            load_state(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True

        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(
                    1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                # logger.record_tabular("replay buffer size", replay_buffer.__len__())
                logger.dump_tabular()

            # if done and num_episodes % 100 == 1:
            #     filehandler = open("cartpole_MDP_replay_buffer.obj", "wb")
            #     pickle.dump(replay_buffer, filehandler)
            #     filehandler.close()
            #     print('MDP model samples saved', replay_buffer.__len__())
            #     file = open("cartpole_MDP_replay_buffer.obj", 'rb')
            #     reloaded_replay_buffer = pickle.load(file)
            #     file.close()
            #     print('MDP model samples loaded', reloaded_replay_buffer.__len__())

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_state(model_file)

        # file = open("cartpole_MDP_replay_buffer.obj", 'rb')
        # reloaded_replay_buffer = pickle.load(file)
        # file.close()
        # reloaded_replay_buffer.__len__()

        filehandler = open("cartpole_MDP_replay_buffer.obj", "wb")
        pickle.dump(replay_buffer, filehandler)
        filehandler.close()
        print('MDP model samples saved', replay_buffer.__len__())

        file = open("cartpole_MDP_replay_buffer.obj", 'rb')
        reloaded_replay_buffer = pickle.load(file)
        file.close()
        print('MDP model samples loaded', reloaded_replay_buffer.__len__())

    return act
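# --- Usage sketch (illustrative, not part of the original training script) ----
# A minimal example of how learn() above might be driven, assuming the classic
# OpenAI baselines helpers are available (deepq.models.mlp builds a q_func that
# build_train accepts) and assuming CartPole-v0 as the environment, which the
# replay-buffer pickle name above hints at. The hyperparameters are placeholder
# assumptions, not tuned values.
def example_train_cartpole():
    import gym
    from baselines import deepq

    env = gym.make("CartPole-v0")
    q_func = deepq.models.mlp([64])          # one hidden layer of 64 units
    act = learn(env,
                q_func=q_func,
                lr=1e-3,
                max_timesteps=100000,
                exploration_fraction=0.1,
                exploration_final_eps=0.02,
                train_freq=1,
                print_freq=10,
                prioritized_replay=True)
    act.save("cartpole_model.pkl")           # ActWrapper can be saved and reloaded later
    return act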