class BaselinesPERBuffer(SimpleReplayBuffer):
    def __init__(
            self,
            max_replay_buffer_size,
            alpha,
    ):
        self.underlying = PrioritizedReplayBuffer(max_replay_buffer_size, alpha)

    def add_sample(self, observation, action, reward, terminal,
                   next_observation, **kwargs):
        self.underlying.add(observation, action, reward, next_observation, terminal)

    def random_batch(self, batch_size, beta):
        return self.underlying.sample(batch_size, beta)

    def num_steps_can_sample(self):
        return len(self.underlying)

    def update_priorities(self, *args, **kwargs):
        self.underlying.update_priorities(*args, **kwargs)
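# Usage sketch (an assumption, not part of the original file): exercises the adapter
# above against the baselines PrioritizedReplayBuffer it wraps. sample(batch_size, beta)
# returns (obs, actions, rewards, next_obs, dones, importance weights, buffer indices);
# the indices are what update_priorities expects back together with |TD error| + eps.
import numpy as np

def _per_buffer_demo():
    buf = BaselinesPERBuffer(max_replay_buffer_size=10000, alpha=0.6)
    for _ in range(64):
        obs = np.random.randn(4)
        buf.add_sample(observation=obs,
                       action=np.random.randint(2),
                       reward=float(np.random.randn()),
                       terminal=0.0,
                       next_observation=np.random.randn(4))
    obs_t, actions, rewards, obs_tp1, dones, weights, idxes = buf.random_batch(
        batch_size=32, beta=0.4)
    # After computing TD errors elsewhere, feed |td_error| + eps back as priorities.
    fake_td_errors = np.random.randn(32)
    buf.update_priorities(idxes, np.abs(fake_td_errors) + 1e-6)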
def learn(env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.01, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=50, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, callback=None, num_optimisation_steps=40): """Train a deepq model. Parameters ------- env : gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((env.observation_space.shape[0] * 2, ), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_max_rewards = [env.reward_max] episode_rewards = [0.0] saved_mean_reward_diff = None # difference in saved reward obs = env.reset(seed=np.random.randint(0, 1000)) with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") episode_buffer = [None] * env.n episode_timestep = 0 for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value action = act(np.concatenate([obs, env.goal])[None], update_eps=exploration.value(t))[0] new_obs, rew, done, _ = env.step(action) # Store transition in the replay buffer. episode_buffer[episode_timestep] = (obs, action, rew, new_obs, float(done)) episode_timestep += 1 replay_buffer.add(np.concatenate([obs, env.goal]), action, rew, np.concatenate([new_obs, env.goal]), float(done)) obs = new_obs episode_rewards[-1] += rew num_episodes = len(episode_rewards) #######end of episode if done: for episode in range(episode_timestep): obs1, action1, _, new_obs1, done1 = episode_buffer[episode] goal_prime = new_obs1 rew1 = env.calculate_reward(new_obs1, goal_prime) replay_buffer.add(np.concatenate([obs1, goal_prime]), action1, rew1, np.concatenate([new_obs1, goal_prime]), float(done1)) episode_timestep = 0 obs = env.reset(seed=np.random.randint(0, 1000)) episode_rewards.append(0.0) episode_max_rewards.append(env.reward_max) #############Training Q if t > learning_starts and num_episodes % train_freq == 0: for i in range(num_optimisation_steps): # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs( td_errors) + prioritized_replay_eps replay_buffer.update_priorities( batch_idxes, new_priorities) #############Training Q target if t > learning_starts and num_episodes % target_network_update_freq == 0: # Update target network periodically. 
                update_target()

            mean_100ep_reward = np.mean(episode_rewards[-101:-1])
            mean_100ep_max_reward = np.mean(episode_max_rewards[-101:-1])
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("mean 100 episode max reward", mean_100ep_max_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and num_episodes % checkpoint_freq == 0):
                if saved_mean_reward_diff is None or \
                        mean_100ep_max_reward - mean_100ep_reward < saved_mean_reward_diff:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward difference decrease: {} -> {}".format(
                            saved_mean_reward_diff, mean_100ep_max_reward - mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward_diff = mean_100ep_max_reward - mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward difference: {}".format(
                    saved_mean_reward_diff))
            U.load_state(model_file)

    return ActWrapper(act, act_params)
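# Hindsight-relabelling sketch (an assumption, not part of the original code): the
# done-branch of the training loop above re-stores every transition of a finished
# episode with the achieved next state substituted for the goal, so the episode
# becomes a success under that new goal. It assumes the same env.calculate_reward
# helper and goal-concatenated observation layout that the loop above already uses.
import numpy as np

def relabel_episode(episode_buffer, episode_len, env, replay_buffer):
    for i in range(episode_len):
        obs, action, _, new_obs, done = episode_buffer[i]
        goal_prime = new_obs  # pretend the state actually reached was the goal
        rew = env.calculate_reward(new_obs, goal_prime)
        replay_buffer.add(np.concatenate([obs, goal_prime]),
                          action,
                          rew,
                          np.concatenate([new_obs, goal_prime]),
                          float(done))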
class Agent: def __init__(self, sess): print("Initializing the agent...") self.sess = sess self.env = Environment() self.state_size = self.env.get_state_size()[0] self.action_size = self.env.get_action_size() self.low_bound, self.high_bound = self.env.get_bounds() self.buffer = PrioritizedReplayBuffer(parameters.BUFFER_SIZE, parameters.ALPHA) print("Creation of the actor-critic network...") self.network = Network(self.state_size, self.action_size, self.low_bound, self.high_bound) print("Network created !\n") self.epsilon = parameters.EPSILON_START self.beta = parameters.BETA_START self.best_run = -1e10 self.sess.run(tf.global_variables_initializer()) def run(self): self.nb_ep = 1 self.total_steps = 0 for self.nb_ep in range(1, parameters.TRAINING_STEPS + 1): episode_reward = 0 episode_step = 0 done = False memory = deque() # Initial state s = self.env.reset() max_steps = parameters.MAX_EPISODE_STEPS + self.nb_ep // parameters.EP_ELONGATION while episode_step < max_steps and not done: if random.random() < self.epsilon: a = self.env.random() else: # choose action based on deterministic policy a, = self.sess.run(self.network.actions, feed_dict={self.network.state_ph: [s]}) # Decay epsilon if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY s_, r, done, info = self.env.act(a) memory.append((s, a, r, s_, 0.0 if done else 1.0)) if len(memory) > parameters.N_STEP_RETURN: s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft() discount_R = 0 for i, (si, ai, ri, s_i, di) in enumerate(memory): discount_R += ri * parameters.DISCOUNT**(i + 1) self.buffer.add(s_mem, a_mem, discount_R, s_, done) # update network weights to fit a minibatch of experience if self.total_steps % parameters.TRAINING_FREQ == 0 and \ len(self.buffer) >= parameters.BATCH_SIZE: minibatch = self.buffer.sample(parameters.BATCH_SIZE, self.beta) if self.beta <= parameters.BETA_STOP: self.beta += parameters.BETA_INCR td_errors, _, _ = self.sess.run( [ self.network.td_errors, self.network.critic_train_op, self.network.actor_train_op ], feed_dict={ self.network.state_ph: minibatch[0], self.network.action_ph: minibatch[1], self.network.reward_ph: minibatch[2], self.network.next_state_ph: minibatch[3], self.network.is_not_terminal_ph: minibatch[4] }) self.buffer.update_priorities(minibatch[6], td_errors + 1e-6) # update target networks _ = self.sess.run(self.network.update_slow_targets_op) episode_reward += r s = s_ episode_step += 1 self.total_steps += 1 self.nb_ep += 1 if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: print( 'Episode %2i, Reward: %7.3f, Steps: %i, Epsilon : %7.3f, Max steps : %i' % (self.nb_ep, episode_reward, episode_step, self.epsilon, max_steps)) DISPLAYER.add_reward(episode_reward) if episode_reward > self.best_run and self.nb_ep > 100: self.best_run = episode_reward print("Best agent ! ", episode_reward) SAVER.save('best') if self.nb_ep % parameters.SAVE_FREQ == 0: SAVER.save(self.nb_ep) def play(self, number_run): print("Playing for", number_run, "runs") try: for i in range(number_run): s = self.env.reset() episode_reward = 0 done = False while not done: a, = self.sess.run(self.network.actions, feed_dict={self.network.state_ph: [s]}) s_, r, done, info = self.env.act(a) episode_reward += r print("Episode reward :", episode_reward) except KeyboardInterrupt as e: pass except Exception as e: print("Exception :", e) finally: print("End of the demo") self.env.close() def close(self): self.env.close()
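# N-step return sketch (an assumption, not part of the original code). The standard
# formulation adds the immediate reward at DISCOUNT**0 and the following n-1 rewards
# at increasing powers; note that the loop in Agent.run above starts its exponent at
# DISCOUNT**1 over the remaining queue and does not add r_mem itself, so it computes
# a slightly different target.
def n_step_return(r_immediate, later_rewards, discount):
    total = r_immediate
    for i, r in enumerate(later_rewards):
        total += r * discount ** (i + 1)
    return total

# Example: reward 1 every step, discount 0.9, 3-step return -> 1 + 0.9 + 0.81 = 2.71
assert abs(n_step_return(1.0, [1.0, 1.0], 0.9) - 2.71) < 1e-9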
if args.param_noise_threshold >= 0.:
    update_param_noise_threshold = args.param_noise_threshold
else:
    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
    # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
    # for a detailed explanation.
    update_param_noise_threshold = -np.log(
        1. - exploration.value(num_iters) +
        exploration.value(num_iters) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = (num_iters % args.param_noise_update_freq == 0)

action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
reset = False
new_obs, rew, done, info = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs

if done:
    num_iters_since_reset = 0
    obs = env.reset()
    reset = True

if (num_iters > max(5 * args.batch_size, args.replay_buffer_size // 20) and
        num_iters % args.learning_freq == 0):
    # Sample a bunch of transitions from the replay buffer.
    if args.prioritized:
        experience = replay_buffer.sample(args.batch_size,
                                          beta=beta_schedule.value(num_iters))
        (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
    else:
        obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(args.batch_size)
        weights = np.ones_like(rewards)
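# Threshold sketch (an assumption, not part of the original code): this mirrors the
# formula used in the branch above. The perturbation scale is adapted so that the KL
# divergence between the perturbed and unperturbed policies stays near
# delta = -log(1 - eps + eps/|A|), which matches the KL between a greedy and an
# eps-greedy policy over |A| actions.
import numpy as np

def param_noise_threshold(eps, num_actions):
    return -np.log(1.0 - eps + eps / float(num_actions))

# e.g. eps = 0.1 over 4 actions -> -log(0.925), roughly 0.078
print(param_noise_threshold(0.1, 4))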
def learn(env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. 
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_state(model_file)

    return act
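# Usage sketch (an assumption, not part of the original file): trains the learn()
# routine above on CartPole with a small MLP Q-network. It assumes the old
# baselines-style deepq.models.mlp helper and a gym environment are available.
import gym
from baselines import deepq

def example_run():
    env = gym.make("CartPole-v0")
    model = deepq.models.mlp([64])
    act = learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        prioritized_replay=True,
    )
    act.save("cartpole_model.pkl")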
def learn(env, q_func, num_actions=64*64, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None): """Train a deepq model. Parameters ------- env: pysc2.env.SC2Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() # Set up summary Ops summary_ops, summary_vars = build_summaries() writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph) def make_obs_ph(name): return U.BatchInput((64, 64), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10 ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] episode_minerals = [0.0] saved_mean_reward = None path_memory = np.zeros((64,64)) obs = env.reset() # Select all marines first step_result = env.step(actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] obs = player_relative + path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if(player[0]>32): obs = shift(LEFT, player[0]-32, obs) elif(player[0]<32): obs = shift(RIGHT, 32 - player[0], obs) if(player[1]>32): obs = shift(UP, player[1]-32, obs) elif(player[1]<32): obs = shift(DOWN, 32 - player[1], obs) reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. 
- exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] reset = False coord = [player[0], player[1]] rew = 0 path_memory_ = np.array(path_memory, copy=True) if(action == 0): #UP if(player[1] >= 16): coord = [player[0], player[1] - 16] path_memory_[player[1] - 16 : player[1], player[0]] = -1 elif(player[1] > 0): coord = [player[0], 0] path_memory_[0 : player[1], player[0]] = -1 else: rew -= 1 elif(action == 1): #DOWN if(player[1] <= 47): coord = [player[0], player[1] + 16] path_memory_[player[1] : player[1] + 16, player[0]] = -1 elif(player[1] > 47): coord = [player[0], 63] path_memory_[player[1] : 63, player[0]] = -1 else: rew -= 1 elif(action == 2): #LEFT if(player[0] >= 16): coord = [player[0] - 16, player[1]] path_memory_[player[1], player[0] - 16 : player[0]] = -1 elif(player[0] < 16): coord = [0, player[1]] path_memory_[player[1], 0 : player[0]] = -1 else: rew -= 1 elif(action == 3): #RIGHT if(player[0] <= 47): coord = [player[0] + 16, player[1]] path_memory_[player[1], player[0] : player[0] + 16] = -1 elif(player[0] > 47): coord = [63, player[1]] path_memory_[player[1], player[0] : 63] = -1 else: rew -= 1 else: #Cannot move, give minus reward rew -= 1 if(path_memory[coord[1],coord[0]] != 0): rew -= 0.5 path_memory = np.array(path_memory_) #print("action : %s Coord : %s" % (action, coord)) new_action = [sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])] step_result = env.step(actions=new_action) player_relative = step_result[0].observation["screen"][_PLAYER_RELATIVE] new_obs = player_relative + path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if(player[0]>32): new_obs = shift(LEFT, player[0]-32, new_obs) elif(player[0]<32): new_obs = shift(RIGHT, 32 - player[0], new_obs) if(player[1]>32): new_obs = shift(UP, player[1]-32, new_obs) elif(player[1]<32): new_obs = shift(DOWN, 32 - player[1], new_obs) rew += step_result[0].reward * 10 done = step_result[0].step_type == environment.StepType.LAST # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew episode_minerals[-1] += step_result[0].reward if done: obs = env.reset() player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] obs = player_relative + path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if(player[0]>32): obs = shift(LEFT, player[0]-32, obs) elif(player[0]<32): obs = shift(RIGHT, 32 - player[0], obs) if(player[1]>32): obs = shift(UP, player[1]-32, obs) elif(player[1]<32): obs = shift(DOWN, 32 - player[1], obs) # Select all marines first env.step(actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) episode_rewards.append(0.0) episode_minerals.append(0.0) path_memory = np.zeros((64,64)) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) mean_100ep_mineral = round(np.mean(episode_minerals[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: summary_str = sess.run(summary_ops, feed_dict={ summary_vars[0]: mean_100ep_reward, summary_vars[1]: mean_100ep_mineral }) writer.add_summary(summary_str, num_episodes) writer.flush() logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("mean 100 episode mineral", mean_100ep_mineral) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) U.load_state(model_file) return ActWrapper(act)
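# Re-centring sketch (an assumption: the shift helper and the UP/DOWN/LEFT/RIGHT
# constants are referenced in the loop above but not shown). The loop keeps the
# marine near the middle of the 64x64 screen by sliding the whole observation;
# np.roll with zero-fill is one plausible implementation of that helper.
import numpy as np

UP, DOWN, LEFT, RIGHT = 0, 1, 2, 3  # hypothetical values for the direction constants

def shift(direction, number, matrix):
    if number <= 0:
        return matrix
    if direction == RIGHT:
        shifted = np.roll(matrix, number, axis=1)
        shifted[:, :number] = 0       # zero-fill the columns that wrapped around
    elif direction == LEFT:
        shifted = np.roll(matrix, -number, axis=1)
        shifted[:, -number:] = 0
    elif direction == DOWN:
        shifted = np.roll(matrix, number, axis=0)
        shifted[:number, :] = 0
    else:  # UP
        shifted = np.roll(matrix, -number, axis=0)
        shifted[-number:, :] = 0
    return shifted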
def learn(env, q_func, num_actions=4, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None): """Train a deepq model. Parameters ------- env: pysc2.env.SC2Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() #def make_obs_ph(name): #return U.BatchInput((16, 16), name=name) obs_spec = env.observation_spec()[0] screen_dim = obs_spec['feature_screen'][1:3] def make_obs_ph(name): #return ObservationInput(ob_space, name=name) return ObservationInput(Box(low=0.0, high=screen_dim[0], shape=(screen_dim[0], screen_dim[1], 1)), name=name) act_x, train_x, update_target_x, debug_x = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq_x") act_y, train_y, update_target_y, debug_y = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq_y") act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer_x = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) replay_buffer_y = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule_x = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) beta_schedule_y = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer_x = ReplayBuffer(buffer_size) replay_buffer_y = ReplayBuffer(buffer_size) beta_schedule_x = None beta_schedule_y = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target_x() update_target_y() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # Select all marines first obs = env.step( actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) print(obs[0].observation.keys()) player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE] screen = (player_relative == _PLAYER_NEUTRAL).astype(int) #+ path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join("model/", "mineral_shards") print(model_file) for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. 
                        - exploration.value(t) +
                        exploration.value(t) / float(num_actions))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            action_x = act_x(np.array(screen)[None], update_eps=update_eps, **kwargs)[0]
            action_y = act_y(np.array(screen)[None], update_eps=update_eps, **kwargs)[0]

            reset = False

            coord = [player[0], player[1]]
            rew = 0

            coord = [action_x, action_y]

            if _MOVE_SCREEN not in obs[0].observation["available_actions"]:
                obs = env.step(actions=[
                    sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
                ])

            new_action = [
                sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])
            ]
            # else:
            #   new_action = [sc2_actions.FunctionCall(_NO_OP, [])]

            obs = env.step(actions=new_action)

            player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE]
            new_screen = (player_relative == _PLAYER_NEUTRAL).astype(int)

            player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
            player = [int(player_x.mean()), int(player_y.mean())]

            rew = obs[0].reward
            done = obs[0].step_type == environment.StepType.LAST

            # Store the transition in both replay buffers.
            replay_buffer_x.add(screen, action_x, rew, new_screen, float(done))
            replay_buffer_y.add(screen, action_y, rew, new_screen, float(done))

            screen = new_screen

            episode_rewards[-1] += rew
            reward = episode_rewards[-1]

            if done:
                obs = env.reset()
                player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE]
                screen = (player_relative == _PLAYER_NEUTRAL).astype(int)

                player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
                player = [int(player_x.mean()), int(player_y.mean())]

                # Select all marines first
                env.step(actions=[
                    sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
                ])

                episode_rewards.append(0.0)
                # episode_minerals.append(0.0)

                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on batches sampled from the replay buffers.
                if prioritized_replay:
                    experience_x = replay_buffer_x.sample(
                        batch_size, beta=beta_schedule_x.value(t))
                    (obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x,
                     weights_x, batch_idxes_x) = experience_x

                    experience_y = replay_buffer_y.sample(
                        batch_size, beta=beta_schedule_y.value(t))
                    (obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y,
                     weights_y, batch_idxes_y) = experience_y
                else:
                    obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x = replay_buffer_x.sample(
                        batch_size)
                    weights_x, batch_idxes_x = np.ones_like(rewards_x), None

                    obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y = replay_buffer_y.sample(
                        batch_size)
                    weights_y, batch_idxes_y = np.ones_like(rewards_y), None

                # Each head is trained on its own batch.
                td_errors_x = train_x(obses_t_x, actions_x, rewards_x,
                                      obses_tp1_x, dones_x, weights_x)
                td_errors_y = train_y(obses_t_y, actions_y, rewards_y,
                                      obses_tp1_y, dones_y, weights_y)

                if prioritized_replay:
                    new_priorities_x = np.abs(td_errors_x) + prioritized_replay_eps
                    new_priorities_y = np.abs(td_errors_y) + prioritized_replay_eps
                    replay_buffer_x.update_priorities(batch_idxes_x, new_priorities_x)
                    replay_buffer_y.update_priorities(batch_idxes_y, new_priorities_y)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target networks periodically.
                update_target_x()
                update_target_y()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("reward", reward)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)

    return ActWrapper(act_x), ActWrapper(act_y)
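# Coordinate-composition sketch (an assumption, not part of the original code): the two
# heads above each pick one axis, so a 2-D screen target needs only 2 * num_actions
# outputs instead of num_actions ** 2 joint actions. This shows how the two greedy
# picks combine into the coord passed to the _MOVE_SCREEN call in the loop above.
import numpy as np

def pick_move(act_x, act_y, screen, eps):
    x = act_x(np.array(screen)[None], update_eps=eps)[0]
    y = act_y(np.array(screen)[None], update_eps=eps)[0]
    return [int(x), int(y)]  # used as the coord argument of _MOVE_SCREEN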
def main(): with tf_util.make_session(4) as session: act_fn, train_fn, target_update_fn, debug_fn = deepq.build_train( make_obs_ph=lambda name: Uint8Input([input_height, input_width], name=name), q_func=q_function_nn, num_actions=action_space_size, optimizer=tf.train.AdamOptimizer(learning_rate=0.001), gamma=0.99, grad_norm_clipping=10, double_q=False) epsilon = PiecewiseSchedule([(0, 1.0), (10000, 1.0), # since we start training at 10000 steps (20000, 0.4), (50000, 0.2), (100000, 0.1), (500000, 0.05)], outside_value=0.01) replay_memory = PrioritizedReplayBuffer(replay_memory_size, replay_alpha) beta = LinearSchedule(int(NUM_STEPS/4), initial_p=replay_beta, final_p=1.0) tf_util.initialize() target_update_fn() state = env.reset() state = preprocess_frame(state) watch_train = False dq = [] # a queue to store episode rewards start_step = 1 episode = 1 if is_load_model: dict_state = load_model() replay_memory = dict_state["replay_memory"] dq = dict_state["dq"] start_step = dict_state["step"] + 1 for step in itertools.count(start=start_step): action = act_fn(state[np.newaxis], update_eps=epsilon.value(step))[0] state_tplus1, reward, is_finished, _ = env.step(action) dq.append(reward) if watch_flag: env.render() time.sleep(1.0/fps) state_tplus1 = preprocess_frame(state_tplus1) replay_memory.add(state, action, reward, state_tplus1, float(is_finished)) state = state_tplus1 if is_finished: ep_reward = sum(dq) log.logkv("Steps", step) log.logkv("Episode reward", ep_reward) log.logkv("Episode number", episode) log.dumpkvs() print("Step", step, ". Finished episode", episode, "with reward ", ep_reward) dq = [] state = preprocess_frame(env.reset()) episode += 1 for _ in range(30): # NOOP for ~90 frames to skip the start screen. Range 30 used because each # step executed for 3 frames on average. Action 0 stands for doing nothing env.step(0) if watch_flag: env.render() if step > 10000 and step % learn_freq == 0: # only start training after 10000 steps are completed batch = replay_memory.sample(batch_size, beta=beta.value(step)) states = batch[0] actions = batch[1] rewards = batch[2] states_tplus1 = batch[3] finished_vars = batch[4] weights = batch[5] state_indeces = batch[6] errors = train_fn(states, actions, rewards, states_tplus1, finished_vars, weights) priority_order_new = np.abs(errors) + replay_epsilon replay_memory.update_priorities(state_indeces, priority_order_new) if step % save_freq == 0: print("State save", step) dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) if step > NUM_STEPS: print("Finished training. Saving model to ./saved_model/model.ckpt") dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) break
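# Checkpoint-helper sketch (an assumption: save_model and load_model are called in
# main() above but not shown). This is one plausible pickle-based implementation
# matching the dict layout used there ({"step", "replay_memory", "dq"}); the TF
# network weights would be saved separately, e.g. with tf.train.Saver. The
# CHECKPOINT_PATH name is hypothetical.
import os
import pickle

CHECKPOINT_PATH = "./saved_model/state.pkl"  # hypothetical location

def save_model(dict_state, path=CHECKPOINT_PATH):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(dict_state, f)

def load_model(path=CHECKPOINT_PATH):
    with open(path, "rb") as f:
        return pickle.load(f)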
def train(env, eval_env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, my_skill_set=None, log_dir = None, num_eval_episodes=10, render=False, render_eval = False, commit_for = 1 ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model if my_skill_set: assert commit_for>=1, "commit_for >= 1" save_idx = 0 with U.single_threaded_session() as sess: ## restore if my_skill_set: action_shape = my_skill_set.len else: action_shape = env.action_space.n # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return U.BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=action_shape, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': action_shape, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() # sess.run(tf.variables_initializer(new_variables)) # sess.run(tf.global_variables_initializer()) update_target() if my_skill_set: ## restore skills my_skill_set.restore_skillset(sess=sess) episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True model_saved = False model_file = os.path.join(log_dir, "model", "deepq") # save the initial act model print("Saving the starting model") os.makedirs(os.path.dirname(model_file), exist_ok=True) act.save(model_file + '.pkl') for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True paction = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] if(my_skill_set): skill_obs = obs.copy() primitive_id = paction rew = 0. 
for _ in range(commit_for): ## break actions into primitives and their params action = my_skill_set.pi(primitive_id=primitive_id, obs = skill_obs.copy(), primitive_params=None) new_obs, skill_rew, done, _ = env.step(action) if render: # print(action) env.render() sleep(0.1) rew += skill_rew skill_obs = new_obs terminate_skill = my_skill_set.termination(new_obs) if done or terminate_skill: break else: action= paction env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) if render: env.render() sleep(0.1) # Store transition in the replay buffer for the outer env replay_buffer.add(obs, paction, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True print("Time:%d, episodes:%d"%(t,len(episode_rewards))) # add hindsight experience if t > learning_starts and t % train_freq == 0: # print('Training!') # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() # print(len(episode_rewards), episode_rewards[-11:-1]) mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if (checkpoint_freq is not None and t > learning_starts and num_episodes > 50 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) act.save(model_file + '%d.pkl'%save_idx) save_idx += 1 model_saved = True saved_mean_reward = mean_100ep_reward # else: # print(saved_mean_reward, mean_100ep_reward) if (eval_env is not None) and t > learning_starts and t % target_network_update_freq == 0: # dumping other stats logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("%d time spent exploring", int(100 * exploration.value(t))) print("Testing!") eval_episode_rewards = [] eval_episode_successes = [] for i in range(num_eval_episodes): eval_episode_reward = 0. eval_obs = eval_env.reset() eval_obs_start = eval_obs.copy() eval_done = False while(not eval_done): eval_paction = act(np.array(eval_obs)[None])[0] if(my_skill_set): eval_skill_obs = eval_obs.copy() eval_primitive_id = eval_paction eval_r = 0. 
for _ in range(commit_for): ## break actions into primitives and their params eval_action, _ = my_skill_set.pi(primitive_id=eval_primitive_id, obs = eval_skill_obs.copy(), primitive_params=None) eval_new_obs, eval_skill_rew, eval_done, eval_info = eval_env.step(eval_action) # print('env reward:%f'%eval_skill_rew) if render_eval: print("Render!") eval_env.render() print("rendered!") eval_r += eval_skill_rew eval_skill_obs = eval_new_obs eval_terminate_skill = my_skill_set.termination(eval_new_obs) if eval_done or eval_terminate_skill: break else: eval_action= eval_paction env_action = eval_action reset = False eval_new_obs, eval_r, eval_done, eval_info = eval_env.step(env_action) if render_eval: # print("Render!") eval_env.render() # print("rendered!") eval_episode_reward += eval_r # print("eval_r:%f, eval_episode_reward:%f"%(eval_r, eval_episode_reward)) eval_obs = eval_new_obs eval_episode_success = (eval_info["done"]=="goal reached") if(eval_episode_success): logger.info("success, training epoch:%d,starting config:"%t) eval_episode_rewards.append(eval_episode_reward) eval_episode_successes.append(eval_episode_success) combined_stats = {} # print(eval_episode_successes, np.mean(eval_episode_successes)) combined_stats['eval/return'] = normal_mean(eval_episode_rewards) combined_stats['eval/success'] = normal_mean(eval_episode_successes) combined_stats['eval/episodes'] = (len(eval_episode_rewards)) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) print("dumping the stats!") logger.dump_tabular() if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) U.load_state(model_file)
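# Option-execution sketch (an assumption, not part of the original code): both the
# training and evaluation loops above commit to one skill for up to commit_for
# low-level steps, accumulating reward until the environment ends the episode or
# the skill's own termination function fires. my_skill_set is assumed to expose
# pi(primitive_id, obs, primitive_params) and termination(obs) as used above.
def run_option(env, skill_set, primitive_id, obs, commit_for, render=False):
    total_rew, done, info = 0.0, False, {}
    for _ in range(commit_for):
        action = skill_set.pi(primitive_id=primitive_id,
                              obs=obs.copy(),
                              primitive_params=None)
        obs, rew, done, info = env.step(action)
        if render:
            env.render()
        total_rew += rew
        if done or skill_set.termination(obs):
            break
    return obs, total_rew, done, info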
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=3000, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=3000, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs ): sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name), q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), # gamma=gamma, # grad_norm_clipping=10, # param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(100000), initial_p=1.0, final_p=0.02) # Initialize the paramete print(type(act))rs and copy them to the target network. U.initialize() update_target() old_state = None formula_LTLf_1 = "!F(die)" monitoring_RightToLeft = MonitoringSpecification( ltlf_formula=formula_LTLf_1, r=1, c=-10, s=1, f=-10 ) monitoring_specifications = [monitoring_RightToLeft] stepCounter = 0 done = False def RightToLeftConversion(observation) -> TraceStep: print(stepCounter) if(done and not(stepCounter>=199)): die=True else: die=False dictionary={'die': die} print(dictionary) return dictionary multi_monitor = MultiRewardMonitor( monitoring_specifications=monitoring_specifications, obs_to_trace_step=RightToLeftConversion ) episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) episodeCounter=0 num_episodes=0 for t in itertools.count(): # Take action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] #print(action) new_obs, rew, done, _ = env.step(action) stepCounter+=1 rew, is_perm = multi_monitor(new_obs) old_state=new_obs # Store transition in the replay buffer. 
replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200 if episodeCounter % 100 == 0 or episodeCounter<1: # Show off the result #print("coming here Again and Again") env.render() if done: episodeCounter+=1 num_episodes+=1 obs = env.reset() episode_rewards.append(0) multi_monitor.reset() stepCounter=0 else: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if t > 1000: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32) train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards)) # Update target network periodically. if t % 1000 == 0: update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) if done and len(episode_rewards) % 10 == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", len(episode_rewards)) logger.record_tabular("mean 100 episode reward", round(np.mean(episode_rewards[-101:-1]), 1)) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 500 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) act.save_act() #save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward # if model_saved: # if print_freq is not None: # logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) # load_variables(model_file) return act
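# Illustrative sketch of the quantity the `train` call above regresses towards:
# the standard 1-step TD target r + gamma * (1 - done) * max_a' Q_target(s', a').
# This is a conceptual numpy version, not the baselines implementation.
import numpy as np

def td_targets(rewards, dones, q_target_tp1, gamma=1.0):
    """rewards, dones: (batch,); q_target_tp1: (batch, num_actions) target-network values."""
    return rewards + gamma * (1.0 - dones) * q_target_tp1.max(axis=1)

# e.g. td_targets(np.array([1., 0.]), np.array([0., 1.]),
#                 np.array([[0.1, 0.5, 0.2], [0.3, 0.3, 0.3]]), gamma=0.99)
# -> array([1.495, 0.])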
def learn( env, q_func, # input obs,num od actions etc and obtain q value for each action num_actions=16, # available actions: up down left right lr=5e-4, max_timesteps=100000, buffer_size=50000, # size of the replay buffer exploration_fraction=0.1, # during the first 10% training period, exploration rate is decreased from 1 to 0.02 exploration_final_eps=0.02, # final value of random action probability train_freq=1, # update the model every `train_freq` steps. batch_size=32, # size of a batched sampled from replay buffer for training print_freq=1, checkpoint_freq=10000, learning_starts=1000, # time for the model to collect transitions before learning starts gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, # beta keeps to be beta0 prioritized_replay_eps=1e-6, num_cpu=16, # number of cpus to use for training param_noise=False, # whether or not to use parameter space noise param_noise_threshold=0.05, callback=None): """Train a deepq model. Parameters ------- env: pysc2.env.SC2Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph( name ): # Creates a placeholder for a batch of tensors of a given shape and dtype return U_b.BatchInput((16, 16), name=name) act_x, train_x, update_target_x, debug_x = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, # clip gradient norms to this value scope="deepq_x") act_y, train_y, update_target_y, debug_y = deepq.build_train( #because there are two players in the game make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq_y") act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer_x = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) replay_buffer_y = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule_x = LinearSchedule( prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, # 0.4->1 final_p=1.0) beta_schedule_y = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer_x = ReplayBuffer(buffer_size) replay_buffer_y = ReplayBuffer(buffer_size) beta_schedule_x = None beta_schedule_y = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. ---环境初始化 U.initialize() update_target_x() update_target_y() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # start a new episode # Select all marines first ---选择所有个体,获得新的观察 obs = env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) # Apply actions, step the world forward, and return observations. 
# Inspect the player_relative screen from the returned observation dict: 1 marks the positions of our units on the map, 3 marks the positions of the minerals (this is the matrix printed in the terminal) player_relative = obs[0].observation["feature_screen"][ _PLAYER_RELATIVE] #obs is a 'TimeStep' whose type is tuple of ['step_type', 'reward', 'discount', 'observation'];step_type.first or mid or last # Mineral positions as a 0/1 matrix screen = (player_relative == _PLAYER_NEUTRAL).astype( int ) #+ path_memory screen=1 or 0 to indicate the location of mineral # Positions of friendly units, given as rows and columns player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero( ) #the location of team member: row, col <-> y,x # print(player_relative) # print('*************') # print(screen) # print(_PLAYER_FRIENDLY) # # print(player_x) # print(player_y) # print('ssss) # if (len(player_x) == 0): # player_x = np.array([0]) # # print('player_x from null to 0') # # print(player_x) # if (len(player_y) == 0): # player_y = np.array([0]) # # print('player_y from null to 0') # # print(player_y) player = [int(player_x.mean()), int(player_y.mean())] reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join("model/", "mineral_shards") # path used to save the model print(model_file) for t in range(max_timesteps): # print('timestep=',t) if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value -- update exploration and take an action kwargs = {} if not param_noise: update_eps = exploration.value(t) # yields a value annealed from 1 to 0.02 update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. - exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True # actions obtained after exploration action_x = act_x(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] # print('action_x is ',action_x) action_y = act_y(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] # print('action_y is ',action_y) reset = False # coord = [player[0], player[1]] rew = 0 #reward coord = [action_x, action_y] if _MOVE_SCREEN not in obs[0].observation["available_actions"]: obs = env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) # obs = env.step(actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) new_action = [ sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord]) ] # else: # new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) player_relative = obs[0].observation["feature_screen"][ _PLAYER_RELATIVE] # print(player_relative) new_screen = (player_relative == _PLAYER_NEUTRAL).astype(int) # print(_PLAYER_FRIENDLY) # print(player_x) # print(player_y) # print('ssssss2') # if (len(player_x) == 0): # player_x = np.array([0]) # # print('player_x from null to 0') # # print(player_x) # if (len(player_y) == 0): # player_y = np.array([0]) # # print('player_y from null to 0') # # print(player_y) # player = [int(player_x.mean()), int(player_y.mean())] rew = obs[0].reward done = obs[0].step_type == environment.StepType.LAST # Store transition in the replay buffer.
replay_buffer_x.add(screen, action_x, rew, new_screen, float(done)) replay_buffer_y.add(screen, action_y, rew, new_screen, float(done)) screen = new_screen episode_rewards[-1] += rew reward = episode_rewards[-1] if done: obs = env.reset() # player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE] # screent = (player_relative == _PLAYER_NEUTRAL).astype(int) # # player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() # player = [int(player_x.mean()), int(player_y.mean())] # Select all marines first env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) episode_rewards.append(0.0) # print("episode_rewards is ", episode_rewards) print('num_episodes is', len(episode_rewards)) #episode_minerals.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: #train_freq=1: update the model every `train_freq` steps # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience_x = replay_buffer_x.sample( batch_size, beta=beta_schedule_x.value(t)) (obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x, weights_x, batch_idxes_x) = experience_x experience_y = replay_buffer_y.sample( batch_size, beta=beta_schedule_y.value(t)) (obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y, batch_idxes_y) = experience_y else: obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x = replay_buffer_x.sample( batch_size) weights_x, batch_idxes_x = np.ones_like( rewards_x ), None # weights_x is an array padded with 1 which has the same shape as rewards_x obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y = replay_buffer_y.sample( batch_size) weights_y, batch_idxes_y = np.ones_like(rewards_y), None td_errors_x = train_x(obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x, weights_x) td_errors_y = train_y(obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y) if prioritized_replay: new_priorities_x = np.abs( td_errors_x) + prioritized_replay_eps new_priorities_y = np.abs( td_errors_y) + prioritized_replay_eps replay_buffer_x.update_priorities(batch_idxes_x, new_priorities_x) replay_buffer_y.update_priorities(batch_idxes_y, new_priorities_y) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target_x() update_target_y() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) # round: sishewuru value num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act_x), ActWrapper(act_y)
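# Illustrative sketch of the proportional prioritization behind the
# PrioritizedReplayBuffer branches above: priorities p_i = |td_error_i| + eps,
# sampling probabilities P(i) = p_i^alpha / sum_k p_k^alpha, and importance
# weights w_i = (N * P(i))^-beta normalized by their maximum. Conceptual only,
# not the buffer's internal segment-tree implementation.
import numpy as np

def per_probabilities_and_weights(td_errors, alpha=0.6, beta=0.4, eps=1e-6):
    priorities = np.abs(td_errors) + eps
    probs = priorities ** alpha
    probs = probs / probs.sum()
    weights = (len(td_errors) * probs) ** (-beta)
    return probs, weights / weights.max()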
class Agent: def __init__(self, sess): print("Initializing the agent...") self.sess = sess self.env = Environment() self.state_size = self.env.get_state_size() self.action_size = self.env.get_action_size() print("Creation of the main QNetwork...") self.mainQNetwork = QNetwork(self.state_size, self.action_size, 'main') print("Main QNetwork created !\n") print("Creation of the target QNetwork...") self.targetQNetwork = QNetwork(self.state_size, self.action_size, 'target') print("Target QNetwork created !\n") self.buffer = PrioritizedReplayBuffer(parameters.BUFFER_SIZE, parameters.ALPHA) self.epsilon = parameters.EPSILON_START self.beta = parameters.BETA_START trainables = tf.trainable_variables() self.update_target_ops = updateTargetGraph(trainables) self.nb_ep = 1 def pre_train(self): print("Beginning of the pre-training...") for i in range(parameters.PRE_TRAIN_STEPS): s = self.env.reset() done = False episode_step = 0 episode_reward = 0 while episode_step < parameters.MAX_EPISODE_STEPS and not done: a = random.randint(0, self.action_size - 1) s_, r, done, info = self.env.act(a) self.buffer.add(s, a, r, s_, done) s = s_ episode_reward += r episode_step += 1 if i % 100 == 0: print("Pre-train step n", i) print("End of the pre training !") def run(self): print("Beginning of the run...") self.pre_train() self.total_steps = 0 self.nb_ep = 1 while self.nb_ep < parameters.TRAINING_STEPS: s = self.env.reset() episode_reward = 0 done = False memory = deque() discount_R = 0 episode_step = 0 # Render parameters self.env.set_render(self.nb_ep % parameters.RENDER_FREQ == 0) while episode_step < parameters.MAX_EPISODE_STEPS and not done: if random.random() < self.epsilon: a = random.randint(0, self.action_size - 1) else: a = self.sess.run( self.mainQNetwork.predict, feed_dict={self.mainQNetwork.inputs: [s]}) a = a[0] s_, r, done, info = self.env.act(a) episode_reward += r memory.append((s, a, r, s_, done)) if len(memory) > parameters.N_STEP_RETURN: s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft() discount_R = r_mem for i, (si, ai, ri, s_i, di) in enumerate(memory): discount_R += ri * parameters.DISCOUNT**(i + 1) self.buffer.add(s_mem, a_mem, discount_R, s_, done) if episode_step % parameters.TRAINING_FREQ == 0: train_batch = self.buffer.sample(parameters.BATCH_SIZE, self.beta) # Incr beta if self.beta <= parameters.BETA_STOP: self.beta += parameters.BETA_INCR feed_dict = {self.mainQNetwork.inputs: train_batch[3]} mainQaction = self.sess.run(self.mainQNetwork.predict, feed_dict=feed_dict) feed_dict = {self.targetQNetwork.inputs: train_batch[3]} targetQvalues = self.sess.run(self.targetQNetwork.Qvalues, feed_dict=feed_dict) # Done multiplier : # equals 0 if the episode was done # equals 1 else done_multiplier = (1 - train_batch[4]) doubleQ = targetQvalues[range(parameters.BATCH_SIZE), mainQaction] targetQvalues = train_batch[2] + \ parameters.DISCOUNT * doubleQ * done_multiplier feed_dict = { self.mainQNetwork.inputs: train_batch[0], self.mainQNetwork.Qtarget: targetQvalues, self.mainQNetwork.actions: train_batch[1] } td_error, _ = self.sess.run( [self.mainQNetwork.td_error, self.mainQNetwork.train], feed_dict=feed_dict) self.buffer.update_priorities(train_batch[6], td_error + 1e-6) update_target(self.update_target_ops, self.sess) s = s_ episode_step += 1 self.total_steps += 1 # Decay epsilon if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY DISPLAYER.add_reward(episode_reward) self.total_steps += 1 if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: 
print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon: %f' % (self.nb_ep, episode_reward, episode_step, self.epsilon)) self.nb_ep += 1 def play(self, number_run): print("Playing for", number_run, "runs") try: for i in range(number_run): s = self.env.reset() episode_reward = 0 done = False while not done: a = self.sess.run( self.mainQNetwork.predict, feed_dict={self.mainQNetwork.inputs: [s]}) a = a[0] s, r, done, info = self.env.act(a) episode_reward += r print("Episode reward :", episode_reward) except KeyboardInterrupt as e: pass except Exception as e: print("Exception :", e) finally: self.env.set_render(False) print("End of the demo") self.env.close() def stop(self): self.env.close()
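# Illustrative sketch of the N-step return assembled from the deque in run():
# the oldest transition is credited with the discounted sum of the next
# N_STEP_RETURN rewards before being written to the prioritized buffer.
def n_step_return(rewards, discount):
    """Discounted sum of a short reward window: sum_i discount**i * rewards[i]."""
    total = 0.0
    for i, r in enumerate(rewards):
        total += (discount ** i) * r
    return total

# n_step_return([1.0, 1.0, 1.0], 0.9) == 1.0 + 0.9 + 0.81 == 2.71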
def learn_neural_linear( env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=10, #100 checkpoint_freq=10000, checkpoint_path=None, learning_starts=999, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, ddqn=False, prior="no prior", actor="dqn", **network_kwargs): #Train a deepq model. # Create all the functions necessary to train the model checkpoint_path = logger.get_dir() sess = get_session() set_global_seeds(seed) blr_params = BLRParams() q_func = deepq.models.cnn_to_mlp( convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], hiddens=[blr_params.feat_dim], dueling=bool(0), ) # q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, feat_dim, feat, feat_target, target, last_layer_weights, blr_ops, blr_helpers = deepq.build_train_neural_linear( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, double_q=ddqn, actor=actor) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. 
U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) # BLR # preliminearies num_actions = env.action_space.n w_mu = np.zeros((num_actions, feat_dim)) w_sample = np.random.normal(loc=0, scale=0.1, size=(num_actions, feat_dim)) w_target = np.random.normal(loc=0, scale=0.1, size=(num_actions, feat_dim)) w_cov = np.zeros((num_actions, feat_dim, feat_dim)) for a in range(num_actions): w_cov[a] = np.eye(feat_dim) phiphiT = np.zeros((num_actions, feat_dim, feat_dim)) phiY = np.zeros((num_actions, feat_dim)) a0 = 6 b0 = 6 a_sig = [a0 for _ in range(num_actions)] b_sig = [b0 for _ in range(num_actions)] yy = [0 for _ in range(num_actions)] blr_update = 0 for t in tqdm(range(total_timesteps)): if callback is not None: if callback(locals(), globals()): break # if t % 1000 == 0: # print("{}/{}".format(t,total_timesteps)) # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], w_sample[None]) env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # clipping like in BDQN rew = np.sign(rew) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True # sample new w from posterior if t > 0 and t % blr_params.sample_w == 0: for i in range(num_actions): if blr_params.no_prior: w_sample[i] = np.random.multivariate_normal( w_mu[i], w_cov[i]) else: sigma2_s = b_sig[i] * invgamma.rvs(a_sig[i]) w_sample[i] = np.random.multivariate_normal( w_mu[i], sigma2_s * w_cov[i]) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. 
# when the target network updates we update our posterior beliefs # and transfer information from the old target # to our new target blr_update += 1 if blr_update == 10: #10 print("updating posterior parameters") if blr_params.no_prior: phiphiT, phiY, w_mu, w_cov, a_sig, b_sig = BayesRegNoPrior( phiphiT, phiY, w_target, replay_buffer, feat, feat_target, target, num_actions, blr_params, w_mu, w_cov, sess.run(last_layer_weights), prior, blr_ops, blr_helpers) else: phiphiT, phiY, w_mu, w_cov, a_sig, b_sig = BayesRegWithPrior( phiphiT, phiY, w_target, replay_buffer, feat, feat_target, target, num_actions, blr_params, w_mu, w_cov, sess.run(last_layer_weights)) blr_update = 0 print("updating target, steps {}".format(t)) update_target() w_target = w_mu mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) mean_10ep_reward = round(np.mean(episode_rewards[-11:-1]), 1) num_episodes = len(episode_rewards) # if done and print_freq is not None and len(episode_rewards) % print_freq == 0: if t % 10000 == 0: #1000 logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("mean 10 episode reward", mean_10ep_reward) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
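# Simplified sketch of the Thompson-sampling step in learn_neural_linear above:
# every `sample_w` steps a weight vector per action is drawn from the Gaussian
# posterior over the last-layer features, and actions are chosen greedily
# against phi(s)^T w. Assumes a fixed noise variance here; the original code
# draws sigma^2 from an inverse-gamma posterior unless `no_prior` is set.
import numpy as np

def thompson_action(phi, w_mu, w_cov, sigma2=1.0):
    """phi: (feat_dim,); w_mu: (A, feat_dim); w_cov: (A, feat_dim, feat_dim)."""
    num_actions = w_mu.shape[0]
    w = np.stack([np.random.multivariate_normal(w_mu[a], sigma2 * w_cov[a])
                  for a in range(num_actions)])
    return int(np.argmax(w @ phi))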
def train_model_and_save_results(env_name, n_hid, lr, eps_min, delta, gamma, kappa, prioritize, alpha, beta, timesteps_per_update_target, timesteps_per_action_taken, total_timesteps, perturb, folder_path): # env_name: environment name # n_hid: list of numbers of units in hidden layers # eps_min: final exploration epsilon # delta: linear decrement of exploration epsilon per timestep # kappa: list of 3 constants for next state, reward and done predictions # prioritize: boolean, whether to use prioritized reply buffer or not # alpha: prioritization constant # beta: weight correction constant # perturb: boolean, whether to use parameter noise explotration # folder_path: to save results env = gym.make(env_name) eps = tf.get_variable('eps', (), initializer=tf.constant_initializer(1.0), dtype=tf.float32) update_eps = tf.assign(eps, tf.maximum(eps - delta, eps_min)) n_in = 1 for n in env.observation_space.shape: n_in *= n n_out = env.action_space.n sizes = [n_in] + n_hid + [n_out] # create networks with tf.variable_scope('Q'): Q_params, Q_function = Q_model(sizes) with tf.variable_scope('Q_target'): Q_params_target, Q_function_target = Q_model(sizes) if perturb: with tf.variable_scope('Q_perturbed'): Q_params_perturbed, Q_function_perturbed = Q_model(sizes) with tf.variable_scope('Q_adapt'): Q_params_adapt, Q_function_adapt = Q_model(sizes) perturbation_scale = tf.get_variable( "perturbation_scale", (), initializer=tf.constant_initializer(0.01)) larger_perturbation_scale = perturbation_scale * 1.01 smaller_perturbation_scale = perturbation_scale / 1.01 # create placeholders obses = tf.placeholder(tf.float32, shape=[None, n_in]) actions = tf.placeholder(tf.int32, shape=[None]) rewards = tf.placeholder(tf.float32, shape=[None]) next_obses = tf.placeholder(tf.float32, shape=[None, n_in]) dones = tf.placeholder(tf.float32, shape=[None]) weights = tf.placeholder(tf.float32, shape=[None]) # create outputs of Q functions Q_function_obses = Q_function(obses) Q_values_per_action = Q_function_obses[0] Q_actions = tf.argmax(Q_values_per_action, axis=1) if perturb: ops = [] for i in range(len(Q_params) - 6): ops.append( tf.assign( Q_params_perturbed[i], Q_params[i] + tf.random_normal(shape=tf.shape(Q_params[i]), mean=0., stddev=perturbation_scale))) assign_perturbed = tf.group(*ops) ops = [] for i in range(len(Q_params) - 6): ops.append( tf.assign( Q_params_adapt[i], Q_params[i] + tf.random_normal(shape=tf.shape(Q_params[i]), mean=0., stddev=perturbation_scale))) assign_adapt = tf.group(*ops) Q_values_per_action_perturbed = Q_function_perturbed(obses)[0] Q_actions_perturbed = tf.argmax(Q_values_per_action_perturbed, axis=1) Q_values_per_action_adapt = Q_function_adapt(obses)[0] kl = tf.reduce_sum(tf.nn.softmax(Q_values_per_action) * (tf.log(tf.nn.softmax(Q_values_per_action)) - tf.log(tf.nn.softmax(Q_values_per_action_adapt))), axis=-1) kl_mean = tf.reduce_mean(kl) kl_eps = -tf.log(1 - eps + eps / n_out) with tf.control_dependencies([assign_adapt]): update_perturbation_scale = tf.cond( kl_mean < kl_eps, lambda: perturbation_scale.assign(perturbation_scale * 1.01), lambda: perturbation_scale.assign(perturbation_scale / 1.01)) Q_values = tf.reduce_sum(Q_values_per_action * tf.one_hot(actions, n_out), axis=1) Q_values_target = rewards + gamma * tf.reduce_sum( tf.one_hot(tf.argmax(Q_function(next_obses)[0], axis=1), n_out) * Q_function_target(next_obses)[0], axis=1) * (1.0 - dones) # create errors TD = Q_values - tf.stop_gradient(Q_values_target) TD_error = tf.reduce_mean(weights * Huber_loss(TD)) 
state_difference = tf.reduce_sum( Q_function_obses[1] * tf.expand_dims(tf.one_hot(actions, n_out), 2), axis=1) - next_obses state_difference_error = tf.reduce_mean( tf.expand_dims(weights, 1) * Huber_loss(state_difference)) reward_difference = tf.reduce_sum( Q_function_obses[2] * tf.one_hot(actions, n_out), axis=1) - rewards reward_difference_error = tf.reduce_mean(weights * Huber_loss(reward_difference)) done_difference = tf.nn.sigmoid_cross_entropy_with_logits( labels=dones, logits=tf.reduce_sum(Q_function_obses[3] * tf.one_hot(actions, n_out), axis=1)) done_difference_error = tf.reduce_mean(weights * done_difference) # compute total error total_error = TD_error if kappa[0] > 0: total_error += kappa[0] * state_difference_error if kappa[1] > 0: total_error += kappa[1] * reward_difference_error if kappa[2] > 0: total_error += kappa[2] * done_difference_error # create gradients to save grads = tf.gradients(total_error, Q_params) grad_sum_of_squares = sum( [tf.reduce_sum(x * x) for x in grads if x is not None]) # create optimizer and update rule Adam = tf.train.AdamOptimizer(learning_rate=lr) update = Adam.minimize(total_error, var_list=Q_params) # initialize session sess = tf.Session() # define update_target def update_target(): for i in range(len(Q_params)): sess.run(Q_params_target[i].assign(Q_params[i])) if prioritize: replay_buffer = PrioritizedReplayBuffer(50000, alpha) else: replay_buffer = ReplayBuffer(50000) # initialize parameters to save episode_length = [0] TD_errors = [] state_difference_errors = [] reward_difference_errors = [] done_difference_errors = [] grad_sums_of_squares = [] # initialize all variables sess.run(tf.global_variables_initializer()) # start training obs = env.reset() for t in range(total_timesteps): # choose action if t % timesteps_per_action_taken == 0: if (not perturb) and np.random.uniform() < sess.run(eps): # take random action action = np.random.randint(n_out) else: if perturb: # take randomly perturb action, then update scale action = sess.run(Q_actions_perturbed, feed_dict={obses: obs[None]})[0] sess.run(update_perturbation_scale, feed_dict={obses: obs[None]}) else: # take optimal action action = sess.run(Q_actions, feed_dict={obses: obs[None]})[0] next_obs, rew, done, _ = env.step(action) episode_length[-1] += 1 # add experience to buffer replay_buffer.add(obs, action, rew, next_obs, float(done)) obs = next_obs if (done): # episode finished print("episode length = " + str(episode_length[-1])) obs = env.reset() episode_length.append(0) if perturb: sess.run(assign_perturbed) print(sess.run(perturbation_scale)) if t % timesteps_per_update_target == 0: # update target print("t = " + str(t) + ", updating target...") update_target() if t > 1000: # update primary network if prioritize: beta_current = (beta * (total_timesteps - t) + t) / total_timesteps obses_current, actions_current, rewards_current, next_obses_current, dones_current, weights_current, idxes_current = replay_buffer.sample( 32, beta_current) else: obses_current, actions_current, rewards_current, next_obses_current, dones_current = replay_buffer.sample( 32) weights_current = np.ones_like(rewards_current) feed_dict = { obses: obses_current, actions: actions_current, rewards: rewards_current, next_obses: next_obses_current, dones: dones_current, weights: weights_current } if prioritize: new_weights = np.abs(sess.run(TD, feed_dict=feed_dict)) + 1e-6 replay_buffer.update_priorities(idxes_current, new_weights) TD_errors.append( sess.run(TD_error, feed_dict=feed_dict).astype(np.float64)) 
state_difference_errors.append( sess.run(state_difference_error, feed_dict=feed_dict).astype(np.float64)) reward_difference_errors.append( sess.run(reward_difference_error, feed_dict=feed_dict).astype(np.float64)) done_difference_errors.append( sess.run(done_difference_error, feed_dict=feed_dict).astype(np.float64)) grad_sums_of_squares.append( sess.run(grad_sum_of_squares, feed_dict=feed_dict).astype(np.float64)) sess.run(update, feed_dict=feed_dict) # update eps and beta sess.run(update_eps) # training finished, save progress print('saving progress and params...') if not os.path.exists(folder_path + 'params/'): os.makedirs(folder_path + 'params/') with open(folder_path + 'progress.json', 'w') as f: data = { 'episode_length': episode_length, 'TD_errors': TD_errors, 'state_difference_errors': state_difference_errors, 'reward_difference_errors': reward_difference_errors, 'done_difference_errors': done_difference_errors, 'grad_sums_of_squares': grad_sums_of_squares } json.dump(data, f) saver = tf.train.Saver({v.name: v for v in Q_params}) saver.save(sess, folder_path + 'params/params.ckpt') with open(folder_path + 'params/params.pkl', 'wb') as f: cloudpickle.dump([sess.run(param) for param in Q_params], f) print('saved...') # tidy up sess.close() tf.reset_default_graph()
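# Compact sketch of the perturbation-scale adaptation wired up above
# (Plappert et al., 2017): grow the noise scale while the KL divergence
# between perturbed and unperturbed policies stays below the eps-greedy
# equivalent threshold, and shrink it otherwise.
import numpy as np

def adapt_perturbation_scale(scale, kl, eps, n_actions, factor=1.01):
    kl_threshold = -np.log(1.0 - eps + eps / n_actions)
    return scale * factor if kl < kl_threshold else scale / factor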
print(var.name, val) episode_rewards = [0.0] loss_array = [] obs = env.reset() for t in itertools.count(): # Take action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] # print(action) action = list(action) maxQ = max(action) selected = action.index(maxQ) new_obs, rew, done, _ = env.step(selected) # Store transition in the replay buffer. avail = np.zeros(env.action_space.n, dtype=float).tolist() replay_buffer.add(obs, selected, rew, new_obs, float(done), avail) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0) is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200 td_error, rew_t_ph, q_t_selected_target, q_t_selected = -1, -1, -1, -1 if is_solved: # Show off the result env.render() else: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if t > 1000:
def learn(env, q_func, num_actions=3, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=100, print_freq=15, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None, demo_replay=[]): """Train a deepq model. Parameters ------- q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. """ # Create all the functions necessary to train the model sess = TU.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((64, 64), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. 
TU.initialize() update_target() group_id = 0 old_num = 0 reset = True Action_Choose = False player = [] episode_rewards = [0.0] saved_mean_reward = None marine_record = {} obs = env.reset() screen = obs[0].observation["screen"][_UNIT_TYPE] obs, xy_per_marine = common.init(env, obs) with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. - exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True # custom process for DefeatZerglingsAndBanelings reset = False Action_Choose = not (Action_Choose) if Action_Choose == True: #the first action obs, screen, group_id, player = common.select_marine(env, obs) marine_record = common.run_record(marine_record, obs) else: # the second action action = act(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] action = common.check_action(obs, action) new_action = None obs, new_action, marine_record = common.marine_action( env, obs, group_id, player, action, marine_record) army_count = env._obs[0].observation.player_common.army_count try: if army_count > 0 and ( _MOVE_SCREEN in obs[0].observation["available_actions"]): obs = env.step(actions=new_action) else: new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) except Exception as e: print(new_action) print(e) new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) # get the new screen in action 2 player_y, player_x = np.nonzero( obs[0].observation["screen"][_SELECTED] == 1) new_screen = obs[0].observation["screen"][_UNIT_TYPE] for i in range(len(player_y)): new_screen[player_y[i]][player_x[i]] = 49 #update every step rew = obs[0].reward done = obs[0].step_type == environment.StepType.LAST episode_rewards[-1] += rew reward = episode_rewards[-1] if Action_Choose == False: # only store the screen after the action is done replay_buffer.add(screen, action, rew, new_screen, float(done)) mirror_new_screen = common._map_mirror(new_screen) mirror_screen = common._map_mirror(screen) replay_buffer.add(mirror_screen, action, rew, mirror_new_screen, float(done)) if done: obs = env.reset() Action_Choose = False group_list = common.init(env, obs) episode_rewards.append(0.0) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() num_episodes = len(episode_rewards) #test for me if num_episodes > old_num: old_num = num_episodes print("now the episode is {}".format(num_episodes)) #test for me if (num_episodes > 102): mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) else: mean_100ep_reward = round(np.mean(episode_rewards), 1) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: print("get the log") logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act)
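# A plausible stand-in for the common._map_mirror augmentation used above,
# where each transition is stored a second time with a mirrored screen
# (assumption: a simple left-right flip of the 2D unit-type map).
import numpy as np

def map_mirror(screen):
    """Flip an (H, W) screen array left to right."""
    return np.fliplr(np.asarray(screen))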
def learn( env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, epoch_steps=20000, gpu_memory=1.0, double_q=False, scope="deepq", directory='.', nb_test_steps=10000, ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model config = tf.ConfigProto(allow_soft_placement=True) config.gpu_options.per_process_gpu_memory_fraction = gpu_memory config.gpu_options.polling_inactive_delay_msecs = 25 sess = tf.Session(config=config) sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph def make_obs_ph(name): return ObservationInput(env.observation_space, name=name) act, act_greedy, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, double_q=bool(double_q), scope=scope) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True #recording records = {'loss': [], 'online_reward': [], 'test_reward': []} with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_state(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True ep_losses, ep_means, losses = [], [], [] print("===== LEARNING STARTS =====") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. timelimit_env = env while (not hasattr(timelimit_env, '_elapsed_steps')): timelimit_env = timelimit_env.env if timelimit_env._elapsed_steps < timelimit_env._max_episode_steps: # Store transition in the replay buffer. 
replay_buffer.add(obs, action, rew, new_obs, float(done)) else: replay_buffer.add(obs, action, rew, new_obs, float(not done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if losses: ep_losses.append(np.mean(losses)) losses = [] if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) losses.append(td_errors) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if (t + 1) % epoch_steps == 0 and (t + 1) > learning_starts: test_reward = test(env, act_greedy, nb_test_steps=nb_test_steps) records['test_reward'].append(test_reward) records['loss'].append(np.mean(ep_losses)) records['online_reward'].append( round(np.mean(episode_rewards[-101:-1]), 1)) pickle.dump(records, open(os.path.join(directory, "records.pkl"), "wb")) print("==== EPOCH %d ===" % ((t + 1) / epoch_steps)) print(tabulate([[k, v[-1]] for (k, v) in records.items()])) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and (t + 1) > learning_starts and num_episodes > 100 and (t + 1) % checkpoint_freq == 0): print("Saving model to model_%d.pkl" % (t + 1)) act.save( os.path.join(directory, "model_" + str(t + 1) + ".pkl")) if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_state(model_file) return act, records
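# Illustrative sketch of the time-limit handling above: when gym's TimeLimit
# wrapper truncates an episode, `done` is True even though the state is not
# terminal, so the transition is stored as non-terminal and the TD target
# keeps bootstrapping from the next state.
def stored_done_flag(done, truncated_by_time_limit):
    """Terminal flag written to the replay buffer for one transition."""
    return 0.0 if truncated_by_time_limit else float(done)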
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=3000, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=3000, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name), q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=0.99, double_q=False #grad_norm_clipping=10, # param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(10000), initial_p=1.0, final_p=0.02) # Initialize the parameters and copy them to the target network. U.initialize() update_target() old_state = None formula_LTLf_1 = "!d U(g)" monitoring_RightToLeft = MonitoringSpecification( ltlf_formula=formula_LTLf_1, r=0, c=-0.01, s=10, f=-10 ) formula_LTLf_2 = "F(G(bb)) " # break brick monitoring_BreakBrick = MonitoringSpecification( ltlf_formula=formula_LTLf_2, r=10, c=-0.01, s=10, f=0 ) monitoring_specifications = [monitoring_BreakBrick, monitoring_RightToLeft] def RightToLeftConversion(observation) -> TraceStep: done=False global old_state if arrays_equal(observation[-9:], np.zeros((len(observation[-9:])))): ### Checking if all Bricks are broken # print('goal reached') goal = True # all bricks are broken done = True else: goal = False dead = False if done and not goal: dead = True order = check_ordered(observation[-9:]) if not order: # print('wrong order', state[5:]) dead=True done = True if old_state is not None: # if not the first state if not arrays_equal(old_state[-9:], observation[-9:]): brick_broken = True # check_ordered(state[-9:]) # print(' a brick is broken') else: brick_broken = False else: brick_broken = False dictionary={'g': goal, 'd': dead, 'o': order, 'bb':brick_broken} #print(dictionary) return dictionary multi_monitor = MultiRewardMonitor( monitoring_specifications=monitoring_specifications, obs_to_trace_step=RightToLeftConversion ) episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True # initialize done = False #monitor.get_reward(None, False) # add first state in trace with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) episodeCounter=0 num_episodes=0 for t in itertools.count(): # Take action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] #print(action) #print(action) new_obs, rew, done, _ = 
env.step(action) done=False #done=False ## FOR FIRE ONLY #print(new_obs) #new_obs.append() start_time = time.time() rew, is_perm = multi_monitor(new_obs) #print("--- %s seconds ---" % (time.time() - start_time)) old_state=new_obs #print(rew) done=done or is_perm # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200 if episodeCounter % 100 == 0 or episodeCounter<1: # Show off the result #print("coming here Again and Again") env.render() if done: episodeCounter+=1 num_episodes+=1 obs = env.reset() old_state=None episode_rewards.append(0) multi_monitor.reset() #monitor.get_reward(None, False) else: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if t > 1000: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(64) train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards)) # Update target network periodically. if t % 1000 == 0: update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) if done and len(episode_rewards) % 10 == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", len(episode_rewards)) logger.record_tabular("currentEpisodeReward", episode_rewards[-1]) logger.record_tabular("mean 100 episode reward", round(np.mean(episode_rewards[-101:-1]), 1)) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) act.save_act() #save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward # if model_saved: # if print_freq is not None: # logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) # load_variables(model_file) return act
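# ---------------------------------------------------------------------------
# Hedged sketch (not from the code above): the training step in the loop above
# samples 64 transitions uniformly and passes np.ones_like(rewards) as importance
# weights, i.e. every transition is weighted equally. A minimal uniform replay
# buffer with that baselines-style interface might look like this; the class and
# variable names below are illustrative assumptions, not the library's code.
import random
import numpy as np

class MinimalReplayBuffer:
    def __init__(self, size):
        self._storage = []      # ring buffer of transitions
        self._maxsize = size
        self._next_idx = 0

    def __len__(self):
        return len(self._storage)

    def add(self, obs, action, reward, next_obs, done):
        data = (obs, action, reward, next_obs, done)
        if self._next_idx >= len(self._storage):
            self._storage.append(data)
        else:
            self._storage[self._next_idx] = data   # overwrite the oldest slot
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def sample(self, batch_size):
        idxes = [random.randrange(len(self._storage)) for _ in range(batch_size)]
        obses, actions, rewards, next_obses, dones = zip(*(self._storage[i] for i in idxes))
        return (np.array(obses), np.array(actions), np.array(rewards),
                np.array(next_obses), np.array(dones))

# Uniform sampling is why the call above can use unit weights:
# train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards)).
# ---------------------------------------------------------------------------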
class deep_q_net: def __init__(self, state_size, action_size, path="Learning/Weights/weights.h5", new_weights=True, memory_size=100000, replay_start_size=6000, epsilon=1, epsilon_min=.05, max_step_for_epsilon_decay=125000*3, prioritized_replay=False, alpha=0.6, beta=0.4, beta_inc=0.0000005): self.state_size = state_size self.action_size = action_size self.path = path self.use_prio_buffer = prioritized_replay if not prioritized_replay: self.memory = deque(maxlen=memory_size) else: self.prio_memory = PrioritizedReplayBuffer(memory_size, alpha) self.beta = beta self.beta_inc = beta_inc # self.beta_schedule = LinearSchedule(max_step_for_epsilon_decay, # 1, # 0.4) self.gamma = 0.95 # discount rate self.epsilon = epsilon # exploration rate self.epsilon_min = epsilon_min self.epsilon_decay = 0.995 self.max_step_for_lin_epsilon_decay = max_step_for_epsilon_decay self.epsilon_decay_linear = self.epsilon / self.max_step_for_lin_epsilon_decay self.learning_rate = 0.00025 self.replay_start_size = replay_start_size self.model = self._build_model() self.target_model = clone_model(self.model) #self._build_model() self.target_model.compile(optimizer='sgd', loss='mse') self.step = 0 if not new_weights: self.model.load_weights(path) self.update_target() self.callback = Evaluation.create_tensorboard() def _build_model(self): config = tf.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = 0.5 K.tensorflow_backend.set_session(tf.Session(config=config)) # Neural Net for Deep-Q learning Model model = Sequential() model.add(Conv2D(16, 5, strides=1, activation='relu', input_shape=self.state_size, #data_format="channels_first", kernel_initializer='he_normal', padding='same')) model.add(Conv2D(32, 3, strides=1, activation='relu', kernel_initializer='he_normal', #data_format="channels_first" padding='same')) # model.add(Convolution2D(64, 3, strides=1, activation='relu')) model.add(Flatten()) model.add(Dense(128, activation='relu', kernel_initializer='he_normal')) model.add(Dense(self.action_size, activation='linear', kernel_initializer='he_normal')) def abs_err(prediction, target): return K.abs(prediction - target) model.compile(loss=self._huber_loss,#'mse', # optimizer=Adam(lr=self.learning_rate)) optimizer=RMSprop(lr=self.learning_rate), metrics=[abs_err]) return model def _huber_loss(self, target, prediction): error = prediction - target print("Loss: ", K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)) return K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1) def remember(self, state, action, reward, next_state, done): if not self.use_prio_buffer: self.memory.append((state, action, reward, next_state, done)) else: self.prio_memory.add(state, action, reward, next_state, done) # print(state, action, reward, next_state, done) def act(self, state): if np.random.rand() <= self.epsilon: return random.randrange(self.action_size) act_values = self.model.predict(state) return np.argmax(act_values[0]) # returns action def get_minibatch(self, batch_size): if not self.use_prio_buffer: return None, random.sample(self.memory, batch_size), None else: self.beta += self.beta_inc states, actions, rewards, next_states, dones, weights, idxes = self.prio_memory.sample(batch_size, self.beta) minibatch = zip(states, actions, rewards, next_states, dones) return idxes, minibatch, weights # return None, self.prio_memory.sample(batch_size,self.beta), None def replay(self, batch_size): if (not self.use_prio_buffer and len(self.memory) < self.replay_start_size) or \ (self.use_prio_buffer and 
len(self.prio_memory) < self.replay_start_size): return tree_idx, minibatch, is_weights = self.get_minibatch(batch_size) # random.sample(self.memory, batch_size) # except ValueError: # minibatch = self.memory if isinstance(self.state_size, int): state_array = np.ndarray(shape=(32, self.state_size)) else: state_array = np.ndarray(shape=(32, self.state_size[0], self.state_size[1], self.state_size[2])) y = np.ndarray(shape=(32, self.action_size)) actions = np.ndarray(shape=(32,), dtype=int) i = 0 for state, action, reward, next_state, done in minibatch: # self.model.fit(state, target_f, epochs=1, verbose=0) state_array[i] = state y[i] = self.set_target(reward, state, next_state, action, done) actions[i] = int(action) i += 1 # self.model.fit(state, target_f, epochs=1, verbose=0) # self.model.fit(prediction, y, batch_size=1) # self.model.train_on_batch(state_array, y) self.train(state_array, y, is_weights, tree_idx, actions, minibatch) self.decrease_epsilon_linear() def train(self, states, target, is_weights, tree_idx, actions, minibatch): if not self.use_prio_buffer: self.model.train_on_batch(states, target) # self.model.fit(prediction, target, verbose=0, callbacks=[self.callback]) else: is_weights = np.reshape(is_weights, newshape=(is_weights.shape[0])) self.model.fit(states, target, sample_weight=is_weights, verbose=0) #abs_errs = np.abs(self.model.predict_on_batch(states)[np.arange(32), actions] - target[np.arange(32), actions]) batch_size = len(minibatch) predictions = self.model.predict_on_batch(states)[np.arange(batch_size), actions] tar_vals = [self.set_target(r,s,na,a,d) for r,s,na,a,d in minibatch] #target[np.arange(batch_size), actions] # huber loss error = predictions - tar_vals #print("Loss: ", K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)) abs_errs = np.sqrt(1 + np.square(error)) - 1 #abs_errs = self._huber_loss(tar_vals, predictions) abs_errs = abs_errs + 0.01 self.prio_memory.update_priorities(tree_idx, abs_errs) def set_target(self, reward, state, next_state, action, done): target = reward if not done: target = reward + self.gamma * \ np.amax(self.target_model.predict(next_state)[0]) target_f = self.model.predict(state) target_f[0][action] = target return target_f def decrease_epsilon_factor(self): if (not self.use_prio_buffer and len(self.memory) < self.replay_start_size) or \ (self.use_prio_buffer and len(self.prio_memory) < self.replay_start_size): return if self.epsilon > self.epsilon_min: self.epsilon *= self.epsilon_decay def decrease_epsilon_linear(self): if self.epsilon > self.epsilon_min: self.epsilon -= self.epsilon_decay_linear def save_weights(self): self.model.save_weights(self.path) def update_target(self): self.target_model.set_weights(self.model.get_weights())
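# ---------------------------------------------------------------------------
# Hedged sketch (illustrative, not part of deep_q_net): how the pseudo-Huber
# transform used in _huber_loss above turns raw TD errors into replay priorities.
# The small additive constant mirrors the `abs_errs + 0.01` above so that no
# transition ever receives zero priority; the function names are assumptions.
import numpy as np

def pseudo_huber(errors):
    # sqrt(1 + e^2) - 1: roughly quadratic near zero, close to |e| for large errors
    return np.sqrt(1.0 + np.square(errors)) - 1.0

def priorities_from_td_errors(td_errors, min_priority=0.01):
    return pseudo_huber(np.asarray(td_errors, dtype=np.float64)) + min_priority

# Example: priorities_from_td_errors([0.1, 1.0, 10.0]) is roughly
# [0.015, 0.424, 9.06]; large errors dominate sampling while tiny ones remain
# sampleable. The agent above also nudges beta towards 1.0 manually
# (self.beta += self.beta_inc) instead of using a LinearSchedule.
# ---------------------------------------------------------------------------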
def learn(env, q_func, num_actions=4, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None): """Train a deepq model. Parameters ------- env: pysc2.env.SC2Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((32, 32), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq") # # act_y, train_y, update_target_y, debug_y = deepq.build_train( # make_obs_ph=make_obs_ph, # q_func=q_func, # num_actions=num_actions, # optimizer=tf.train.AdamOptimizer(learning_rate=lr), # gamma=gamma, # grad_norm_clipping=10, # scope="deepq_y" # ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) # replay_buffer_y = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule( prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) # beta_schedule_y = LinearSchedule(prioritized_replay_beta_iters, # initial_p=prioritized_replay_beta0, # final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) # replay_buffer_y = ReplayBuffer(buffer_size) beta_schedule = None # beta_schedule_y = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule( schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() # update_target_y() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # Select all marines first obs = env.step( actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] screen = (player_relative == _PLAYER_NEUTRAL).astype(int) #+ path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if (player[0] > 16): screen = shift(LEFT, player[0] - 16, screen) elif (player[0] < 16): screen = shift(RIGHT, 16 - player[0], screen) if (player[1] > 16): screen = shift(UP, player[1] - 16, screen) elif (player[1] < 16): screen = shift(DOWN, 16 - player[1], screen) reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join("model/", "mineral_shards") print(model_file) for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. 
- exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act( np.array(screen)[None], update_eps=update_eps, **kwargs)[0] # action_y = act_y(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] reset = False coord = [player[0], player[1]] rew = 0 if (action == 0): #UP if (player[1] >= 8): coord = [player[0], player[1] - 8] #path_memory_[player[1] - 16 : player[1], player[0]] = -1 elif (player[1] > 0): coord = [player[0], 0] #path_memory_[0 : player[1], player[0]] = -1 #else: # rew -= 1 elif (action == 1): #DOWN if (player[1] <= 23): coord = [player[0], player[1] + 8] #path_memory_[player[1] : player[1] + 16, player[0]] = -1 elif (player[1] > 23): coord = [player[0], 31] #path_memory_[player[1] : 63, player[0]] = -1 #else: # rew -= 1 elif (action == 2): #LEFT if (player[0] >= 8): coord = [player[0] - 8, player[1]] #path_memory_[player[1], player[0] - 16 : player[0]] = -1 elif (player[0] < 8): coord = [0, player[1]] #path_memory_[player[1], 0 : player[0]] = -1 #else: # rew -= 1 elif (action == 3): #RIGHT if (player[0] <= 23): coord = [player[0] + 8, player[1]] #path_memory_[player[1], player[0] : player[0] + 16] = -1 elif (player[0] > 23): coord = [31, player[1]] #path_memory_[player[1], player[0] : 63] = -1 if _MOVE_SCREEN not in obs[0].observation["available_actions"]: obs = env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) new_action = [ sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord]) ] # else: # new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] new_screen = (player_relative == _PLAYER_NEUTRAL).astype( int) #+ path_memory player_y, player_x = ( player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if (player[0] > 16): new_screen = shift(LEFT, player[0] - 16, new_screen) elif (player[0] < 16): new_screen = shift(RIGHT, 16 - player[0], new_screen) if (player[1] > 16): new_screen = shift(UP, player[1] - 16, new_screen) elif (player[1] < 16): new_screen = shift(DOWN, 16 - player[1], new_screen) rew = obs[0].reward done = obs[0].step_type == environment.StepType.LAST # Store transition in the replay buffer. replay_buffer.add(screen, action, rew, new_screen, float(done)) # replay_buffer_y.add(screen, action_y, rew, new_screen, float(done)) screen = new_screen episode_rewards[-1] += rew reward = episode_rewards[-1] if done: obs = env.reset() player_relative = obs[0].observation["screen"][ _PLAYER_RELATIVE] screen = (player_relative == _PLAYER_NEUTRAL).astype( int) #+ path_memory player_y, player_x = ( player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] # Select all marines first env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) episode_rewards.append(0.0) #episode_minerals.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience # experience_y = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) # (obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y, batch_idxes_y) = experience_y else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None # obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y = replay_buffer_y.sample(batch_size) # weights_y, batch_idxes_y = np.ones_like(rewards_y), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) # td_errors_y = train_x(obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps # new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) # replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() # update_target_y() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}". format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act)
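# ---------------------------------------------------------------------------
# Hedged sketch (not the baselines implementation): the proportional-priority
# sampling contract the prioritized_replay branch above depends on. Priorities
# p_i are raised to alpha to form the sampling distribution, and importance
# weights (N * P(i))^(-beta) are normalised by their maximum. Class and method
# names are illustrative assumptions.
import numpy as np

class TinyProportionalPER:
    def __init__(self, capacity, alpha=0.6):
        self.capacity, self.alpha = capacity, alpha
        self.data, self.priorities = [], []
        self.next_idx = 0

    def add(self, transition, priority=1.0):
        if len(self.data) < self.capacity:
            self.data.append(transition)
            self.priorities.append(priority)
        else:
            self.data[self.next_idx] = transition
            self.priorities[self.next_idx] = priority
        self.next_idx = (self.next_idx + 1) % self.capacity

    def sample(self, batch_size, beta):
        probs = np.asarray(self.priorities) ** self.alpha
        probs = probs / probs.sum()
        idxes = np.random.choice(len(self.data), batch_size, p=probs)
        weights = (len(self.data) * probs[idxes]) ** (-beta)
        weights = weights / weights.max()          # normalise so max weight is 1
        return [self.data[i] for i in idxes], weights, idxes

    def update_priorities(self, idxes, new_priorities):
        # called after training with new_priorities = |td_error| + eps, as above
        for i, p in zip(idxes, new_priorities):
            self.priorities[i] = float(p)
# ---------------------------------------------------------------------------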
def learn(env, q_func, num_actions=4, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None): # Create all the functions necessary to train the model # Returns a session that will use <num_cpu> CPU's only sess = U.make_session(num_cpu=num_cpu) sess.__enter__() # Creates a placeholder for a batch of tensors of a given shape and dtype def make_obs_ph(name): return U.BatchInput((64, 64), name=name) # act, train, update_target are functions, debug is a dict act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10) # Choose between the prioritized replay buffer and the normal replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1 exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # The SC2-specific part starts here # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None path_memory = np.zeros((64, 64)) obs = env.reset() # Select all marines obs = env.step( actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) # obs is tuple, obs[0] is 'pysc2.env.environment.TimeStep', obs[0].observation is dictionary. player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] # Use path_memory to remember the trajectory already traversed screen = player_relative + path_memory # Get the center position of the two marines player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: update_param_noise_threshold = -np.log( 1. - exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True # np.array()[None] wraps an extra dimension around the array, e.g. 
[1] -> [[1]] action = act(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] reset = False coord = [player[0], player[1]] rew = 0 # There are only four actions (up, down, left, right); after a move, a row of -3 is left along the path so it cancels out the mineral-shard id (=3), indicating the shard was successfully collected. path_memory_ = np.array(path_memory, copy=True) if (action == 0): # UP if (player[1] >= 16): coord = [player[0], player[1] - 16] path_memory_[player[1] - 16:player[1], player[0]] = -3 elif (player[1] > 0): coord = [player[0], 0] path_memory_[0:player[1], player[0]] = -3 elif (action == 1): # DOWN if (player[1] <= 47): coord = [player[0], player[1] + 16] path_memory_[player[1]:player[1] + 16, player[0]] = -3 elif (player[1] > 47): coord = [player[0], 63] path_memory_[player[1]:63, player[0]] = -3 elif (action == 2): # LEFT if (player[0] >= 16): coord = [player[0] - 16, player[1]] path_memory_[player[1], player[0] - 16:player[0]] = -3 elif (player[0] < 16): coord = [0, player[1]] path_memory_[player[1], 0:player[0]] = -3 elif (action == 3): # RIGHT if (player[0] <= 47): coord = [player[0] + 16, player[1]] path_memory_[player[1], player[0]:player[0] + 16] = -3 elif (player[0] > 47): coord = [63, player[1]] path_memory_[player[1], player[0]:63] = -3 # Update path_memory path_memory = np.array(path_memory_) # If the marines cannot be moved, they probably have not been selected yet, so select them if _MOVE_SCREEN not in obs[0].observation["available_actions"]: obs = env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) # Move the marines new_action = [ sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord]) ] # Get the observation returned by the environment obs = env.step(actions=new_action) # Re-fetch player_relative here, because the obs from the previous line is a tuple holding several pieces of information # but what we store in the replay_buffer is only the reduced screen image player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] new_screen = player_relative + path_memory # Get the reward rew = obs[0].reward # StepType.LAST means done done = obs[0].step_type == environment.StepType.LAST # Store transition in the replay buffer replay_buffer.add(screen, action, rew, new_screen, float(done)) # Once the transition is stored, the old screen can be replaced by the new one screen = new_screen episode_rewards[-1] += rew if done: # Re-fetch the friendly/enemy/neutral relation map obs = env.reset() # player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] # # Still unclear why path_memory is added here # screen = player_relative + path_memory # player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() # player = [int(player_x.mean()), int(player_y.mean())] # # Select all marines (why do this on the done observation?) # env.step(actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) episode_rewards.append(0.0) # Clear path_memory path_memory = np.zeros((64, 64)) reset = True # Periodically sample experience from the replay buffer for training, and update the target network if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None # This train comes from deepq.build_train td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) # target network if t > learning_starts and t % target_network_update_freq == 0: # Also from deepq.build_train # Update target network periodically update_target() # Log to track the reward mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() # Save a checkpoint whenever the model improves if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act)
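# ---------------------------------------------------------------------------
# Hedged sketch: the linear annealing behaviour that the exploration epsilon and
# (when prioritized replay is enabled) the beta exponent rely on in the functions
# above. This mirrors the LinearSchedule contract as it is used here; the class
# name is an illustrative assumption.
class TinyLinearSchedule:
    def __init__(self, schedule_timesteps, initial_p, final_p):
        self.schedule_timesteps = schedule_timesteps
        self.initial_p = initial_p
        self.final_p = final_p

    def value(self, t):
        # interpolate linearly from initial_p to final_p, then hold final_p
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)

# e.g. epsilon anneals 1.0 -> exploration_final_eps over
# exploration_fraction * max_timesteps steps and then stays constant, while beta
# anneals prioritized_replay_beta0 -> 1.0 over prioritized_replay_beta_iters steps.
eps = TinyLinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)
assert abs(eps.value(0) - 1.0) < 1e-9 and abs(eps.value(20000) - 0.02) < 1e-9
# ---------------------------------------------------------------------------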
class Agent: def __init__(self, sess): print("Initializing the agent...") self.sess = sess self.env = Environment() self.state_size = self.env.get_state_size() self.action_size = self.env.get_action_size() print("Creation of the main QNetwork...") self.mainQNetwork = QNetwork(self.state_size, self.action_size, 'main') print("Main QNetwork created !\n") print("Creation of the target QNetwork...") self.targetQNetwork = QNetwork(self.state_size, self.action_size, 'target') print("Target QNetwork created !\n") self.buffer = PrioritizedReplayBuffer(parameters.BUFFER_SIZE, parameters.ALPHA) self.epsilon = parameters.EPSILON_START self.beta = parameters.BETA_START self.initial_learning_rate = parameters.LEARNING_RATE trainables = tf.trainable_variables() self.update_target_ops = updateTargetGraph(trainables) self.nb_ep = 1 self.best_run = -1e10 def pre_train(self): print("Beginning of the pre-training...") for i in range(parameters.PRE_TRAIN_STEPS): s = self.env.reset() done = False episode_step = 0 episode_reward = 0 while episode_step < parameters.MAX_EPISODE_STEPS and not done: a = random.randint(0, self.action_size - 1) s_, r, done, info = self.env.act(a) self.buffer.add(s, a, r, s_, done) s = s_ episode_reward += r episode_step += 1 if i % 100 == 0: print("\tPre-train step n", i) self.best_run = max(self.best_run, episode_reward) print("End of the pre training !") def run(self): print("Beginning of the run...") self.pre_train() self.total_steps = 0 self.nb_ep = 1 while self.nb_ep < parameters.TRAINING_STEPS: self.learning_rate = self.initial_learning_rate * \ (parameters.TRAINING_STEPS - self.nb_ep) / \ parameters.TRAINING_STEPS s = self.env.reset() episode_reward = 0 done = False memory = deque() discount_R = 0 episode_step = 0 max_step = parameters.MAX_EPISODE_STEPS + \ self.nb_ep // parameters.EP_ELONGATION # Render parameters self.env.set_render(self.nb_ep % parameters.RENDER_FREQ == 0) while episode_step < max_step and not done: if random.random() < self.epsilon: a = random.randint(0, self.action_size - 1) else: a = self.sess.run(self.mainQNetwork.predict, feed_dict={self.mainQNetwork.inputs: [s]}) a = a[0] s_, r, done, info = self.env.act(a) episode_reward += r memory.append((s, a, r, s_, done)) if len(memory) > parameters.N_STEP_RETURN: s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft() discount_R = r_mem for i, (si, ai, ri, s_i, di) in enumerate(memory): discount_R += ri * parameters.DISCOUNT ** (i + 1) self.buffer.add(s_mem, a_mem, discount_R, s_, done) if episode_step % parameters.TRAINING_FREQ == 0: train_batch = self.buffer.sample(parameters.BATCH_SIZE, self.beta) # Incr beta if self.beta <= parameters.BETA_STOP: self.beta += parameters.BETA_INCR feed_dict = {self.mainQNetwork.inputs: train_batch[0]} oldQvalues = self.sess.run(self.mainQNetwork.Qvalues, feed_dict=feed_dict) tmp = [0] * len(oldQvalues) for i, oldQvalue in enumerate(oldQvalues): tmp[i] = oldQvalue[train_batch[1][i]] oldQvalues = tmp feed_dict = {self.mainQNetwork.inputs: train_batch[3]} mainQaction = self.sess.run(self.mainQNetwork.predict, feed_dict=feed_dict) feed_dict = {self.targetQNetwork.inputs: train_batch[3]} targetQvalues = self.sess.run(self.targetQNetwork.Qvalues, feed_dict=feed_dict) # Done multiplier : # equals 0 if the episode was done # equals 1 else done_multiplier = (1 - train_batch[4]) doubleQ = targetQvalues[range(parameters.BATCH_SIZE), mainQaction] targetQvalues = train_batch[2] + \ parameters.DISCOUNT * doubleQ * done_multiplier errors = np.square(targetQvalues - oldQvalues) + 1e-6 
self.buffer.update_priorities(train_batch[6], errors) feed_dict = {self.mainQNetwork.inputs: train_batch[0], self.mainQNetwork.Qtarget: targetQvalues, self.mainQNetwork.actions: train_batch[1], self.mainQNetwork.learning_rate: self.learning_rate} _ = self.sess.run(self.mainQNetwork.train, feed_dict=feed_dict) update_target(self.update_target_ops, self.sess) s = s_ episode_step += 1 self.total_steps += 1 # Decay epsilon if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY DISPLAYER.add_reward(episode_reward) # if episode_reward > self.best_run and \ # self.nb_ep > 50: # self.best_run = episode_reward # print("Save best", episode_reward) # SAVER.save('best') # self.play(1) self.total_steps += 1 if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon: %.3f' ', Max steps: %i, Learning rate: %g' % ( self.nb_ep, episode_reward, episode_step, self.epsilon, max_step, self.learning_rate)) # Save the model if self.nb_ep % parameters.SAVE_FREQ == 0: SAVER.save(self.nb_ep) self.nb_ep += 1 def play(self, number_run): print("Playing for", number_run, "runs") try: for i in range(number_run): self.env.set_render(True) s = self.env.reset() episode_reward = 0 done = False episode_step = 0 max_step = parameters.MAX_EPISODE_STEPS + \ self.nb_ep // parameters.EP_ELONGATION while episode_step < max_step and not done: a = self.sess.run(self.mainQNetwork.predict, feed_dict={self.mainQNetwork.inputs: [s]}) a = a[0] s, r, done, info = self.env.act(a) episode_reward += r episode_step += 1 print("Episode reward :", episode_reward) except KeyboardInterrupt as e: pass except Exception as e: print("Exception :", e) finally: self.env.set_render(False) print("End of the demo") self.env.close() def stop(self): self.env.close()
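# ---------------------------------------------------------------------------
# Hedged sketch (illustrative, not the Agent's code): the N-step return
# accumulation performed with the deque in run() above. Once the deque holds more
# than N transitions, the oldest one is popped, its reward is combined with the
# discounted rewards of the transitions that follow, and the resulting
# (s_t, a_t, G_t, s_latest, done_latest) tuple is what gets pushed into the buffer.
# gamma and n stand in for parameters.DISCOUNT and parameters.N_STEP_RETURN.
from collections import deque

def nstep_transition(memory, new_transition, gamma, n):
    """memory: deque of (s, a, r, s_next, done); returns an n-step tuple or None."""
    memory.append(new_transition)
    if len(memory) <= n:
        return None                      # not enough steps accumulated yet
    s0, a0, r0, _, _ = memory.popleft()
    g = r0
    for i, (_, _, ri, _, _) in enumerate(memory):
        g += (gamma ** (i + 1)) * ri     # discounted sum of the following rewards
    s_last, done_last = memory[-1][3], memory[-1][4]
    return (s0, a0, g, s_last, done_last)
# ---------------------------------------------------------------------------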
class Agent: def __init__(self, level_name): # level name == model name => set in train.py/play.py self.level_name = level_name # setup environment self.env = make_custom_env(disc_acts=True) # one hot encoded version of our actions self.possible_actions = np.array( np.identity(self.env.action_space.n, dtype=int).tolist()) # resest graph tf.reset_default_graph() # instantiate the DQNetwork self.DQNetwork = DDQNPrio(state_size, self.env.action_space.n, learning_rate, name="DDQNPrio") # instantiate the TargetNetwork self.TargetNetwork = DDQNPrio(state_size, self.env.action_space.n, learning_rate, name="TargetNetwork") # instantiate a linear decay schedule for the exploration rate self.epsilon_schedule = LinearSchedule(DECAY_STEPS, initial_p=EXPLORE_START, final_p=EXPLORE_STOP) # instantiate memory self.memory = PrioritizedReplayBuffer(size=memory_size, alpha=REPLAY_ALPHA) self.beta_schedule = LinearSchedule(REPLAY_BETA0_ITERS, initial_p=REPLAY_BETA0, final_p=1.0) # saver will help us save our model self.saver = tf.train.Saver(save_relative_paths=True) # setup tensorboard writer self.writer = tf.summary.FileWriter("logs/{}/".format(self.level_name)) # write initial loss tf.summary.scalar("Loss", self.DQNetwork.loss) self.write_op = tf.summary.merge_all() # set the initial number of lives self.lives = 4 # initialize the memory: fill the memory with experiences for i in range(pretrain_length): if i == 0: print("Initializing Memory with {} experiences!".format( pretrain_length)) # initialize the x0 - previous position - to 24 (initial position) x0 = 24 # initialize stuck variables stuck = False stuck_pos_cp = 24 # reset the environment state = self.env.reset() # Get next state, the rewards, done by taking a random action choice = random.randint(1, len(self.possible_actions)) - 1 action = self.possible_actions[choice] next_state, reward, done, info = self.env.step(choice) # compute the current x_position x1 = self._current_xpos(int(info['xpos']), int(info['xpos_multiplier'])) # compute the positional reward x0, reward = self.x_pos_reward(x1, x0, reward) # check if Mario is stuck if i % STUCK_STEPS == 0: stuck, reward = self.check_stuck(x1, stuck_pos_cp, reward) # check if Mario is still alive killed, reward = self.check_killed(int(info['lives']), reward) if done or killed or stuck: # we inished the episode next_state = np.zeros((HEIGHT, WIDTH, N_FRAMES), dtype=np.int) # add experience to memory self.memory.add(state, action, reward, next_state, done) # start a new episode state = self.env.reset() # reset x0 - previous position - to 24 (initial position) x0 = 24 # reset stuck variables stuck = False stuck_pos_cp = 24 else: # add experience to memory self.memory.add(state, action, reward, next_state, done) # our new state is now the next_state state = next_state def predict_action(self, sess, state, actions, t): # first we randomize a number exp_tradeoff = np.random.rand() # compute the current exploration probability # exponential decay # explore_probability = EXPLORE_STOP + (EXPLORE_START - EXPLORE_STOP) * np.exp(-DECAY_RATE * decay_step) # linear decay explore_probability = self.epsilon_schedule.value(t) if explore_probability > exp_tradeoff: # make a random action choice = random.randint(1, len(self.possible_actions)) - 1 action = self.possible_actions[choice] else: # transform LazyFrames into np array [None, HEIGHT, WIDTH, N_FRAMES] state = np.array(state) # estimate the Qs values state Qs = sess.run(self.DQNetwork.output, feed_dict={ self.DQNetwork.inputs_: state.reshape((1, *state.shape)) }) 
# take the biggest Q value (= best action) choice = np.argmax(Qs) action = self.possible_actions[choice] return action, choice, explore_probability def train(self): config = tf.ConfigProto() config.gpu_options.allow_growth = True # pylint: disable=no-member with tf.Session(config=config) as sess: # initialize the variables sess.run(tf.global_variables_initializer()) # initialize decay step and tau t = 0 tau = 0 # Update the parameters of our TargetNetwork with DQN_weights update_target = self.update_target_graph() sess.run(update_target) # score tracker score_tracker = [] print("Total Number of Steps:", TOTAL_TIMESTEPS) print("Full Priority Replay after {} steps".format( REPLAY_BETA0_ITERS)) print("Exploration Probability @ {} after: {} steps".format( EXPLORE_STOP, DECAY_STEPS)) for episode in range(TOTAL_EPISODES): # set step to 0 step = 0 # initialize the stuck_pos_cp to 24 (initial position) stuck_pos_cp = 24 # initialize the x0 - previous position - to 24 (initial position) x0 = 24 x1 = 24 # initialize stuck to False stuck = False # initialize rewards of episode episode_rewards = [0.0] # initialize episode loss episode_loss = [] # make a new episode and observe the first state state = self.env.reset() print("Episode:", episode) while step < MAX_STEPS: step += 1 t += 1 tau += 1 # predict an action action, choice, explore_probability = self.predict_action( sess, state, self.possible_actions, t) # perform the action and get the next_state, reward, and done information next_state, reward, done, info = self.env.step(choice) if episode_render: self.env.render() # check if Mario is still alive killed, reward = self.check_killed(int(info['lives']), reward) if not killed: # compute the current x_position x1 = self._current_xpos(int(info['xpos']), int(info['xpos_multiplier'])) # compute the positional reward x0, reward = self.x_pos_reward(x1, x0, reward) # check if Mario is stuck if step % STUCK_STEPS == 0: stuck, reward = self.check_stuck( x1, stuck_pos_cp, reward) # check if Mario has finished the level if done: reward = 0.0 print("\tEpisode ended!") if step == MAX_STEPS: print("\tMax Steps per Episode reached.") # TODO: implement time penality for taking too long.... 
if t % TIME_DECAY_PENALTY == 0: reward -= 1.0 # add the reward to total reward episode_rewards.append(reward) if killed or stuck or done or step == MAX_STEPS: # the episode ends so no next state next_state = np.zeros((HEIGHT, WIDTH, N_FRAMES), dtype=np.int) # set step = MAX_STEPS to end episode step = MAX_STEPS # get total reward of the episode total_reward = np.sum(episode_rewards) average_loss = np.mean(episode_loss) print("Episode:", episode, "Total Steps:", t, "Total reward:", total_reward, "Xpos:", x0, "Explore P:", explore_probability, "Average Training Loss:", average_loss) # remember the episode and the score score_tracker.append({ "episode": episode, "reward": total_reward, "xpos": x0 }) # store transition <s_i, a, r_{i+1}, s_{i+1}> in memory self.memory.add(state, action, reward, next_state, done) else: # store transition <s_i, a, r_{i+1}, s_{i+1}> in memory self.memory.add(state, action, reward, next_state, done) # s_{i} := s_{i+1} state = next_state #### LEARNING PART #### # Obtain random mini-batch from prioritized experience replay memory experience = self.memory.sample( batch_size, beta=self.beta_schedule.value(t)) (states_t, actions, rewards, states_tp1, dones, weights, idxes) = experience target_Qs_batch = [] # DOUBLE DQN Logic # Use DQNNetwork to select the action to take at next_state (a') (action with the highest Q-value) # Use TargetNetwork to calculate the Q_val of Q(s',a') # See below in set Q-targets # Get Q values for next_state q_next_state = sess.run( self.DQNetwork.output, feed_dict={self.DQNetwork.inputs_: states_tp1}) # Calculate Qtarget for all actions that state q_target_next_state = sess.run( self.TargetNetwork.output, feed_dict={self.TargetNetwork.inputs_: states_tp1}) # set Q-targets for i in range(batch_size): terminal = dones[i] # retrieve a' action from the DDQNPrio action = np.argmax(q_next_state[i]) # if we are in a terminal state i.e. 
if episode ends with s+1, target only equals reward if terminal: target_Qs_batch.append(rewards[i]) else: # otherwise take action a' from DDQNetwork # and set Qtarget = r + GAMMA * TargetNetwork(s',a') target = rewards[ i] + GAMMA * q_target_next_state[i][action] target_Qs_batch.append(target) # all mini batch targets targets = np.array([each for each in target_Qs_batch]) # run a forward and backward pass to get the TD-errors _, loss, absolute_errors = sess.run( [ self.DQNetwork.optimizer, self.DQNetwork.loss, self.DQNetwork.absolute_errors ], feed_dict={ self.DQNetwork.inputs_: states_t, self.DQNetwork.target_Q: targets, self.DQNetwork.actions_: actions, self.DQNetwork.importance_weights_ph_: weights }) # update the experience priorities according to absolute errors new_priorities = absolute_errors + REPLAY_EPS self.memory.update_priorities(idxes, new_priorities) # store loss episode_loss.append(loss) # write tf summaries summary = sess.run( self.write_op, feed_dict={ self.DQNetwork.inputs_: states_t, self.DQNetwork.target_Q: targets, self.DQNetwork.actions_: actions, self.DQNetwork.importance_weights_ph_: weights }) self.writer.add_summary(summary, episode) self.writer.flush() if tau > MAX_TAU: # Update the parameters of our TargetNetwork with DQN_weights update_target = self.update_target_graph() sess.run(update_target) tau = 0 print("Model updated") # # save model every 5 episodes # if episode % 5 == 0: # self.saver.save(sess, "./models/{0}/".format(self.level_name), global_step=episode) # print("Model Saved") sorted_scores = sorted(score_tracker, key=lambda ele: ele['xpos'], reverse=True) print("Sorted according to MAX XPOS\n", sorted_scores) sorted_scores = sorted(score_tracker, key=lambda ele: ele['reward'], reverse=True) print("Sorted according to MAX REWARD\n", sorted_scores) self.env.close() def _current_xpos(self, xpos, xpos_multiplier): """ Compute the current position of Mario. Inputs: - xpos: x_position (from 0 to 255) - xpos_multiplier: how many times the xpos has been looped Returns: - current x_position """ return xpos + xpos_multiplier * 255 def x_pos_reward(self, x1, x0, reward): """ Computes the positional reward; reward = x1 - x0 - x1: current position - x0: previous position Returns: - new previous position x0 = x1 - update reward """ reward += x1 - x0 return x1, reward def check_stuck(self, xpos, stuck_pos_cp, reward): """ Checks if Mario is stuck i.e. Mario's xpos has not changed since the last check. Inputs: - xpos: Mario's current x_position - stuck_pos_cp: Mario's x_position at the last check. - reward: the current step's reward Returns: - stuck: bool - True if Mario's x_position hasn't changed - reward: float - updated reward """ stuck = False if xpos == stuck_pos_cp: reward = 0 stuck = True print("\tMario is stuck! Restarting!", reward) return stuck, reward def check_killed(self, curr_n_lives, reward): """ Checks if Mario has died. If so adjusts the reward. Inputs: - curr_n_lives: Mario's current number of lives - reward: the current step's reward Returns: - killed: bool - True if Mario's has died. - reward: float - updated reward """ killed = False if curr_n_lives != self.lives: reward = PENALTY_DYING # update reward with dying penalty killed = True print("\tMario died! 
Restarting!", reward) return killed, reward def update_target_graph(self): """ # This function helps us to copy one set of variables to another # In our case we use it when we want to copy the parameters of DQN to Target_network # Thanks of the very good implementation of Arthur Juliani https://github.com/awjuliani """ # Get the parameters of our DDQNPrio from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "DDQNPrio") # Get the parameters of our Target_network to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "TargetNetwork") op_holder = [] # Update our target_network parameters with DQNNetwork parameters for from_var, to_var in zip(from_vars, to_vars): op_holder.append(to_var.assign(from_var)) return op_holder
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, thompson=True, prior="no prior", **network_kwargs): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" blr_params = BLRParams() # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) # q_func = build_q_func(network, **network_kwargs) q_func = build_q_func_and_features(network, hiddens=[blr_params.feat_dim], **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) #deep mind optimizer # dm_opt = tf.train.RMSPropOptimizer(learning_rate=0.00025,decay=0.95,momentum=0.0,epsilon=0.00001,centered=True) act, train, update_target, debug, blr_additions = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer( learning_rate=lr ), #tf.train.RMSPropOptimizer(learning_rate=lr,momentum=0.95),# gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, thompson=thompson, double_q=thompson) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: # replay_buffer = ReplayBuffer(buffer_size) replay_buffer = ReplayBufferPerActionNew(buffer_size, env.action_space.n) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) num_actions = env.action_space.n if thompson: # Create parameters for Bayesian Regression feat_dim = blr_additions['feat_dim'] num_models = 5 print("num models is: {}".format(num_models)) w_sample = np.random.normal(loc=0, scale=blr_params.sigma, size=(num_actions, num_models, feat_dim)) w_mu = np.zeros((num_actions, feat_dim)) w_cov = np.zeros((num_actions, feat_dim, feat_dim)) for i in range(num_actions): w_cov[i] = blr_params.sigma * np.eye(feat_dim) phiphiT = np.zeros((num_actions, feat_dim, feat_dim), dtype=np.float32) phiphiT_inv = np.zeros((num_actions, feat_dim, feat_dim), dtype=np.float32) for i in range(num_actions): phiphiT[i] = (1 / blr_params.sigma) * np.eye(feat_dim) phiphiT_inv[i] = blr_params.sigma * np.eye(feat_dim) old_phiphiT_inv = [phiphiT_inv for i in range(5)] phiY = np.zeros((num_actions, feat_dim), dtype=np.float32) YY = np.zeros(num_actions) model_idx = np.random.randint(0, num_models, size=num_actions) blr_ops = blr_additions['blr_ops'] blr_ops_old = blr_additions['blr_ops_old'] last_layer_weights = np.zeros((feat_dim, num_actions)) phiphiT0 = np.copy(phiphiT) invgamma_a = [blr_params.a0 for _ in range(num_actions)] invgamma_b = [blr_params.a0 for _ in range(num_actions)] # Initialize the parameters and copy them to the target network. 
U.initialize() # update_target() if thompson: blr_additions['update_old']() if isinstance(blr_additions['update_old_target'], list): for update_net in reversed(blr_additions['update_old_target']): update_net() else: blr_additions['update_old_target']() if blr_additions['old_networks'] is not None: for key in blr_additions['old_networks'].keys(): blr_additions['old_networks'][key]["update"]() episode_rewards = [0.0] # episode_Q_estimates = [0.0] unclipped_episode_rewards = [0.0] # eval_rewards = [0.0] old_networks_num = 5 # episode_pseudo_count = [[0.0] for i in range(old_networks_num)] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) actions_hist = [0 for _ in range(num_actions)] actions_hist_total = [0 for _ in range(num_actions)] last_layer_weights_decaying_average = None blr_counter = 0 action_buffers_size = 512 action_buffers = [ ReplayBuffer(action_buffers_size) for _ in range(num_actions) ] eval_flag = False eval_counter = 0 for t in tqdm(range(total_timesteps)): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True if thompson: # for each action sample one of the num_models samples of w model_idx = np.random.randint(0, num_models, size=num_actions) cur_w = np.zeros((num_actions, feat_dim)) for i in range(num_actions): cur_w[i] = w_sample[i, model_idx[i]] action, estimate = act(np.array(obs)[None], cur_w[None]) actions_hist[int(action)] += 1 actions_hist_total[int(action)] += 1 else: action, estimate = act(np.array(obs)[None], update_eps=update_eps, **kwargs) env_action = action reset = False new_obs, unclipped_rew, done_list, _ = env.step(env_action) if isinstance(done_list, list): done, real_done = done_list else: done, real_done = done_list, done_list rew = np.sign(unclipped_rew) # Store transition in the replay buffer. 
replay_buffer.add(obs, action, rew, new_obs, float(done)) action_buffers[action].add(obs, action, rew, new_obs, float(done)) if action_buffers[action]._next_idx == 0: obses_a, actions_a, rewards_a, obses_tp1_a, dones_a = replay_buffer.get_samples( [i for i in range(action_buffers_size)]) phiphiT_a, phiY_a, YY_a = blr_ops_old(obses_a, actions_a, rewards_a, obses_tp1_a, dones_a) phiphiT[action] += phiphiT_a phiY[action] += phiY_a YY[action] += YY_a precision = phiphiT[action] + phiphiT0[action] cov = np.linalg.pinv(precision) mu = np.array( np.dot(cov, (phiY[action] + np.dot( phiphiT0[action], last_layer_weights[:, action])))) invgamma_a[action] += 0.5 * action_buffers_size b_upd = 0.5 * YY[action] b_upd += 0.5 * np.dot( last_layer_weights[:, action].T, np.dot(phiphiT0[action], last_layer_weights[:, action])) b_upd -= 0.5 * np.dot(mu.T, np.dot(precision, mu)) invgamma_b[action] += b_upd # old_phiphiT_inv_a = [np.tile(oppTi[action], (action_buffers_size,1,1)) for oppTi in old_phiphiT_inv] # old_pseudo_count = blr_additions['old_pseudo_counts'](obses_a, *old_phiphiT_inv_a) # old_pseudo_count = np.sum(old_pseudo_count, axis=-1) # for i in range(old_networks_num): # idx = ((blr_counter-1)-i) % old_networks_num # arrange networks from newest to oldest # episode_pseudo_count[i][-1] += old_pseudo_count[idx] # if real_done: # for a in range(num_actions): # if action_buffers[a]._next_idx != 0: # obses_a, actions_a, rewards_a, obses_tp1_a, dones_a = replay_buffer.get_samples([i for i in range(action_buffers[a]._next_idx)]) # nk = obses_a.shape[0] # # # old_phiphiT_inv_a = [np.tile(oppTi[action],(nk,1,1)) for oppTi in old_phiphiT_inv] # # old_pseudo_count = blr_additions['old_pseudo_counts'](obses_a, *old_phiphiT_inv_a) # # old_pseudo_count = np.sum(old_pseudo_count, axis=-1) # # for i in range(old_networks_num): # # idx = ((blr_counter-1)-i) % old_networks_num # arrange networks from newest to oldest # # episode_pseudo_count[i][-1] += old_pseudo_count[idx] # # phiphiT_a, phiY_a, YY_a = blr_ops_old(obses_a, actions_a, rewards_a, obses_tp1_a, dones_a) # phiphiT[a] += phiphiT_a # phiY[a] += phiY_a # YY[a] += YY_a # # action_buffers[a]._next_idx = 0 obs = new_obs episode_rewards[-1] += rew # episode_Q_estimates[-1] += estimate unclipped_episode_rewards[-1] += unclipped_rew if t % 250000 == 0 and t > 0: eval_flag = True if done: obs = env.reset() episode_rewards.append(0.0) # episode_Q_estimates.append(0.0) reset = True if real_done: unclipped_episode_rewards.append(0.0) # for i in range(old_networks_num): # episode_pseudo_count[i].append(0.0) # every time full episode ends run eval episode if eval_flag: te = 0 print("running evaluation") eval_rewards = [0.0] while te < 125000: # for te in range(125000): real_done = False print(te) while not real_done: action, _ = blr_additions['eval_act']( np.array(obs)[None]) new_obs, unclipped_rew, done_list, _ = env.step( action) if isinstance(done_list, list): done, real_done = done_list else: done, real_done = done_list, done_list eval_rewards[-1] += unclipped_rew obs = new_obs te += 1 if done: obs = env.reset() if real_done: eval_rewards.append(0.0) obs = env.reset() eval_rewards.pop() mean_reward_eval = round(np.mean(eval_rewards), 2) logger.record_tabular("mean eval episode reward", mean_reward_eval) logger.dump_tabular() eval_flag = False # eval_counter += 1 # if eval_counter % 10 == 0: # if t > learning_starts: # real_done = False # while not real_done: # action, _ = blr_additions['eval_act'](np.array(obs)[None]) # new_obs, unclipped_rew, done_list, _ = 
env.step(action) # done, real_done = done_list # eval_rewards[-1] += unclipped_rew # obs = new_obs # eval_rewards.append(0.0) # obs = env.reset() if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if thompson: if t > learning_starts and t % ( blr_params.update_w * target_network_update_freq) == 0: phiphiT_inv = np.zeros_like(phiphiT) for i in range(num_actions): try: phiphiT_inv[i] = np.linalg.inv(phiphiT[i]) except: phiphiT_inv[i] = np.linalg.pinv(phiphiT[i]) old_phiphiT_inv[blr_counter % 5] = phiphiT_inv llw = sess.run(blr_additions['last_layer_weights']) phiphiT, phiY, phiphiT0, last_layer_weights, YY, invgamma_a, invgamma_b = BayesRegression( phiphiT, phiY, replay_buffer, blr_additions['feature_extractor'], blr_additions['target_feature_extractor'], num_actions, blr_params, w_mu, w_cov, llw, prior=prior, blr_ops=blr_additions['blr_ops'], sdp_ops=blr_additions['sdp_ops'], old_networks=blr_additions['old_networks'], blr_counter=blr_counter, old_feat=blr_additions['old_feature_extractor'], a=invgamma_a) blr_counter += 1 if seed is not None: print('seed is {}'.format(seed)) blr_additions['update_old']() if isinstance(blr_additions['update_old_target'], list): for update_net in reversed( blr_additions['update_old_target']): update_net() else: blr_additions['update_old_target']() if blr_additions['old_networks'] is not None: blr_additions['old_networks'][blr_counter % 5]["update"]() if thompson: if t > 0 and t % blr_params.sample_w == 0: # sampling num_models samples of w if debug: print(actions_hist) else: if t % 10000 == 0: print(actions_hist) actions_hist = [0 for _ in range(num_actions)] # if t > 1000000: adaptive_sigma = True # else: # adaptive_sigma = False cov_norms = [] cov_norms_no_sigma = [] sampled_sigmas = [] for i in range(num_actions): if prior == 'no prior' or last_layer_weights is None: cov = np.linalg.inv(phiphiT[i]) mu = np.array(np.dot(cov, phiY[i])) elif prior == 'last layer': cov = np.linalg.inv(phiphiT[i]) mu = np.array( np.dot(cov, (phiY[i] + (1 / blr_params.sigma) * last_layer_weights[:, i]))) elif prior == 'single sdp': try: cov = np.linalg.inv(phiphiT[i] + phiphiT0) except: print("singular matrix using pseudo inverse") cov = np.linalg.pinv(phiphiT[i] + phiphiT0) mu = np.array( np.dot(cov, (phiY[i] + np.dot( phiphiT0, last_layer_weights[:, i])))) elif prior == 'sdp' or prior == 'linear': try: cov = np.linalg.inv(phiphiT[i] + phiphiT0[i]) except: # print("singular matrix") cov = np.linalg.pinv(phiphiT[i] + phiphiT0[i]) mu = np.array( np.dot(cov, (phiY[i] + np.dot( phiphiT0[i], last_layer_weights[:, i])))) else: print("No valid prior") exit(0) for j in range(num_models): if adaptive_sigma: sigma = invgamma_b[i] * invgamma.rvs( invgamma_a[i]) else: sigma = blr_params.sigma try: w_sample[i, j] = np.random.multivariate_normal( mu, sigma * cov) except: w_sample[i, j] = mu cov_norms.append(np.linalg.norm(sigma * cov)) cov_norms_no_sigma.append(np.linalg.norm(cov)) 
sampled_sigmas.append(sigma) if t % 7 == 0: for i, cov_norm in enumerate(cov_norms): print( "cov*sigma norm for action {}: {}, visits: {}". format(i, cov_norm, len(replay_buffer.buffers[i]))) # if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. # print(update_target) # update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) mean_10ep_reward = round(np.mean(episode_rewards[-11:-1]), 1) mean_100ep_reward_unclipped = round( np.mean(unclipped_episode_rewards[-101:-1]), 1) mean_10ep_reward_unclipped = round( np.mean(unclipped_episode_rewards[-11:-1]), 1) # mean_100ep_reward_eval = round(np.mean(eval_rewards[-101:-1]), 1) # mean_10ep_reward_eval = round(np.mean(eval_rewards[-11:-1]), 1) # mean_100ep_est = round(np.mean(episode_Q_estimates[-101:-1]), 1) # mean_10ep_est = round(np.mean(episode_Q_estimates[-11:-1]), 1) num_episodes = len(episode_rewards) # mean_10ep_pseudo_count = [0.0 for _ in range(old_networks_num)] # mean_100ep_pseudo_count = [0.0 for _ in range(old_networks_num)] # for i in range(old_networks_num): # mean_10ep_pseudo_count[i] = round(np.log(np.mean(episode_pseudo_count[i][-11:-1])), 1) # mean_100ep_pseudo_count[i] = round(np.log(np.mean(episode_pseudo_count[i][-101:-1])), 1) # if done and print_freq is not None and len(episode_rewards) % print_freq == 0: if t % 10000 == 0 and t > 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("mean 10 episode reward", mean_10ep_reward) logger.record_tabular("mean 100 unclipped episode reward", mean_100ep_reward_unclipped) logger.record_tabular("mean 10 unclipped episode reward", mean_10ep_reward_unclipped) # logger.record_tabular("mean 100 eval episode reward", mean_100ep_reward_eval) # logger.record_tabular("mean 10 eval episode reward", mean_10ep_reward_eval) # for i in range(old_networks_num): # logger.record_tabular("mean 10 episode pseudo count for -{} net".format(i+1), mean_10ep_pseudo_count[i]) # logger.record_tabular("mean 100 episode pseudo count for -{} net".format(i+1), mean_100ep_pseudo_count[i]) # logger.record_tabular("mean 100 episode Q estimates", mean_100ep_est) # logger.record_tabular("mean 10 episode Q estimates", mean_10ep_est) logger.dump_tabular() if t % 7 == 0: print("len(unclipped_episode_rewards)") print(len(unclipped_episode_rewards)) print("len(episode_rewards)") print(len(episode_rewards)) if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
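# ----------------------------------------------------------------------
# Hedged sketch (not the project's code): the Thompson-sampling branch above
# keeps per-action Normal-inverse-gamma statistics over the last-layer weights
# (phiphiT, phiY, invgamma_a, invgamma_b) and periodically draws weight samples
# w ~ N(mu, sigma^2 * cov) with sigma^2 ~ b * InvGamma(a).  The class below is
# a minimal, self-contained illustration of that posterior update and sampling
# step in plain NumPy/SciPy; all names here are hypothetical.

import numpy as np
from scipy.stats import invgamma


class PerActionBayesRegression:
    """Toy Bayesian linear regression head for a single action (illustration only)."""

    def __init__(self, feat_dim, a0=1.0, b0=1.0):
        self.phiphiT = np.eye(feat_dim)   # running precision (identity prior)
        self.phiY = np.zeros(feat_dim)    # running sum of phi * target
        self.a = a0                       # inverse-gamma shape
        self.b = b0                       # inverse-gamma scale

    def update(self, phi, y):
        # Accumulate sufficient statistics for one (feature, target) pair.
        self.phiphiT += np.outer(phi, phi)
        self.phiY += phi * y
        self.a += 0.5
        self.b += 0.5 * y * y             # simplified; the code above also subtracts
                                          # the mu^T * precision * mu correction term

    def sample_w(self):
        # Draw one weight vector from the (approximate) posterior.
        cov = np.linalg.pinv(self.phiphiT)
        mu = cov @ self.phiY
        sigma2 = self.b * invgamma.rvs(self.a)
        return np.random.multivariate_normal(mu, sigma2 * cov)


# Usage (hypothetical): act greedily w.r.t. one posterior sample per action,
# i.e. Thompson sampling over Q(s, a) = phi(s) . w_a.
# heads = [PerActionBayesRegression(feat_dim=8) for _ in range(num_actions)]
# action = int(np.argmax([phi_s @ h.sample_w() for h in heads]))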
def learn(env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, callback=None): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training param_noise: bool where param noise should be present callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): print("ENV.OBSERVATION_SPACE: {}".format(env.observation_space)) return ObservationInput(env.observation_space, name=name) act, train, update_target, debug = build_graph.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True mean_100ep_reward = 0 rew = 0 num_episodes = 0 with tempfile.TemporaryDirectory() as td: model_file = os.path.join(td, "model") print("model_file : %s" % model_file) model_saved = False history = np.stack((obs, obs, obs, obs), axis=2) history = np.reshape([history], (1, 84, 84, 4)) for t in range(max_timesteps): if callback is not None: if callback(locals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True reshape_obs = np.reshape([obs], (1, 84, 84, 1)) history = np.append(reshape_obs, history[:, :, :, :3], axis=3) processed_obs = np.reshape([history], (84, 84, 4)) action = act(np.array(processed_obs)[None], update_eps=update_eps, **kwargs)[0] reset = False new_obs, rew, done, _ = env.step(action) obs = new_obs next_state = np.reshape([new_obs], (1, 84, 84, 1)) next_history = np.append(next_state, history[:, :, :, :3], axis=3) processed_new_obs = np.reshape([history], (84, 84, 4)) # Store transition in the replay buffer. replay_buffer.add(processed_obs, action, rew, processed_new_obs, float(done)) episode_rewards[-1] += rew epi_reward = episode_rewards[-1] if done: obs = env.reset() episode_rewards.append(0.0) reset = True history = np.stack( (next_state, next_state, next_state, next_state), axis=2) history = np.reshape([history], (1, 84, 84, 4)) else: history = next_history if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", epi_reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0: if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) print( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act, act_params)
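# ----------------------------------------------------------------------
# Hedged sketch: the variant above keeps a rolling stack of the last four
# 84x84 frames (newest first) and feeds the (84, 84, 4) stack to the Q
# network.  (Note that it stores the pre-step `history` rather than
# `next_history` as the successor observation in the replay buffer, which may
# be unintended.)  Below is a minimal frame-stacking helper with the same
# newest-first convention; the class name is hypothetical.

import numpy as np


class FrameStack:
    """Keep the most recent k single-channel frames, newest first."""

    def __init__(self, k=4, shape=(84, 84)):
        self.k = k
        self.frames = np.zeros(shape + (k,), dtype=np.float32)

    def reset(self, frame):
        # Fill all k slots with the first frame of the episode.
        self.frames = np.repeat(frame[..., None], self.k, axis=-1)
        return self.frames

    def push(self, frame):
        # Drop the oldest frame and put the new one in channel 0.
        self.frames = np.concatenate([frame[..., None], self.frames[..., :-1]], axis=-1)
        return self.frames


# Usage (hypothetical):
# stack = FrameStack()
# obs_stacked = stack.reset(first_frame)    # (84, 84, 4) at episode start
# next_stacked = stack.push(next_frame)     # after every env.step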
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(
    1. - exploration.value(num_iters) +
    exploration.value(num_iters) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = (
    num_iters % args.param_noise_update_freq == 0)
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
reset = False
new_obs, rew, done, _ = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
total_reward += rew
obs = new_obs
if done:
    num_iters_since_reset = 0
    total_reward = 0
    obs = env.reset()
    reset = True
if (num_iters > max(5 * args.batch_size, args.replay_buffer_size // 20)
        and num_iters % args.learning_freq == 0):
    # Sample a bunch of transitions from replay buffer
    if args.prioritized:
        experience = replay_buffer.sample(
            args.batch_size, beta=beta_schedule.value(num_iters))
def run_mountaincar_and_save_results(lr, kappa, timesteps_per_update_target, timesteps_per_action_taken, gamma, prioritize, alpha, beta, folder_path): episode_length = [0] Q_errors = [] state_errors = [] grad_sums_of_squares = [] with tf.variable_scope("Q"): Q_params, Q_function = Q_model(256, tf.nn.softplus) with tf.variable_scope('Q_target'): Q_params_target, Q_function_target = Q_model(256, tf.nn.softplus) obses = tf.placeholder(tf.float32, shape=[None, 2]) actions = tf.placeholder(tf.int32, shape=[None]) rewards = tf.placeholder(tf.float32, shape=[None]) next_obses = tf.placeholder(tf.float32, shape=[None, 2]) dones = tf.placeholder(tf.float32, shape=[None]) weights = tf.placeholder(tf.float32, shape=[None]) Q_values_target = tf.placeholder(tf.float32, shape=[None]) Q_function_obses = Q_function(obses) Q_values_per_action = Q_function_obses[0] Q_difference = tf.reduce_sum(Q_values_per_action * tf.one_hot(actions, 3), axis=1) - Q_values_target state_prediction = Q_function_obses[1] if prioritize: Q_error = tf.reduce_mean(tf.square(Q_difference) * weights) state_error = tf.reduce_mean( tf.square( tf.reduce_sum(state_prediction * tf.expand_dims(tf.one_hot(actions, 3), 1), axis=2) - next_obses) * tf.expand_dims(weights, 1)) else: Q_error = tf.reduce_mean(tf.square(Q_difference)) state_error = tf.reduce_mean( tf.square( tf.reduce_sum(state_prediction * tf.expand_dims(tf.one_hot(actions, 3), 1), axis=2) - next_obses)) total_error = Q_error if kappa > 0: total_error += kappa * state_error Q_actions = tf.argmax(Q_values_per_action, axis=1) Q_values_target_Bellman = rewards + (1 - dones) * gamma * tf.reduce_sum( tf.one_hot(tf.argmax(Q_function(next_obses)[0], axis=1), 3) * Q_function_target(next_obses)[0], axis=1) update_target = tf.group(*[ tf.assign(Q_param_target, Q_param) for Q_param, Q_param_target in zip(Q_params, Q_params_target) ]) lr_variable = tf.get_variable('lr', (), initializer=tf.constant_initializer(0.1)) grads = tf.gradients(total_error, Q_params) grad_sum_of_squares = sum( [tf.reduce_sum(x * x) for x in grads if x is not None]) Q_Adam = tf.train.AdamOptimizer(learning_rate=lr_variable) Q_minimize = Q_Adam.minimize(Q_error) total_minimize = Q_Adam.minimize(total_error) sess = tf.Session() sess.run(tf.global_variables_initializer()) Q_table, memory, N, env, high, low = fill_Q_table() obses_valid_0 = np.array(sum([[i] * N * 3 for i in range(N)], [])) obses_valid_1 = np.array(sum([[i] * 3 for i in range(N)], []) * N) actions_valid = np.array([0, 1, 2] * N * N) obses_valid = (np.stack( (obses_valid_0, obses_valid_1), axis=1) + 0.5) / N * (high - low) + low Q_values_target_valid = Q_table[obses_valid_0, obses_valid_1, actions_valid] weights_valid = np.ones(N * N * 3) def valid_error(): return sess.run(Q_error, feed_dict={ obses: obses_valid, actions: actions_valid, Q_values_target: Q_values_target_valid, weights: weights_valid }) valid_error_current = 1e20 valid_error_new = valid_error() while valid_error_new < 0.999 * valid_error_current: valid_error_current = valid_error_new print('valid error = %.6f' % valid_error_current) sess.run(tf.assign(lr_variable, valid_error_current / 1000)) for _ in range(64): sess.run(Q_minimize, feed_dict={ obses: obses_valid, actions: actions_valid, Q_values_target: Q_values_target_valid, weights: weights_valid }) valid_error_new = valid_error() print('valid error new = %.6f' % valid_error_new) sess.run(tf.assign(lr_variable, lr)) obs = env.reset() if prioritize: replay_buffer = PrioritizedReplayBuffer(50000, alpha) for mem in memory: 
replay_buffer.add(*mem) episode_rew = 0 for t in range(100000): if t % timesteps_per_action_taken == 0: action = sess.run(Q_actions, feed_dict={obses: obs[None]})[0] next_obs, rew, done, _ = env.step(action) episode_rew += rew if prioritize: replay_buffer.add(obs, action, rew, next_obs, done) else: memory.append((obs, action, rew, next_obs, done)) if len(memory) > 50000: del memory[0] obs = next_obs episode_length[-1] += 1 if done: obs = env.reset() print('episode reward = %d' % episode_rew) episode_rew = 0 episode_length.append(0) if prioritize: beta_current = (beta * (100000 - t) + t) / 100000 obses_current, actions_current, rewards_current, next_obses_current, dones_current, weights_current, idxes_current = replay_buffer.sample( 32, beta_current) else: idxes = [np.random.randint(len(memory)) for _ in range(32)] tuples = [memory[idx] for idx in idxes] obses_current = np.array([s[0] for s in tuples]) actions_current = np.array([s[1] for s in tuples]) rewards_current = np.array([s[2] for s in tuples]) next_obses_current = np.array([s[3] for s in tuples]) dones_current = np.array([float(s[4]) for s in tuples]) weights_current = np.ones(32) Q_values_target_current = sess.run(Q_values_target_Bellman, feed_dict={ rewards: rewards_current, next_obses: next_obses_current, dones: dones_current }) if prioritize: new_weights = np.abs( sess.run(Q_difference, feed_dict={ obses: obses_current, actions: actions_current, Q_values_target: Q_values_target_current, next_obses: next_obses_current })) + 1e-6 replay_buffer.update_priorities(idxes_current, new_weights) Q_errors.append( sess.run(Q_error, feed_dict={ obses: obses_current, actions: actions_current, Q_values_target: Q_values_target_current, next_obses: next_obses_current, weights: weights_current }).astype(np.float64)) state_errors.append( sess.run(state_error, feed_dict={ obses: obses_current, actions: actions_current, Q_values_target: Q_values_target_current, next_obses: next_obses_current, weights: weights_current }).astype(np.float64)) grad_sums_of_squares.append( sess.run(grad_sum_of_squares, feed_dict={ obses: obses_current, actions: actions_current, Q_values_target: Q_values_target_current, next_obses: next_obses_current, weights: weights_current }).astype(np.float64)) sess.run(total_minimize, feed_dict={ obses: obses_current, actions: actions_current, Q_values_target: Q_values_target_current, next_obses: next_obses_current, weights: weights_current }) if t % timesteps_per_update_target == 0: sess.run(update_target) if t % 1000 == 0: print('t = %d' % t) print('saving progress and params...') if not os.path.exists(folder_path + 'params/'): os.makedirs(folder_path + 'params/') with open(folder_path + 'progress.json', 'w') as f: data = { 'episode_length': episode_length, 'Q_errors': Q_errors, 'state_errors': state_errors, 'grad_sums_of_squares': grad_sums_of_squares } json.dump(data, f) saver = tf.train.Saver({v.name: v for v in Q_params}) saver.save(sess, folder_path + 'params/params.ckpt') with open(folder_path + 'params/params.pkl', 'wb') as f: cloudpickle.dump([sess.run(param) for param in Q_params], f) print('saved...') # tidy up sess.close() tf.reset_default_graph()
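# ----------------------------------------------------------------------
# Hedged sketch: `Q_values_target_Bellman` above is a Double-DQN target --
# the online network chooses argmax_a Q(s', a) and the target network
# supplies the value of that action.  The same computation in plain NumPy,
# with hypothetical q_online_next / q_target_next batches:

import numpy as np


def double_dqn_target(rewards, dones, gamma, q_online_next, q_target_next):
    """rewards, dones: (B,); q_online_next, q_target_next: (B, num_actions)."""
    best_actions = np.argmax(q_online_next, axis=1)                    # picked by online net
    bootstrap = q_target_next[np.arange(len(rewards)), best_actions]   # valued by target net
    return rewards + (1.0 - dones) * gamma * bootstrap


# Usage (hypothetical), e.g. for a batch of 32 MountainCar transitions:
# targets = double_dqn_target(rewards_current, dones_current, gamma,
#                             q_online(next_obses_current), q_target(next_obses_current))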
def learn(env, q_func, beta1=0.9, beta2=0.999, epsilon=1e-8, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, exploration_schedule=None, start_lr=5e-4, end_lr=5e-4, start_step=0, end_step=1, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, model_directory=None, lamda=0.1): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer beta1: float beta1 parameter for adam beta2: float beta2 parameter for adam epsilon: float epsilon parameter for adam max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability exploration_schedule: Schedule a schedule for exploration chance train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return ObservationInput(env.observation_space, name=name) global_step = tf.Variable(0, trainable=False) lr = interpolated_decay(start_lr, end_lr, global_step, start_step, end_step) act, train, update_target, debug = multiheaded_build_graph.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr, beta1=beta1, beta2=beta2, epsilon=epsilon), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, global_step=global_step, lamda=lamda, ) tf.summary.FileWriter(logger.get_dir(), graph_def=sess.graph_def) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. if exploration_schedule is None: exploration = LinearSchedule(schedule_timesteps=int( exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) else: exploration = exploration_schedule # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False if model_directory is None: model_directory = pathlib.Path(td) model_file = str(model_directory / "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] if isinstance(env.action_space, gym.spaces.MultiBinary): env_action = np.zeros(env.action_space.n) env_action[action] = 1 else: env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) act.save(str(model_directory / "act_model.pkl")) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return act
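# ----------------------------------------------------------------------
# Hedged sketch: `interpolated_decay` is not shown in this file; from its call
# site it appears to interpolate the learning rate from start_lr to end_lr as
# global_step moves from start_step to end_step.  A plain-NumPy stand-in with
# that assumed behaviour (illustrative only, not the project's implementation):

import numpy as np


def interpolated_decay_np(start_lr, end_lr, step, start_step, end_step):
    """Linearly interpolate the learning rate over [start_step, end_step], clamped outside."""
    if end_step <= start_step:
        return end_lr
    frac = np.clip((step - start_step) / float(end_step - start_step), 0.0, 1.0)
    return start_lr + frac * (end_lr - start_lr)


# e.g. interpolated_decay_np(5e-4, 5e-5, step=2500, start_step=0, end_step=10000)
# returns roughly 3.9e-4.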
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=5, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the trained model from. (default: None)(used in test stage) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) med_libs = MedLibs() '''Define Q network inputs: observation place holder(make_obs_ph), num_actions, scope, reuse outputs(tensor of shape batch_size*num_actions): values of each action, Q(s,a_{i}) ''' q_func = build_q_func(network, **network_kwargs) ''' To put observations into a placeholder ''' # TODO: Can only deal with Discrete and Box observation spaces for now # observation_space = env.observation_space (default) # Use sub_obs_space instead observation_space = med_libs.subobs_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) ''' Customize action ''' # TODO: subset of action space. action_dim = med_libs.sub_act_dim ''' Returns: deepq.build_train() act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. act is computed by [build_act] or [build_act_with_param_noise] train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. update_target: () -> () copy the parameters from optimized Q function to the target Q function. debug: {str: function} a bunch of functions to print debug data like q_values. ''' act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=action_dim, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, double_q=True, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': action_dim, } '''Contruct an act object using ActWrapper''' act = ActWrapper(act, act_params) ''' Create the replay buffer''' if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None '''Create the schedule for exploration starting from 1.''' exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) ''' Initialize all the uninitialized variables in the global scope and copy them to the target network. ''' U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() sub_obs = med_libs.custom_obs(obs) # TODO: customize observations pre_obs = obs reset = True mydict = med_libs.action_dict already_starts = False with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: # load_path: a trained model/policy load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) ''' Training loop starts''' t = 0 while t < total_timesteps: if callback is not None: if callback(locals(), globals()): break kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). update_param_noise_threshold = -np.log(1. 
- exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True ''' Choose action: take action and update exploration to the newest value ''' # TODO: Mixed action strategy # Normal status, action is easily determined by rules, use [obs] action = med_libs.simple_case_action(obs) # Distraction status, action is determined by Q, with [sub_obs] if action == -10: action = act(np.array(sub_obs)[None], update_eps=update_eps, **kwargs)[0] action = med_libs.action_Q_env( action ) # TODO:action_Q_env, from Q_action(0~2) to env_action(2~4) reset = False ''' Step action ''' new_obs, rew, done, d_info = env.step(action) d_att_last = int(pre_obs[0][0]) d_att_now = int(obs[0][0]) d_att_next = int(new_obs[0][0]) ''' Store transition in the replay buffer.''' pre_obs = obs obs = new_obs sub_new_obs = med_libs.custom_obs(new_obs) if (d_att_last == 0 and d_att_now == 1) and not already_starts: already_starts = True if already_starts and d_att_now == 1: replay_buffer.add(sub_obs, action, rew, sub_new_obs, float(done)) episode_rewards[-1] += rew # Sum of rewards t = t + 1 print( '>> Iteration:{}, State[d_att,cd_activate,L4_available,ssl4_activate,f_dc]:{}' .format(t, sub_obs)) print( 'Dis_Last:{}, Dis_Now:{}, Dis_Next:{},Reward+Cost:{}, Action:{}' .format( d_att_last, d_att_now, d_att_next, rew, list(mydict.keys())[list( mydict.values()).index(action)])) # update sub_obs sub_obs = sub_new_obs # Done and Reset if done: print('Done infos: ', d_info) print('======= end =======') obs = env.reset() sub_obs = med_libs.custom_obs(obs) # TODO: custom obs pre_obs = obs # TODO: save obs at t-1 already_starts = False episode_rewards.append(0.0) reset = True # Update the Q network parameters if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None # Calculate td-errors actions = med_libs.action_env_Q( actions ) # TODO:action_env_Q, from env_action(2~4) to Q_action(0~2) td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically, copy weights of Q to target Q update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
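# ----------------------------------------------------------------------
# Hedged sketch: the loop above mixes a rule-based policy with the learned Q
# policy -- med_libs.simple_case_action handles the clear-cut states and
# returns a sentinel (-10) when the Q network should decide, and the chosen
# Q index (0..2) is mapped to the environment's action ids (2..4).  A minimal
# illustration of that pattern; the helpers and the offset of 2 are assumptions
# taken from the comments above, not the MedLibs implementation.

import numpy as np

Q_TO_ENV_OFFSET = 2  # assumed shift between Q-action indices (0..2) and env action ids (2..4)


def action_q_to_env(q_action):
    return q_action + Q_TO_ENV_OFFSET


def action_env_to_q(env_actions):
    return np.asarray(env_actions) - Q_TO_ENV_OFFSET


def mixed_policy(obs, rule_fn, q_act_fn, update_eps):
    """Use the rule when it applies; otherwise fall back to the Q network."""
    action = rule_fn(obs)
    if action == -10:  # sentinel: the rule does not cover this state
        q_action = q_act_fn(np.array(obs)[None], update_eps=update_eps)[0]
        action = action_q_to_env(q_action)
    return action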
def learn(env, q_func, num_actions=3, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None, demo_replay=[] ): """Train a deepq model. Parameters ------- env: pysc2.env.SC2Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((64, 64), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10 ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # Select all marines first player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] screen = player_relative obs = common.init(env, obs) group_id = 0 reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. 
- exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True # custom process for DefeatZerglingsAndBanelings obs, screen, player = common.select_marine(env, obs) action = act(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] reset = False rew = 0 new_action = None obs, new_action = common.marine_action(env, obs, player, action) army_count = env._obs.observation.player_common.army_count try: if army_count > 0 and _ATTACK_SCREEN in obs[0].observation["available_actions"]: obs = env.step(actions=new_action) else: new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) except Exception as e: #print(e) 1 # Do nothing player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] new_screen = player_relative rew += obs[0].reward done = obs[0].step_type == environment.StepType.LAST selected = obs[0].observation["screen"][_SELECTED] player_y, player_x = (selected == _PLAYER_FRIENDLY).nonzero() if(len(player_y)>0): player = [int(player_x.mean()), int(player_y.mean())] if(len(player) == 2): if(player[0]>32): new_screen = common.shift(LEFT, player[0]-32, new_screen) elif(player[0]<32): new_screen = common.shift(RIGHT, 32 - player[0], new_screen) if(player[1]>32): new_screen = common.shift(UP, player[1]-32, new_screen) elif(player[1]<32): new_screen = common.shift(DOWN, 32 - player[1], new_screen) # Store transition in the replay buffer. replay_buffer.add(screen, action, rew, new_screen, float(done)) screen = new_screen episode_rewards[-1] += rew reward = episode_rewards[-1] if done: print("Episode Reward : %s" % episode_rewards[-1]) obs = env.reset() player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] screen = player_relative group_list = common.init(env, obs) # Select all marines first #env.step(actions=[sc2_actions.FunctionCall(_SELECT_UNIT, [_SELECT_ALL])]) episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. 
update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) U.load_state(model_file) return ActWrapper(act)
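# ----------------------------------------------------------------------
# Hedged sketch: before storing a transition, the loop above re-centres the
# 64x64 player_relative screen on the selected marine by shifting it until the
# unit sits at (32, 32).  `common.shift` is not shown; the helper below is a
# rough NumPy equivalent using np.roll (which wraps around at the edges, so it
# may not match the original's padding behaviour exactly).

import numpy as np


def recenter_on_unit(screen, unit_x, unit_y, center=32):
    """Shift a (H, W) screen so that (unit_x, unit_y) lands on (center, center)."""
    return np.roll(np.roll(screen, center - unit_y, axis=0), center - unit_x, axis=1)


# Usage (hypothetical), using the mean position of the selected unit's pixels:
# ys, xs = (selected == _PLAYER_FRIENDLY).nonzero()
# if len(ys) > 0:
#     new_screen = recenter_on_unit(new_screen, int(xs.mean()), int(ys.mean()))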
def pok_learn(env, q_func, lr=5e-4, max_timesteps=1000, #DP DEL 000 buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1500, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape #ok def make_obs_ph(name): return U.BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, #ok optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, #ok } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. #DP - don't need this # exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), # initial_p=1.0, # final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] td_error_list = [] saved_mean_reward = None saved_td_error = None obs = env.reset() #ok reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): #DP this somehow uses exploration break #DP - not needed # Take action and update exploration to the newest value # kwargs = {} # if not param_noise: # update_eps = exploration.value(t) # update_param_noise_threshold = 0. # else: # update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. # update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) # kwargs['reset'] = reset # kwargs['update_param_noise_threshold'] = update_param_noise_threshold # kwargs['update_param_noise_scale'] = True action = np.int64(env.action_space.sample()) #act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] #DP this is what we replace - what does act do?? env_action = action #DP action reset = False new_obs, rew, done, _ = env.step(env_action) #ok # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None # #DP EDIT # print("at step t " + str(t)) # print("printing obses_t, actions, rewards, obses_tp1, dones, weights") # print(obses_t, actions, rewards, obses_tp1, dones, weights) # print("%"*30) td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) td_error_list.append(np.mean(np.abs(td_errors))) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() #DP - convert to TD errors? mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) if len(td_error_list) > 1000 / batch_size: mean_1000step_tderror = round(np.mean(td_error_list[-int(round(100/batch_size)):-1]),5) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward (how to interpret)?", mean_100ep_reward) if len(td_error_list) > 1000 / batch_size: logger.record_tabular("mean abs 1000 td errs", mean_1000step_tderror) #DP logger.record_tabular("0% time spent exploring since using handlogs", 0) #int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_td_error is None or mean_1000step_tderror < saved_td_error: #mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to new avg trailing td error: {} -> {}".format( #DP saved_mean_reward, mean_1000step_tderror)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward saved_td_error = mean_1000step_tderror import pdb; pdb.set_trace() if model_saved: if print_freq is not None: logger.log("Restored model with mean reward & error: {} and {}".format(saved_mean_reward, saved_td_error)) U.load_state(model_file) return act
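# ----------------------------------------------------------------------
# Hedged sketch: pok_learn checkpoints on a trailing mean of |TD error| rather
# than on mean episode reward (note that the slice above takes the last
# 100/batch_size updates even though the log label says 1000, and that the
# leftover `import pdb; pdb.set_trace()` will drop into the debugger whenever
# that branch runs).  The bookkeeping reduces to something like the class
# below, which is illustrative only.

import numpy as np


class TDErrorCheckpointer:
    """Save whenever the trailing mean |TD error| reaches a new minimum."""

    def __init__(self, window_steps=1000, batch_size=32):
        self.window_updates = max(1, window_steps // batch_size)  # number of train() calls
        self.history = []
        self.best = None

    def record(self, td_errors):
        self.history.append(float(np.mean(np.abs(td_errors))))

    def should_save(self):
        if len(self.history) < self.window_updates:
            return False
        trailing = float(np.mean(self.history[-self.window_updates:]))
        if self.best is None or trailing < self.best:
            self.best = trailing
            return True
        return False


# Usage (hypothetical) inside the training loop:
# ckpt = TDErrorCheckpointer()
# ckpt.record(td_errors)                       # after every train() call
# if t % checkpoint_freq == 0 and ckpt.should_save():
#     U.save_state(model_file)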
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act, debug['q_func'], debug['obs']
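# --- Usage sketch (not part of the original code) ---------------------------
# How the baselines-style `learn` above might be called. "CartPole-v0" and the
# registered "mlp" network name are assumptions; note that this variant returns
# the act wrapper together with debug['q_func'] and debug['obs'].
def _example_baselines_training():
    import gym

    env = gym.make("CartPole-v0")
    act, q_func, obs_ph = learn(
        env,
        network="mlp",
        seed=0,
        total_timesteps=100000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
    )
    return act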
def learn(env,
          q_func,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None):
    sess = tf.Session()
    sess.__enter__()

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    if env.is_single:
        observation_space_shape = env.observation_space.shape
        num_actions = env.action_space.n
    else:
        observation_space_shape = env.observation_space[0].shape
        num_actions = env.action_space[0].n
    num_agents = env.agentSize

    def make_obs_ph(name):
        return BatchInput(observation_space_shape, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=num_actions,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': num_actions,
    }
    act = ActWrapper(act, act_params)

    # Create the replay buffer (shared across agents, hence scaled by num_agents)
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size * num_agents, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size * num_agents)
        beta_schedule = None

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        model_saved = False
        model_file = os.path.join(td, "model")
        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break

            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) +
                                                       exploration.value(t) / float(num_actions))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            # One action (and its Q-value) per agent, all produced by the shared Q-network.
            action = []
            qval = []
            for i in range(num_agents):
                prediction = act(np.array(obs[i])[None], update_eps=update_eps, **kwargs)
                action.append(prediction[0][0])
                qval.append(prediction[1][0])
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action, qval)

            # Store transition in the replay buffer.
            for i in range(num_agents):
                replay_buffer.add(obs[i], action[i], rew, new_obs[i], float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and (t * num_agents) % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_state(model_file)

    return act, episode_rewards
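# --- Interface sketch (not part of the original code) -----------------------
# The multi-agent `learn` above assumes an environment exposing `is_single`,
# `agentSize`, list-valued observation/action spaces and a two-argument
# step(actions, qvals). The skeleton below only illustrates those assumptions;
# the class name and all method bodies are hypothetical.
import numpy as np
from gym import spaces

class MultiAgentEnvSkeleton:
    def __init__(self, n_agents=3, obs_dim=4, n_actions=5):
        self.is_single = False                 # selects the per-agent space branch in learn()
        self.agentSize = n_agents              # read as num_agents in learn()
        self.observation_space = [spaces.Box(-np.inf, np.inf, shape=(obs_dim,), dtype=np.float32)
                                  for _ in range(n_agents)]
        self.action_space = [spaces.Discrete(n_actions) for _ in range(n_agents)]

    def reset(self):
        # One observation per agent.
        return [space.sample() for space in self.observation_space]

    def step(self, actions, qvals):
        # learn() passes the per-agent greedy actions *and* their Q-values;
        # a concrete env could use the Q-values to resolve conflicts between agents.
        obs = [space.sample() for space in self.observation_space]
        reward, done, info = 0.0, False, {}
        return obs, reward, done, info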