def learn_continuous_tasks_noisy(env, q_func, env_name, dir_path, time_stamp,
                                 total_num_episodes, num_actions_pad=33, lr=1e-4,
                                 grad_norm_clipping=10, max_timesteps=int(1e8),
                                 buffer_size=int(1e6), train_freq=1, batch_size=64,
                                 print_freq=10, learning_starts=1000, gamma=0.99,
                                 target_network_update_freq=500, prioritized_replay=False,
                                 prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4,
                                 prioritized_replay_beta_iters=None, prioritized_replay_eps=int(1e8),
                                 num_cpu=16, epsilon_greedy=False, timesteps_std=1e6,
                                 initial_std=0.4, final_std=0.05, eval_freq=100,
                                 n_eval_episodes=10, eval_std=0.01, log_index=0,
                                 log_prefix='q', loss_type="L2", model_file='./',
                                 callback=None):
    """Train a branching deepq model to solve continuous control tasks via discretization.

    Current assumptions in the implementation:
    - for solving continuous control domains via discretization (can be adjusted to be
      compatible with naturally discrete-action domains using 'env.action_space.n')
    - uniform number of sub-actions per action dimension (can be generalized to
      heterogeneous number of sub-actions across branches)

    Parameters
    ----------
    env : gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions_pad: int
        number of sub-actions per action dimension (= num of discretization grains/bars + 1)
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    grad_norm_clipping: int
        set None for no clipping
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the unified TD error for updating priorities.
        Erratum: The camera-ready copy of this paper incorrectly reported 1e-8.
        The value used to produce the results is 1e8.
    num_cpu: int
        number of cpus to use for training
    dir_path: str
        path for logs and results to be stored in
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput(env.observation_space.shape, name=name) print('Observation shape:' + str(env.observation_space.shape)) num_action_grains = num_actions_pad - 1 num_action_dims = env.action_space.shape[0] num_action_streams = num_action_dims num_actions = num_actions_pad * num_action_streams # total numb network outputs for action branching with one action dimension per branch print('Number of actions in total:' + str(num_actions)) act, q_val, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, num_action_streams=num_action_streams, batch_size=batch_size, optimizer_name="Adam", learning_rate=lr, grad_norm_clipping=grad_norm_clipping, gamma=gamma, double_q=True, scope="deepq", reuse=None, loss_type="L2") print('TRAIN VARS:') print(tf.trainable_variables()) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, 'num_action_streams': num_action_streams, } print('Create the log writer for TensorBoard visualizations.') log_dir = "{}/tensorboard_logs/{}".format(dir_path, env_name) if not os.path.exists(log_dir): os.makedirs(log_dir) score_placeholder = tf.placeholder(tf.float32, [], name='score_placeholder') tf.summary.scalar('score', score_placeholder) lr_constant = tf.constant(lr, name='lr_constant') tf.summary.scalar('learning_rate', lr_constant) eval_placeholder = tf.placeholder(tf.float32, [], name='eval_placeholder') eval_summary = tf.summary.scalar('evaluation', eval_placeholder) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None if epsilon_greedy: approximate_num_iters = 2e6 / 4 exploration = PiecewiseSchedule([(0, 0.1), (approximate_num_iters / 50, 0.01), (approximate_num_iters / 5, 0.01)], outside_value=0.01) else: exploration = ConstantSchedule(value=0.0) # greedy policy std_schedule = LinearSchedule(schedule_timesteps=timesteps_std, initial_p=initial_std, final_p=final_std) # Initialize the parameters and copy them to the target network. 
U.initialize() update_target() # Initialize the parameters used for converting branching, discrete action indeces to continuous actions low = env.action_space.low high = env.action_space.high actions_range = np.subtract(high, low) print('###################################') print(low) print(high) print('###################################') episode_rewards = [] reward_sum = 0.0 time_steps = [0] time_spent_exploring = [0] prev_time = time.time() n_trainings = 0 # Open a dircetory for recording results results_dir = "{}/results/{}".format(dir_path, env_name) if not os.path.exists(results_dir): os.makedirs(results_dir) displayed_mean_reward = None score_timesteps = [] game_scores = [] def evaluate(step, episode_number): global max_eval_reward_mean, model_saved print('Evaluate...') eval_reward_sum = 0.0 # Run evaluation episodes for eval_episode in range(n_eval_episodes): obs = env.reset() done = False while not done: # Choose action action_idxes = np.array( act(np.array(obs)[None], stochastic=False)) # deterministic actions_greedy = action_idxes / num_action_grains * actions_range + low if eval_std == 0.0: action = actions_greedy else: action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: a_stoch = np.random.normal(loc=a_greedy, scale=eval_std) a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action.append(a_stoch) out_of_range_action = False # Step obs, rew, done, _ = env.step(action) eval_reward_sum += rew # Average the rewards and log eval_reward_mean = eval_reward_sum / n_eval_episodes print(eval_reward_mean, 'over', n_eval_episodes, 'episodes') game_scores.append(eval_reward_mean) score_timesteps.append(step) if max_eval_reward_mean is None or eval_reward_mean > max_eval_reward_mean: logger.log( "Saving model due to mean eval increase: {} -> {}".format( max_eval_reward_mean, eval_reward_mean)) U.save_state(model_file) model_saved = True max_eval_reward_mean = eval_reward_mean intact = ActWrapper(act, act_params) intact.save(model_file + "_" + str(episode_number) + "_" + str(int(np.round(max_eval_reward_mean)))) print('Act saved to ' + model_file + "_" + str(episode_number) + "_" + str(int(np.round(max_eval_reward_mean)))) with tempfile.TemporaryDirectory() as td: td = './logs' evaluate(0, 0) obs = env.reset() t = -1 all_means = [] q_stats = [] current_qs = [] training_game_scores = [] training_timesteps = [] weights_mean = [] weights_stds = [] names = [] for joint_i in range(num_action_streams): names.append([]) names[-1].append("deepq/q_func/action_value/" + str(joint_i) + "128/w_sigma:0") names[-1].append("deepq/q_func/action_value/" + str(joint_i) + "out/w_sigma:0") mus = [] for nl in names: mus.append([]) for n in nl: for v in tf.trainable_variables(): if v.name == n: mus[-1].append(v) variables_names = [v.name for v in tf.trainable_variables()] print(variables_names) print(mus) while True: t += 1 # Select action and update exploration probability action_idxes = np.array( act(np.array(obs)[None], update_eps=exploration.value(t))) qs = np.array(q_val(np.array(obs)[None], stochastic=False)) # deterministic tt = [] for val in qs: tt.append(np.std(val)) current_qs.append(tt) # Convert sub-actions indexes (discrete sub-actions) to continuous controls action = action_idxes / num_action_grains * actions_range + low if not epsilon_greedy: # Gaussian noise actions_greedy = action action_idx_stoch = [] action = 
[] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: # Sample from a Gaussian with mean at the greedy action and a std following a schedule of choice a_stoch = np.random.normal(loc=a_greedy, scale=std_schedule.value(t)) # Convert sampled cont action to an action idx a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) # Check if action is in range if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action_idx_stoch.append(a_idx_stoch) action.append(a_stoch) out_of_range_action = False action_idxes = action_idx_stoch new_obs, rew, done, _ = env.step(np.array(action)) # Store transition in the replay buffer replay_buffer.add(obs, action_idxes, rew, new_obs, float(done)) obs = new_obs reward_sum += rew if done: obs = env.reset() time_spent_exploring[-1] = int(100 * exploration.value(t)) time_spent_exploring.append(0) episode_rewards.append(reward_sum) training_game_scores.append(reward_sum) training_timesteps.append(t) time_steps[-1] = t reward_sum = 0.0 time_steps.append(0) q_stats.append(np.mean(current_qs, 0)) current_qs = [] t_m = [] ts_m = [] for mu_mod in mus: tt = [] tts = [] for mu_l in mu_mod: muw = np.ravel(sess.run(mu_l)) tt.append(np.mean(muw)) tts.append(np.std(muw)) t_m.append(tt) ts_m.append(tts) weights_mean.append(t_m) weights_stds.append(ts_m) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train( obses_t, actions, rewards, obses_tp1, dones, weights) # np.ones_like(rewards)) #TEMP AT NEW # print(np.mean(td_errors)) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) n_trainings += 1 if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically update_target() if len(episode_rewards) == 0: mean_100ep_reward = 0 elif len(episode_rewards) < 100: mean_100ep_reward = np.mean(episode_rewards) else: mean_100ep_reward = np.mean(episode_rewards[-100:]) all_means.append(mean_100ep_reward) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) current_time = time.time() logger.record_tabular("trainings per second", n_trainings / (current_time - prev_time)) logger.dump_tabular() n_trainings = 0 prev_time = current_time if t > learning_starts and num_episodes > 100: if displayed_mean_reward is None or mean_100ep_reward > displayed_mean_reward: if print_freq is not None: logger.log("Mean reward increase: {} -> {}".format( displayed_mean_reward, mean_100ep_reward)) displayed_mean_reward = mean_100ep_reward # Performance evaluation with a greedy policy if done and num_episodes % eval_freq == 0: evaluate(t + 1, num_episodes) obs = env.reset() pickle.dump(q_stats, open( str(log_index) + "q_stat_stds99_" + log_prefix + ".pkl", 'wb'), 
protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(weights_mean, open( str(log_index) + "q_stat_wmu99_" + log_prefix + ".pkl", 'wb'), protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(weights_stds, open( str(log_index) + "q_stat_wsig99_" + log_prefix + ".pkl", 'wb'), protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(game_scores, open( str(log_index) + "q_stat_scores99_" + log_prefix + ".pkl", 'wb'), protocol=pickle.HIGHEST_PROTOCOL) return ActWrapper(act, act_params)
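# A minimal, self-contained sketch (not part of the training code above) of the
# index <-> continuous-action conversion used by the branching agent. Names are
# illustrative. Note that the rejection-sampling loops above map a continuous action
# back to an index with (a + high) / range, which equals the usual (a - low) / range
# only when the action space is symmetric (low == -high); this sketch uses the general form.
import numpy as np

def idx_to_continuous(action_idxes, low, high, num_action_grains):
    """Map one sub-action index per action dimension to a continuous control."""
    actions_range = high - low
    return np.asarray(action_idxes) / num_action_grains * actions_range + low

def continuous_to_idx(action, low, high, num_action_grains):
    """Map a continuous control back to the nearest discretization index."""
    actions_range = high - low
    return np.rint((np.asarray(action) - low) / actions_range * num_action_grains)

# Example with 33 sub-actions (32 grains) on a [-1, 1] dimension:
# index 0 -> -1.0, index 16 -> 0.0, index 32 -> 1.0
low, high = np.array([-1.0]), np.array([1.0])
assert np.allclose(idx_to_continuous([16], low, high, 32), [0.0])
assert np.allclose(continuous_to_idx([0.0], low, high, 32), [16])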
def __init__(self, args, env, writer=None): """ init agent """ self.eval_env = copy.deepcopy(env) self.args = args self.state_dim = env.reset().shape self.action_dim = env.action_space.n self.device = torch.device("cuda" if ( torch.cuda.is_available() and self.args.gpu) else "cpu") # set the random seed in the main launcher random.seed(self.args.seed) torch.manual_seed(self.args.seed) np.random.seed(self.args.seed) if self.args.gpu: torch.cuda.manual_seed(self.args.seed) self.writer = writer if self.args.env_name == "grid": self.dqn = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.dqn_target = OneHotDQN(self.state_dim, self.action_dim).to(self.device) # create more networks here self.cost_model = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.review_model = OneHotValueNetwork(self.state_dim).to( self.device) self.target_cost_model = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.target_review_model = OneHotValueNetwork(self.state_dim).to( self.device) self.target_cost_model.load_state_dict( self.cost_model.state_dict()) self.target_review_model.load_state_dict( self.review_model.state_dict()) else: raise Exception("Not implemented yet!") # copy parameters self.dqn_target.load_state_dict(self.dqn.state_dict()) self.optimizer = torch.optim.Adam(self.dqn.parameters(), lr=self.args.lr) self.review_optimizer = optim.Adam(self.review_model.parameters(), lr=self.args.cost_reverse_lr) self.critic_optimizer = optim.Adam(self.cost_model.parameters(), lr=self.args.cost_q_lr) # parallel agents def make_env(): def _thunk(): env = create_env(args) return env return _thunk envs = [make_env() for i in range(self.args.num_envs)] self.envs = SubprocVecEnv(envs) # create epsilon and beta schedule self.eps_decay = LinearSchedule(50000 * 200, 0.01, 1.0) # self.eps_decay = LinearSchedule(self.args.num_episodes * 200, 0.01, 1.0) self.total_steps = 0 self.num_episodes = 0 # for storing resutls self.results_dict = { "train_rewards": [], "train_constraints": [], "eval_rewards": [], "eval_constraints": [], } self.cost_indicator = "none" if "grid" in self.args.env_name: self.cost_indicator = 'pit' else: raise Exception("not implemented yet") self.eps = self.eps_decay.value(self.total_steps)
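# The epsilon decay above relies on LinearSchedule(schedule_timesteps, final_p, initial_p).
# Below is a minimal sketch of such a schedule, consistent with the positional call
# LinearSchedule(50000 * 200, 0.01, 1.0) used in __init__; the repository may import
# its own implementation, so treat this as illustrative only.
class LinearSchedule:
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        # Linearly interpolate from initial_p to final_p, then hold final_p.
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)

# eps starts at 1.0 and reaches 0.01 after 50000 * 200 steps:
# LinearSchedule(50000 * 200, 0.01, 1.0).value(0)          -> 1.0
# LinearSchedule(50000 * 200, 0.01, 1.0).value(10_000_000) -> 0.01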
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
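# A numpy sketch (illustrative, not the buffer class used above) of the prioritized-replay
# arithmetic behind replay_buffer.sample(batch_size, beta=...) and update_priorities():
# sampling probabilities follow p_i**alpha, importance weights are (N * P(i))**-beta
# normalized by their maximum, and priorities are refreshed with |td_error| + eps.
import numpy as np

def sample_probs(priorities, alpha=0.6):
    scaled = np.asarray(priorities) ** alpha
    return scaled / scaled.sum()

def importance_weights(probs, idxes, beta):
    n = len(probs)
    weights = (n * probs[idxes]) ** (-beta)
    return weights / weights.max()          # normalize so the largest sampled weight is 1

def refreshed_priorities(td_errors, eps=1e-6):
    return np.abs(td_errors) + eps          # eps keeps every transition sampleable

rng = np.random.default_rng(0)
priorities = rng.random(1000) + 1e-6
probs = sample_probs(priorities, alpha=0.6)
idxes = rng.choice(len(probs), size=32, p=probs)
weights = importance_weights(probs, idxes, beta=0.4)        # passed to train() as loss weights
priorities[idxes] = refreshed_priorities(rng.normal(size=32))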
class Learn: def __init__(self, config, env): super().__init__() self.config = config self.env = env self.agent_ids = self.get_agent_ids() self.replay_memory, self.beta_schedule = self.init_replay_memory() self.optimizer = tf.keras.optimizers.Adam(self.config.lr) # Create the schedule for exploration starting from 1. self.exploration = LinearSchedule(schedule_timesteps=int(config.exploration_fraction * config.num_timesteps), initial_p=1.0, final_p=config.exploration_final_eps) self.eps = tf.Variable(0.0) self.models, self.target_models = self._init_networks() self.agents = [Agent(config, self.models[agent_id], self.target_models[agent_id], agent_id) for agent_id in self.agent_ids] def _init_networks(self): network = Network(self.config, self.agent_ids) # base_model = network.init_base_model() # target_base_model = network.init_base_model() return network.build_models(), network.build_models() def get_agent_ids(self): return [agent_id for agent_id in range(self.config.num_agents)] def init_replay_memory(self): """ :return: replay_buffer, beta_schedule """ if self.config.prioritized_replay: replay_buffer = PrioritizedReplayBuffer(self.config.buffer_size, alpha=self.config.prioritized_replay_alpha) if self.config.prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = self.config.num_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=self.config.prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(self.config.buffer_size) beta_schedule = None return replay_buffer, beta_schedule @tf.function def get_actions(self, obs, stochastic=True, update_eps=-1): """ :param obs: observation for all agents :param stochastic: True for Train phase and False for test phase :param update_eps: epsilon update for eps-greedy :return: actions, q_values of all agents as fps """ deterministic_actions = [] fps = [] for agent_id in self.agent_ids: deterministic_action, fp = self.agents[agent_id].greedy_action(obs[agent_id]) deterministic_actions.append(deterministic_action) fps.append(fp) # print(f' deterministic_actions {deterministic_actions}') # print(f' fps {fps}') random_actions = tf.random.uniform(tf.stack([self.config.num_agents]), minval=0, maxval=self.config.num_actions, dtype=tf.int64) # print(f' random_actions {random_actions}') chose_random = tf.random.uniform(tf.stack([self.config.num_agents]), minval=0, maxval=1, dtype=tf.float32) < self.eps # print(f' chose_random {chose_random}') stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) # print(f' stochastic_actions.numpy() {stochastic_actions.numpy()}') if stochastic: actions = stochastic_actions.numpy() else: actions = deterministic_actions if update_eps >= 0: self.eps.assign(update_eps) # print(f' actions {actions}') return actions, fps @tf.function def get_max_values(self, obs): """ :param obs: list observations one for each agent :return: best values based on Q-Learning formula maxQ(s',a') """ best_q_vals = [] for agent_id in self.agent_ids: best_q_val = self.agents[agent_id].max_value(obs[agent_id]) best_q_vals.append(best_q_val) # print(f' best_q_vals.numpy() {best_q_vals.numpy()}') return best_q_vals @tf.function def compute_loss(self, obses_t, actions, rewards, dones, weights, fps=None): """ :param obses_t: list observations one for each agent :param actions: :param rewards: :param dones: :param weights: :param fps: :return: loss and td errors tensor list one for each agent """ losses = [] td_errors = np.zeros(self.config.batch_size) for agent_id in 
self.agent_ids: loss, td_error = self.agents[agent_id].compute_loss(obses_t[agent_id], actions[agent_id], rewards[agent_id], dones[agent_id], weights[agent_id], fps=None) losses.append(loss) td_errors += td_error return losses, td_errors @tf.function() def train(self, obses_t, actions, rewards, dones, weights, fps=None): with tf.GradientTape() as tape: losses, td_errors = self.compute_loss(obses_t, actions, rewards, dones, weights, fps) loss = tf.reduce_sum(losses) params = tape.watched_variables() # print(f' param {params}') grads = tape.gradient(loss, params) if self.config.grad_norm_clipping: clipped_grads = [] for grad in grads: clipped_grads.append(tf.clip_by_norm(grad, self.config.grad_norm_clipping)) grads = clipped_grads self.optimizer.apply_gradients(list(zip(grads, params))) return loss, td_errors def create_fingerprints(self, fps, t): # TODO fps = [] if self.config.num_agents > 1: for agent_id in self.agent_ids: fp = fps[:agent_id] fp.extend(fps[agent_id + 1:]) fp_a = np.concatenate((fp, [[self.exploration.value(t) * 100, t]]), axis=None) fps.append(fp_a) return fps def learn(self): episode_rewards = [0.0] obs = self.env.reset() print(obs.shape) done = False tstart = time.time() episodes_trained = [0, False] # [episode_number, Done flag] for t in range(self.config.num_timesteps): # if t == 102: # break update_eps = tf.constant(self.exploration.value(t)) mb_obs, mb_rewards, mb_actions, mb_obs1, mb_dones = [], [], [], [], [] for n_step in range(self.config.n_steps): # print(f't is {t} -- n_steps is {n_step}') actions, _ = self.get_actions(tf.constant(obs), update_eps=update_eps) if self.config.num_agents == 1: obs1, rews, done, _ = self.env.step(actions[0]) else: obs1, rews, done, _ = self.env.step(actions) # TODO fingerprint computation mb_obs.append(obs.copy()) mb_actions.append(actions) mb_dones.append([float(done) for _ in self.agent_ids]) # print(f'rewards is {rews}') if self.config.same_reward_for_agents: rews = [np.max(rews) for _ in range(len(rews))] # for cooperative purpose same reward for every one mb_obs1.append(obs1.copy()) mb_rewards.append(rews) obs = obs1 episode_rewards[-1] += np.max(rews) if done: episodes_trained[0] = episodes_trained[0] + 1 episodes_trained[1] = True episode_rewards.append(0.0) obs = self.env.reset() mb_dones.append([float(done) for _ in self.agent_ids]) # swap axes to have lists in shape of (num_agents, num_steps, ...) 
# print(f' mb_obs.shape is {np.array(mb_obs).shape}') mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1) # print(f' mb_obs.shape is {np.array(mb_obs).shape}') mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] # print(f' before discount mb_rewards is {mb_rewards}') if self.config.gamma > 0.0: # print(f' last_values {last_values}') for agent_id, (rewards, dones) in enumerate(zip(mb_rewards, mb_dones)): value = self.agents[agent_id].max_value(tf.constant(obs1[agent_id])) rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.config.gamma) mb_rewards[agent_id] = rewards # print(f' after discount mb_rewards is {mb_rewards}') if self.config.replay_buffer is not None: self.replay_memory.add(mb_obs, mb_actions, mb_rewards, mb_obs1, mb_masks) if t > self.config.learning_starts and t % self.config.train_freq == 0: if self.config.prioritized_replay: experience = self.replay_memory.sample(self.config.batch_size, beta=self.beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = self.replay_memory.sample(self.config.batch_size) weights, batch_idxes = np.ones_like(rewards), None # print(f'obses_t.shape {obses_t.shape}') # shape format is (batch_size, agent_num, n_steps, ...) obses_t = obses_t.swapaxes(0, 1) actions = actions.swapaxes(0, 1) rewards = rewards.swapaxes(0, 1) # print(f'rewards.shape {rewards.shape}') obses_tp1 = obses_tp1.swapaxes(0, 1) dones = dones.swapaxes(0, 1) print(f'weights.shape {weights.shape}') weights = weights.swapaxes(0, 1) # weights shape is (1, batch_size, n_steps) print(f'weights.shape {weights.shape}') # shape format is (agent_num, batch_size, n_steps, ...) if 'rnn' not in self.config.network: shape = obses_t.shape obses_t = np.reshape(obses_t, (shape[0], shape[1] * shape[2], *shape[3:])) shape = actions.shape actions = np.reshape(actions, (shape[0], shape[1] * shape[2], *shape[3:])) shape = rewards.shape rewards = np.reshape(rewards, (shape[0], shape[1] * shape[2], *shape[3:])) shape = dones.shape dones = np.reshape(dones, (shape[0], shape[1] * shape[2], *shape[3:])) shape = weights.shape weights = np.reshape(weights, (shape[0], shape[1])) # print(f'obses_t.shape {obses_t.shape}') # shape format is (agent_num, batch_size * n_steps, ...) 
obses_t = tf.constant(obses_t) actions = tf.constant(actions) rewards = tf.constant(rewards) dones = tf.constant(dones) weights = tf.constant(weights) # print(f' obses_t.shape {obses_t.shape}') # print(f' actions.shape {actions.shape}') # print(f' rewards.shape {rewards.shape}') # print(f' dones.shape {dones.shape}') # print(f' weights.shape {weights.shape}') loss, td_errors = self.train(obses_t, actions, rewards, dones, weights) # print(f'td_errors.shape = {np.array(td_errors).shape} , batch_idxes.shape = {np.array(batch_idxes).shape}') if self.config.prioritized_replay: new_priorities = np.abs(td_errors) + self.config.prioritized_replay_eps self.replay_memory.update_priorities(batch_idxes, new_priorities) if t % (self.config.train_freq * 50) == 0: print(f't = {t} , loss = {loss}') if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0: # Update target network periodically. for agent_id in self.agent_ids: self.agents[agent_id].soft_update_target() if t % self.config.playing_test == 0 and t != 0: # self.network.save(self.config.save_path) self.play_test_games() mean_100ep_reward = np.mean(episode_rewards[-101:-1]) num_episodes = len(episode_rewards) if t % (self.config.print_freq*100) == 0: time_1000_step = time.time() nseconds = time_1000_step - tstart tstart = time_1000_step print(f'eps {self.exploration.value(t)} -- time {t - self.config.print_freq*1000} to {t} steps: {nseconds}') # if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0: if episodes_trained[1] and episodes_trained[0] % self.config.print_freq == 0: episodes_trained[1] = False logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 past episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t))) logger.dump_tabular() def play_test_games(self): num_tests = self.config.num_tests test_env = init_env(self.config, mode='test') test_rewards = np.zeros(num_tests) for i in range(num_tests): done = False obs = test_env.reset() iter = 0 while True: iter += 1 actions, _ = self.get_actions(tf.constant(obs), stochastic=False) # print(f'actions[0] {actions[0]}, test_done {done}, {iter}') if self.config.num_agents == 1: obs1, rews, done, _ = test_env.step(actions[0]) else: obs1, rews, done, _ = test_env.step(actions) # ToDo fingerprint computation obs = obs1 if done or iter >= self.config.max_episodes_length: # print(f'test {i} rewards is {rews}') test_rewards[i] = np.mean(rews) break print(f'test_rewards: {test_rewards} \n mean reward of {num_tests} tests: {np.mean(test_rewards)}') test_env.close()
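# The n-step reward preprocessing in learn() above bootstraps with discount_with_dones().
# Below is a minimal sketch consistent with how it is called (rewards + [value], dones + [0]),
# in the style of the baselines a2c utility; the repository may import its own copy.
def discount_with_dones(rewards, dones, gamma):
    """Discounted returns that reset at episode boundaries (done == 1)."""
    discounted, running = [], 0.0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        running = reward + gamma * running * (1.0 - done)
        discounted.append(running)
    return discounted[::-1]

# Appending the bootstrap value and a trailing done of 0, then dropping the last element,
# yields n-step targets r_t + gamma * r_{t+1} + ... + gamma^n * V(s_{t+n}), e.g.
# discount_with_dones([1, 1] + [10], [0, 0] + [0], 0.9)[:-1] == [10.0, 10.0]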
class SafeSarsaAgent(object): def __init__(self, args, env, writer=None): """ init agent """ self.eval_env = copy.deepcopy(env) self.args = args self.state_dim = env.reset().shape self.action_dim = env.action_space.n self.device = torch.device("cuda" if ( torch.cuda.is_available() and self.args.gpu) else "cpu") # set the random seed in the main launcher random.seed(self.args.seed) torch.manual_seed(self.args.seed) np.random.seed(self.args.seed) if self.args.gpu: torch.cuda.manual_seed(self.args.seed) self.writer = writer if self.args.env_name == "grid": self.dqn = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.dqn_target = OneHotDQN(self.state_dim, self.action_dim).to(self.device) # create more networks here self.cost_model = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.review_model = OneHotValueNetwork(self.state_dim).to( self.device) self.target_cost_model = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.target_review_model = OneHotValueNetwork(self.state_dim).to( self.device) self.target_cost_model.load_state_dict( self.cost_model.state_dict()) self.target_review_model.load_state_dict( self.review_model.state_dict()) else: raise Exception("Not implemented yet!") # copy parameters self.dqn_target.load_state_dict(self.dqn.state_dict()) self.optimizer = torch.optim.Adam(self.dqn.parameters(), lr=self.args.lr) self.review_optimizer = optim.Adam(self.review_model.parameters(), lr=self.args.cost_reverse_lr) self.critic_optimizer = optim.Adam(self.cost_model.parameters(), lr=self.args.cost_q_lr) # parallel agents def make_env(): def _thunk(): env = create_env(args) return env return _thunk envs = [make_env() for i in range(self.args.num_envs)] self.envs = SubprocVecEnv(envs) # create epsilon and beta schedule self.eps_decay = LinearSchedule(50000 * 200, 0.01, 1.0) # self.eps_decay = LinearSchedule(self.args.num_episodes * 200, 0.01, 1.0) self.total_steps = 0 self.num_episodes = 0 # for storing resutls self.results_dict = { "train_rewards": [], "train_constraints": [], "eval_rewards": [], "eval_constraints": [], } self.cost_indicator = "none" if "grid" in self.args.env_name: self.cost_indicator = 'pit' else: raise Exception("not implemented yet") self.eps = self.eps_decay.value(self.total_steps) def pi(self, state, current_cost=0.0, greedy_eval=False): """ take the action based on the current policy """ with torch.no_grad(): # to take random action or not if (random.random() > self.eps_decay.value( self.total_steps)) or greedy_eval: q_value = self.dqn(state) # chose the max/greedy actions action = q_value.max(1)[1].cpu().numpy() else: action = np.random.randint(0, high=self.action_dim, size=(self.args.num_envs, )) return action def safe_deterministic_pi(self, state, current_cost=0.0, greedy_eval=False): """ take the action based on the current policy """ with torch.no_grad(): # to take random action or not if (random.random() > self.eps_decay.value( self.total_steps)) or greedy_eval: # No random action q_value = self.dqn(state) # Q_D(s,a) cost_q_val = self.cost_model(state) cost_r_val = self.review_model(state) # find the action set # create the filtered mask here constraint_mask = torch.le(cost_q_val + cost_r_val, self.args.d0 + current_cost).float() filtered_Q = (q_value + 1000.0) * (constraint_mask) filtered_action = filtered_Q.max(1)[1].cpu().numpy() # alt action to take if infeasible solution # minimize the cost alt_action = (-1. 
* cost_q_val).max(1)[1].cpu().numpy() c_sum = constraint_mask.sum(1) action_mask = (c_sum == torch.zeros_like(c_sum)).cpu().numpy() action = (1 - action_mask ) * filtered_action + action_mask * alt_action return action else: # create an array of random indices, for all the environments action = np.random.randint(0, high=self.action_dim, size=(self.args.num_envs, )) return action def compute_n_step_returns(self, next_value, rewards, masks): """ n-step SARSA returns """ R = next_value returns = [] for step in reversed(range(len(rewards))): R = rewards[step] + self.args.gamma * R * masks[step] returns.insert(0, R) return returns def compute_reverse_n_step_returns(self, prev_value, costs, begin_masks): """ n-step SARSA returns (backward in time) """ R = prev_value returns = [] for step in range(len(costs)): R = costs[step] + self.args.gamma * R * begin_masks[step] returns.append(R) return returns def log_episode_stats(self, ep_reward, ep_constraint): """ log the stats for environment performance """ # log episode statistics self.results_dict["train_rewards"].append(ep_reward) self.results_dict["train_constraints"].append(ep_constraint) if self.writer: self.writer.add_scalar("Return", ep_reward, self.num_episodes) self.writer.add_scalar("Constraint", ep_constraint, self.num_episodes) log( 'Num Episode {}\t'.format(self.num_episodes) + \ 'E[R]: {:.2f}\t'.format(ep_reward) +\ 'E[C]: {:.2f}\t'.format(ep_constraint) +\ 'avg_train_reward: {:.2f}\t'.format(np.mean(self.results_dict["train_rewards"][-100:])) +\ 'avg_train_constraint: {:.2f}\t'.format(np.mean(self.results_dict["train_constraints"][-100:])) ) def run(self): """ learning happens here """ self.total_steps = 0 self.eval_steps = 0 # reset state and env state = self.envs.reset() prev_state = torch.FloatTensor(state).to(self.device) current_cost = torch.zeros(self.args.num_envs, 1).float().to(self.device) ep_reward = 0 ep_len = 0 ep_constraint = 0 start_time = time.time() while self.num_episodes < self.args.num_episodes: values = [] c_q_vals = [] c_r_vals = [] states = [] actions = [] mus = [] prev_states = [] rewards = [] done_masks = [] begin_masks = [] constraints = [] # n-step sarsa for _ in range(self.args.traj_len): state = torch.FloatTensor(state).to(self.device) # get the action action = self.safe_deterministic_pi(state, current_cost=current_cost) next_state, reward, done, info = self.envs.step(action) # convert it back to tensor action = torch.LongTensor(action).unsqueeze(1).to(self.device) q_values = self.dqn(state) Q_value = q_values.gather(1, action) c_q_values = self.cost_model(state) cost_q_val = c_q_values.gather(1, action) cost_r_val = self.review_model(state) # logging mode for only agent 1 ep_reward += reward[0] ep_constraint += info[0][self.cost_indicator] values.append(Q_value) c_r_vals.append(cost_r_val) c_q_vals.append(cost_q_val) rewards.append( torch.FloatTensor(reward).unsqueeze(1).to(self.device)) done_masks.append( torch.FloatTensor(1.0 - done).unsqueeze(1).to(self.device)) begin_masks.append( torch.FloatTensor([(1.0 - ci['begin']) for ci in info ]).unsqueeze(1).to(self.device)) constraints.append( torch.FloatTensor([ci[self.cost_indicator] for ci in info ]).unsqueeze(1).to(self.device)) prev_states.append(prev_state) states.append(state) actions.append(action) # update the state prev_state = state state = next_state # update the current cost # if done flag is true for the current env, this implies that the next_state cost = 0.0 # because the agent starts with 0.0 cost (or doesn't have access to it anyways) 
current_cost = torch.FloatTensor([ (ci[self.cost_indicator] * (1.0 - di)) for ci, di in zip(info, done) ]).unsqueeze(1).to(self.device) self.total_steps += (1 * self.args.num_envs) # hack to reuse the same code # iteratively add each done episode, so that can eval at regular interval for d_idx in range(done.sum()): if done[0] and d_idx == 0: if self.num_episodes % self.args.log_every == 0: self.log_episode_stats(ep_reward, ep_constraint) # reset the rewards anyways ep_reward = 0 ep_constraint = 0 self.num_episodes += 1 # eval the policy here after eval_every steps if self.num_episodes % self.args.eval_every == 0: eval_reward, eval_constraint = self.eval() self.results_dict["eval_rewards"].append(eval_reward) self.results_dict["eval_constraints"].append( eval_constraint) log('----------------------------------------') log('Eval[R]: {:.2f}\t'.format(eval_reward) +\ 'Eval[C]: {}\t'.format(eval_constraint) +\ 'Episode: {}\t'.format(self.num_episodes) +\ 'avg_eval_reward: {:.2f}\t'.format(np.mean(self.results_dict["eval_rewards"][-10:])) +\ 'avg_eval_constraint: {:.2f}\t'.format(np.mean(self.results_dict["eval_constraints"][-10:])) ) log('----------------------------------------') if self.writer: self.writer.add_scalar("eval_reward", eval_reward, self.eval_steps) self.writer.add_scalar("eval_constraint", eval_constraint, self.eval_steps) self.eval_steps += 1 # break here if self.num_episodes >= self.args.num_episodes: break # calculate targets here next_state = torch.FloatTensor(next_state).to(self.device) next_q_values = self.dqn(next_state) next_action = self.safe_deterministic_pi(next_state, current_cost) next_action = torch.LongTensor(next_action).unsqueeze(1).to( self.device) next_q_values = next_q_values.gather(1, next_action) # calculate targets target_Q_vals = self.compute_n_step_returns( next_q_values, rewards, done_masks) Q_targets = torch.cat(target_Q_vals).detach() Q_values = torch.cat(values) # loss loss = F.mse_loss(Q_values, Q_targets) self.optimizer.zero_grad() loss.backward() self.optimizer.step() # calculate the cost-targets next_c_value = self.cost_model(next_state) next_c_value = next_c_value.gather(1, next_action) cq_targets = self.compute_n_step_returns(next_c_value, constraints, done_masks) C_q_targets = torch.cat(cq_targets).detach() C_q_vals = torch.cat(c_q_vals) cost_critic_loss = F.mse_loss(C_q_vals, C_q_targets) self.critic_optimizer.zero_grad() cost_critic_loss.backward() self.critic_optimizer.step() # For the constraints (reverse) prev_value = self.review_model(prev_states[0]) c_r_targets = self.compute_reverse_n_step_returns( prev_value, constraints, begin_masks) C_r_targets = torch.cat(c_r_targets).detach() C_r_vals = torch.cat(c_r_vals) cost_review_loss = F.mse_loss(C_r_vals, C_r_targets) self.review_optimizer.zero_grad() cost_review_loss.backward() self.review_optimizer.step() # done with all the training # save the models self.save_models() def eval(self): """ evaluate the current policy and log it """ avg_reward = [] avg_constraint = [] with torch.no_grad(): for _ in range(self.args.eval_n): state = self.eval_env.reset() done = False ep_reward = 0 ep_constraint = 0 ep_len = 0 start_time = time.time() current_cost = torch.FloatTensor([0.0]).to( self.device).unsqueeze(0) while not done: # convert the state to tensor state = torch.FloatTensor(state).to( self.device).unsqueeze(0) # get the policy action action = self.safe_deterministic_pi( state, current_cost=current_cost, greedy_eval=True)[0] next_state, reward, done, info = self.eval_env.step(action) ep_reward 
+= reward ep_len += 1 ep_constraint += info[self.cost_indicator] # update the state state = next_state current_cost = torch.FloatTensor([ info[self.cost_indicator] * (1.0 - done) ]).to(self.device).unsqueeze(0) avg_reward.append(ep_reward) avg_constraint.append(ep_constraint) return np.mean(avg_reward), np.mean(avg_constraint) def save_models(self): """create results dict and save""" models = { "dqn": self.dqn.state_dict(), "cost_model": self.cost_model.state_dict(), "review_model": self.review_model.state_dict(), "env": copy.deepcopy(self.eval_env), } torch.save(models, os.path.join(self.args.out, 'models.pt')) # save the results torch.save(self.results_dict, os.path.join(self.args.out, 'results_dict.pt')) def load_models(self): models = torch.load(os.path.join(self.args.out, 'models.pt')) self.dqn.load_state_dict(models["dqn"]) self.eval_env = models["env"]
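# A small torch sketch (illustrative tensors, not the agent class above) of the safety
# filter inside safe_deterministic_pi: actions whose estimated cost Q_C(s, a) plus the
# reverse value C_r(s) exceed the budget d0 + current_cost are masked out, and if no
# action is feasible the agent falls back to the action that minimizes the cost estimate.
import torch

def safe_greedy_action(q_value, cost_q_val, cost_r_val, d0, current_cost):
    constraint_mask = torch.le(cost_q_val + cost_r_val, d0 + current_cost).float()
    filtered_q = (q_value + 1000.0) * constraint_mask      # shift keeps feasible Q-values positive
    filtered_action = filtered_q.max(1)[1]
    alt_action = (-cost_q_val).max(1)[1]                   # least-cost fallback action
    infeasible = (constraint_mask.sum(1) == 0).long()      # 1 where no action satisfies the budget
    return (1 - infeasible) * filtered_action + infeasible * alt_action

# One state, three actions, budget 1.0: action 2 has the best reward but violates the budget,
# so the feasible action 1 is chosen instead.
q = torch.tensor([[0.1, 0.5, 2.0]])
cost_q = torch.tensor([[0.2, 0.8, 5.0]])
cost_r = torch.tensor([[0.1, 0.1, 0.1]])
assert safe_greedy_action(q, cost_q, cost_r, d0=1.0, current_cost=0.0).item() == 1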
def learn(env_id, q_func, lr=5e-4, max_timesteps=10000, buffer_size=5000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, train_steps=10, learning_starts=500, batch_size=32, print_freq=10, checkpoint_freq=100, model_dir=None, gamma=1.0, target_network_update_freq=50, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, player_processes=None, player_connections=None): env, _, _ = create_gvgai_environment(env_id) # Create all the functions necessary to train the model # expert_decision_maker = ExpertDecisionMaker(env=env) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) session = tf.Session() session.__enter__() policy_path = os.path.join(model_dir, "Policy.pkl") model_path = os.path.join(model_dir, "model", "model") if os.path.isdir(os.path.join(model_dir, "model")): load_state(model_path) else: act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Initialize the parameters and copy them to the target network. U.initialize() update_target() act.save(policy_path) save_state(model_path) env.close() # Create the replay buffer if prioritized_replay: replay_buffer_path = os.path.join(model_dir, "Prioritized_replay.pkl") if os.path.isfile(replay_buffer_path): with open(replay_buffer_path, 'rb') as input_file: replay_buffer = pickle.load(input_file) else: replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer_path = os.path.join(model_dir, "Normal_replay.pkl") if os.path.isfile(replay_buffer_path): with open(replay_buffer_path, 'rb') as input_file: replay_buffer = pickle.load(input_file) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) episode_rewards = list() saved_mean_reward = -999999999 signal.signal(signal.SIGQUIT, signal_handler) global terminate_learning total_timesteps = 0 for timestep in range(max_timesteps): if terminate_learning: break for connection in player_connections: experiences, reward = connection.recv() episode_rewards.append(reward) for experience in experiences: replay_buffer.add(*experience) total_timesteps += 1 if total_timesteps < learning_starts: if timestep % 10 == 0: print("not strated yet", flush=True) continue if timestep % train_freq == 0: for i in range(train_steps): # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(total_timesteps)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if timestep % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if print_freq is not None and timestep % print_freq == 0: logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular( "% time spent exploring", int(100 * exploration.value(total_timesteps))) logger.dump_tabular() if timestep % checkpoint_freq == 0 and mean_100ep_reward > saved_mean_reward: act.save(policy_path) save_state(model_path) saved_mean_reward = mean_100ep_reward with open(replay_buffer_path, 'wb') as output_file: pickle.dump(replay_buffer, output_file, pickle.HIGHEST_PROTOCOL) send_message_to_all(player_connections, Message.UPDATE) send_message_to_all(player_connections, Message.TERMINATE) if mean_100ep_reward > saved_mean_reward: act.save(policy_path) with open(replay_buffer_path, 'wb') as output_file: pickle.dump(replay_buffer, output_file, pickle.HIGHEST_PROTOCOL) for player_process in player_processes: player_process.join() # player_process.terminate() return act.load(policy_path)
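# The distributed loop above coordinates player processes through pipe connections, a
# Message enum (Message.UPDATE / Message.TERMINATE), send_message_to_all(), and a SIGQUIT
# handler. Those helpers live elsewhere in the project; the sketch below is a hypothetical
# minimal version showing the assumed shape of that API, not the original code.
import signal
from enum import Enum

class Message(Enum):
    UPDATE = 1      # ask players to reload the latest saved policy
    TERMINATE = 2   # ask players to stop generating experience

def send_message_to_all(player_connections, message):
    for connection in player_connections:
        connection.send(message)

terminate_learning = False

def signal_handler(signum, frame):
    # Lets a SIGQUIT break out of the training loop gracefully.
    global terminate_learning
    terminate_learning = True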
def learn_continuous_tasks(env, q_func, env_name, time_stamp, total_num_episodes,
                           num_actions_pad=33, lr=1e-4, grad_norm_clipping=10,
                           max_timesteps=int(1e8), buffer_size=int(1e6), train_freq=1,
                           batch_size=64, print_freq=10, learning_starts=1000, gamma=0.99,
                           target_network_update_freq=500, prioritized_replay_alpha=0.6,
                           prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=2e6,
                           prioritized_replay_eps=int(1e8), num_cpu=16, timesteps_std=1e6,
                           initial_std=0.4, final_std=0.05, eval_freq=100,
                           n_eval_episodes=10, eval_std=0.01, callback=None):
    """Train a branching deepq model to solve continuous control tasks via discretization.

    Current assumptions in the implementation:
    - for solving continuous control domains via discretization (can be adjusted to be
      compatible with naturally discrete-action domains using 'env.action_space.n')
    - uniform number of sub-actions per action dimension (can be generalized to
      heterogeneous number of sub-actions across branches)

    Parameters
    ----------
    env : gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions_pad: int
        number of sub-actions per action dimension (= num of discretization grains/bars + 1)
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    grad_norm_clipping: int
        set None for no clipping
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the unified TD error for updating priorities.
        Erratum: The camera-ready copy of this paper incorrectly reported 1e-8.
        The value used to produce the results is 1e8.
    num_cpu: int
        number of cpus to use for training
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput(env.observation_space.shape, name=name) num_action_grains = num_actions_pad - 1 num_action_dims = env.action_space.shape[0] num_action_streams = num_action_dims num_actions = num_actions_pad * num_action_streams # total numb network outputs for action branching with one action dimension per branch act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, num_action_streams=num_action_streams, batch_size=batch_size, learning_rate=lr, grad_norm_clipping=grad_norm_clipping, gamma=gamma, scope="deepq", reuse=None) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, 'num_action_streams': num_action_streams, } # prioritized_replay: create the replay buffer replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) # epsilon_greedy = False: just greedy policy exploration = ConstantSchedule(value=0.0) # greedy policy std_schedule = LinearSchedule(schedule_timesteps=timesteps_std, initial_p=initial_std, final_p=final_std) # Initialize the parameters and copy them to the target network. U.initialize() update_target() # Initialize the parameters used for converting branching, discrete action indeces to continuous actions low = env.action_space.low high = env.action_space.high actions_range = np.subtract(high, low) episode_rewards = [] reward_sum = 0.0 num_episodes = 0 time_steps = [0] time_spent_exploring = [0] prev_time = time.time() n_trainings = 0 # Set up on-demand rendering of Gym environments using keyboard controls: 'r'ender or 's'top import termios, fcntl, sys fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO render = False displayed_mean_reward = None def evaluate(step, episode_number): global max_eval_reward_mean, model_saved print('Evaluate...') eval_reward_sum = 0.0 # Run evaluation episodes for eval_episode in range(n_eval_episodes): obs = env.reset() done = False while not done: # Choose action action_idxes = np.array( act(np.array(obs)[None], stochastic=False)) # deterministic actions_greedy = action_idxes / num_action_grains * actions_range + low if eval_std == 0.0: action = actions_greedy else: action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: a_stoch = np.random.normal(loc=a_greedy, scale=eval_std) a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action.append(a_stoch) out_of_range_action = False # Step obs, rew, done, _ = env.step(action) eval_reward_sum += rew # Average the rewards and log eval_reward_mean = eval_reward_sum / n_eval_episodes print(eval_reward_mean, 'over', n_eval_episodes, 'episodes') with open("results/{}_{}_eval.csv".format(time_stamp, env_name), "a") as eval_fw: eval_writer = csv.writer( eval_fw, delimiter="\t", lineterminator="\n", ) eval_writer.writerow([episode_number, step, eval_reward_mean]) if max_eval_reward_mean is None or eval_reward_mean > max_eval_reward_mean: logger.log( "Saving model due to mean eval increase: {} -> {}".format( max_eval_reward_mean, eval_reward_mean)) U.save_state(model_file) model_saved = True max_eval_reward_mean = 
eval_reward_mean with tempfile.TemporaryDirectory() as td: model_file = os.path.join(td, "model") evaluate(0, 0) obs = env.reset() with open("results/{}_{}.csv".format(time_stamp, env_name), "w") as fw: writer = csv.writer( fw, delimiter="\t", lineterminator="\n", ) t = -1 while True: t += 1 # Select action and update exploration probability action_idxes = np.array( act(np.array(obs)[None], update_eps=exploration.value(t))) # Convert sub-actions indexes (discrete sub-actions) to continuous controls action = action_idxes / num_action_grains * actions_range + low # epsilon_greedy = False: use Gaussian noise actions_greedy = action action_idx_stoch = [] action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: # Sample from a Gaussian with mean at the greedy action and a std following a schedule of choice a_stoch = np.random.normal(loc=a_greedy, scale=std_schedule.value(t)) # Convert sampled cont action to an action idx a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) # Check if action is in range if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action_idx_stoch.append(a_idx_stoch) action.append(a_stoch) out_of_range_action = False action_idxes = action_idx_stoch new_obs, rew, done, _ = env.step(action) # On-demand rendering if (t + 1) % 100 == 0: # TO DO better? termios.tcsetattr(fd, termios.TCSANOW, newattr) oldflags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK) try: try: c = sys.stdin.read(1) if c == 'r': print() print('Rendering begins...') render = True elif c == 's': print() print('Stop rendering!') render = False env.render(close=True) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) fcntl.fcntl(fd, fcntl.F_SETFL, oldflags) # Visualize Gym environment on render if render: env.render() # Store transition in the replay buffer replay_buffer.add(obs, action_idxes, rew, new_obs, float(done)) obs = new_obs reward_sum += rew if done: obs = env.reset() time_spent_exploring[-1] = int(100 * exploration.value(t)) time_spent_exploring.append(0) episode_rewards.append(reward_sum) time_steps[-1] = t reward_sum = 0.0 time_steps.append(0) # Frequently log to file writer.writerow( [len(episode_rewards), t, episode_rewards[-1]]) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer # prioritized_replay experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience td_errors = train( obses_t, actions, rewards, obses_tp1, dones, weights) #np.ones_like(rewards)) #TEMP AT NEW # prioritized_replay new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) n_trainings += 1 if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically update_target() if len(episode_rewards) == 0: mean_100ep_reward = 0 elif len(episode_rewards) < 100: mean_100ep_reward = np.mean(episode_rewards) else: mean_100ep_reward = np.mean(episode_rewards[-100:]) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", 
int(100 * exploration.value(t))) current_time = time.time() logger.record_tabular( "trainings per second", n_trainings / (current_time - prev_time)) logger.dump_tabular() n_trainings = 0 prev_time = current_time if t > learning_starts and num_episodes > 100: if displayed_mean_reward is None or mean_100ep_reward > displayed_mean_reward: if print_freq is not None: logger.log("Mean reward increase: {} -> {}".format( displayed_mean_reward, mean_100ep_reward)) displayed_mean_reward = mean_100ep_reward # Performance evaluation with a greedy policy if done and num_episodes % eval_freq == 0: evaluate(t + 1, num_episodes) obs = env.reset() # STOP training if num_episodes >= total_num_episodes: break if model_saved: logger.log("Restore model with mean eval: {}".format( max_eval_reward_mean)) U.load_state(model_file) data_to_log = { 'time_steps': time_steps, 'episode_rewards': episode_rewards, 'time_spent_exploring': time_spent_exploring } # Write to file the episodic rewards, number of steps, and the time spent exploring with open("results/{}_{}.txt".format(time_stamp, env_name), 'wb') as fp: pickle.dump(data_to_log, fp) return ActWrapper(act, act_params)
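# Hedged usage sketch (not part of the original training code): how learn_continuous_tasks
# might be invoked on a continuous-control Gym task. The MLP q_func below is a simplified
# stand-in for the repository's branching Q-network builder; 'Reacher-v2', the layer sizes,
# and the episode budget are illustrative assumptions. It assumes TF1-style tf.layers,
# consistent with the placeholder/session utilities used above.
def _example_mlp_q_func(observation_in, num_actions, scope, reuse=False):
    # Returns a (batch_size, num_actions) tensor of sub-action values, matching the
    # q_func contract described in the docstring above.
    with tf.variable_scope(scope, reuse=reuse):
        out = tf.layers.flatten(observation_in)
        out = tf.layers.dense(out, 512, activation=tf.nn.relu)
        out = tf.layers.dense(out, 256, activation=tf.nn.relu)
        return tf.layers.dense(out, num_actions, activation=None)

def _example_run_bdq():
    # Call manually to train on an environment with a Box action space.
    example_env = gym.make('Reacher-v2')
    learn_continuous_tasks(
        env=example_env,
        q_func=_example_mlp_q_func,
        env_name='Reacher-v2',
        time_stamp=time.strftime('%Y-%m-%d_%H-%M-%S'),
        total_num_episodes=10000,
        num_actions_pad=33,  # 33 sub-actions per action dimension
    )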
def learn_att(env, q_func, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs ): # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) # q_func = build_q_func(network, **network_kwargs) is skipped here since the q_func is passed in directly # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = build_train_att( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, # mask function for the choice of actions; None is a placeholder to be replaced by the caller's mask function (a hedged example is sketched after learn_att) mask_func=None ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. 
replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) load_variables(model_file) return act
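# Hedged sketch of an action-mask function for build_train_att (illustrative only). The exact
# contract of mask_func is not shown in this file; here it is assumed to map a batch of
# observations to a {0, 1} mask of shape (batch_size, num_actions), where 1 marks an action
# that is currently allowed. The masking rule inside is a placeholder to be replaced by the
# real environment's constraints.
def example_mask_func(obs_batch, num_actions):
    """Return a per-observation binary mask over the discrete actions (assumed contract)."""
    obs_batch = np.asarray(obs_batch)
    mask = np.ones((obs_batch.shape[0], num_actions), dtype=np.float32)
    # Placeholder rule: forbid the last action whenever the first observation feature is negative.
    mask[obs_batch[:, 0] < 0.0, num_actions - 1] = 0.0
    return mask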
def main(): with open('cartpole.json', encoding='utf-8') as config_file: config = json.load(config_file) env = gym.make('CartPole-v0') state_shape = env.observation_space.shape action_count = env.action_space.n layers = [] for layer in config['layers']: layers.append(Dense(layer, activation=C.relu)) layers.append(Dense((action_count, config['n']), activation=None)) model_func = Sequential(layers) replay_buffer = ReplayBuffer(config['buffer_capacity']) # Fill the buffer with randomly generated samples state = env.reset() for i in range(config['buffer_capacity']): action = env.action_space.sample() post_state, reward, done, _ = env.step(action) replay_buffer.add(state.astype(np.float32), action, reward, post_state.astype(np.float32), float(done)) if done: state = env.reset() reward_buffer = np.zeros(config['max_episodes'], dtype=np.float32) losses = [] epsilon_schedule = LinearSchedule(1, 0.01, config['max_episodes']) agent = CategoricalAgent(state_shape, action_count, model_func, config['vmin'], config['vmax'], config['n'], lr=config['lr'], gamma=config['gamma']) log_freq = config['log_freq'] for episode in range(1, config['max_episodes'] + 1): state = env.reset().astype(np.float32) done = False while not done: action = agent.act(state, epsilon_schedule.value(episode)) post_state, reward, done, _ = env.step(action) post_state = post_state.astype(np.float32) replay_buffer.add(state, action, reward, post_state, float(done)) reward_buffer[episode - 1] += reward state = post_state minibatch = replay_buffer.sample(config['minibatch_size']) agent.train(*minibatch) loss = agent.trainer.previous_minibatch_loss_average losses.append(loss) if episode % config['target_update_freq'] == 0: agent.update_target() if episode % log_freq == 0: average = np.sum(reward_buffer[episode - log_freq: episode]) / log_freq print('Episode {:4d} | Loss: {:6.4f} | Reward: {}'.format(episode, loss, average)) agent.model.save('cartpole.cdqn') sns.set_style('dark') pd.Series(reward_buffer).rolling(window=log_freq).mean().plot() plt.xlabel('Episode') plt.ylabel('Reward') plt.title('CartPole - Reward with Time') plt.show() plt.plot(np.arange(len(losses)), losses) plt.xlabel('Episode') plt.ylabel('Loss') plt.title('CartPole - Loss with Time') plt.show()
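# Hedged example of the 'cartpole.json' configuration that main() expects. The keys match the
# config[...] lookups above; every value is an illustrative assumption rather than the original
# setting used to produce any reported results.
EXAMPLE_CARTPOLE_CONFIG = {
    "layers": [128, 128],       # hidden Dense layer sizes
    "n": 51,                    # number of atoms in the categorical value distribution
    "vmin": -10.0,              # lower edge of the distribution support
    "vmax": 10.0,               # upper edge of the distribution support
    "buffer_capacity": 50000,
    "minibatch_size": 32,
    "lr": 1e-3,
    "gamma": 0.99,
    "max_episodes": 1000,
    "target_update_freq": 50,   # measured in episodes, matching the training loop above
    "log_freq": 10,
}
# To materialize the file: json.dump(EXAMPLE_CARTPOLE_CONFIG, open('cartpole.json', 'w'), indent=2)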
class Agent(tf.Module): def __init__(self, config, env): self.config = config self.agent_ids = [a for a in range(config.num_agents)] self.env = env self.optimizer = tf.keras.optimizers.Adam(self.config.lr) self.replay_memory, self.beta_schedule = init_replay_memory(config) self.model = init_network(config) self.target_model = init_network(config) self.model.summary() tf.keras.utils.plot_model(self.model, to_file='./model.png') if self.config.dueling: self.agent_heads = self.build_agent_heads_dueling() self.target_agent_heads = self.build_agent_heads_dueling() self.agent_heads[0].summary() tf.keras.utils.plot_model(self.agent_heads[0], to_file='./agent_heads_model.png') else: self.agent_heads = self.build_agent_heads() self.target_agent_heads = self.build_agent_heads() # Create the schedule for exploration starting from 1. self.exploration = LinearSchedule(schedule_timesteps=int(config.exploration_fraction * config.num_timesteps), initial_p=1.0, final_p=config.exploration_final_eps) if config.load_path is not None: self.load_models(config.load_path) self.loss = self.nstep_loss self.eps = tf.Variable(0.0) self.one_hot_agents = tf.expand_dims(tf.one_hot(self.agent_ids, len(self.agent_ids), dtype=tf.float32), axis=1) print(f'self.onehot_agent.shape is {self.one_hot_agents.shape}') def build_agent_heads(self): """ :return: list of heads for agents - gets tensorflow model and adds heads for each agent """ input_shape = self.model.output_shape heads = [] for a in self.agent_ids: name = 'head_agent_' + str(a) head_a = tf.keras.layers.Dense(units=self.config.num_actions, activation=None, kernel_initializer=tf.keras.initializers.Orthogonal(1.0), bias_initializer=tf.keras.initializers.Constant(0.0), name=name) head_a.build(input_shape) heads.append(head_a) return heads def build_agent_heads_dueling(self): """ :return: list of heads for agents - gets tensorflow model and adds heads for each agent """ input_shape = self.model.output_shape[-1] print(input_shape) heads = [] inputs = tf.keras.layers.Input(input_shape) for a in self.agent_ids: name = 'head_agent_' + str(a) with tf.name_scope(f'action_value_{name}'): action_head_a = tf.keras.layers.Dense(units=self.config.num_actions, activation=None, kernel_initializer=tf.keras.initializers.Orthogonal(1.0), bias_initializer=tf.keras.initializers.Constant(0.0), name='action_' + name)(inputs) with tf.name_scope(f'state_value_{name}'): state_head_a = tf.keras.layers.Dense(units=self.config.num_actions, activation=None, kernel_initializer=tf.keras.initializers.Orthogonal(1.0), bias_initializer=tf.keras.initializers.Constant(0.0), name='state_' + name)(inputs) action_scores_mean = tf.reduce_mean(action_head_a, 1) action_scores_centered = action_head_a - tf.expand_dims(action_scores_mean, 1) head_a = state_head_a + action_scores_centered head_a = tf.keras.Model(inputs=inputs, outputs=head_a) heads.append(head_a) return heads @tf.function def choose_action(self, obs, stochastic=True, update_eps=-1): """ :param obs: list observations one for each agent :param stochastic: True for Train phase and False for test phase :param update_eps: epsilon update for eps-greedy :return: actions: list of actions chosen by agents based on observation one for each agent """ actions = [] for a in self.agent_ids: inputs = {0: np.expand_dims(obs[a], 0), 1: self.one_hot_agents[a]} fc_values = self.model(inputs) # print(f'fc_values.shape {fc_values.shape}') q_values = self.agent_heads[a](fc_values) # print(f'q_values.shape {q_values.shape}') deterministic_actions = tf.argmax(q_values, 
axis=1) # print(f'deterministic_actions {deterministic_actions}') batch_size = 1 random_actions = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=self.config.num_actions, dtype=tf.int64) # print(f'random_actions {random_actions}') chose_random = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < self.eps # print(f'chose_random {chose_random}') stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) # print(f'stochastic_actions {stochastic_actions}') if stochastic: actions.append(stochastic_actions.numpy()[0]) else: actions.append(deterministic_actions.numpy()[0]) if update_eps >= 0: self.eps.assign(update_eps) # print(f'actions {actions}') return actions @tf.function def value(self, obs): """ :param obs: list observations one for each agent :return: best values based on Q-Learning formula max Q(s',a') """ values = [] for a in self.agent_ids: inputs = {0: np.expand_dims(obs[a], 0), 1: self.one_hot_agents[a]} fc_values = self.target_model(inputs) q_values = self.target_agent_heads[a](fc_values) if self.config.double_q: fc_values_using_online_net = self.model(inputs) q_values_using_online_net = self.agent_heads[a](fc_values_using_online_net) q_value_best_using_online_net = tf.argmax(q_values_using_online_net, 1) q_tp1_best = tf.reduce_sum( q_values * tf.one_hot(q_value_best_using_online_net, self.config.num_actions, dtype=tf.float32), 1) else: q_tp1_best = tf.reduce_max(q_values, 1) values.append(q_tp1_best.numpy()[0]) return values @tf.function() def nstep_loss(self, obses_t, actions, rewards, weights, agent_id): # print(f'obses_t.shape {obses_t.shape}') s = obses_t.shape obses_t = tf.reshape(obses_t, (s[0]*s[1], *s[2:])) # print(f'obses_t.shape {obses_t.shape}') s = actions.shape actions = tf.reshape(actions, (s[0] * s[1], *s[2:])) # print(f'actions.shape {actions.shape}') s = rewards.shape rewards = tf.reshape(rewards, (s[0] * s[1], *s[2:])) # print(f'rewards.shape {rewards.shape}') s = weights.shape weights = tf.reshape(weights, (s[0] * s[1], *s[2:])) # print(f'weights.shape {weights.shape}') inputs = {0: obses_t, 1: tf.tile(self.one_hot_agents[agent_id], (s[0]*s[1], 1))} fc_values = self.model(inputs) q_t = self.agent_heads[agent_id](fc_values) q_t_selected = tf.reduce_sum(q_t * tf.one_hot(actions, self.config.num_actions, dtype=tf.float32), 1) # print(f'q_t_selected.shape is {q_t_selected.shape}') td_error = q_t_selected - tf.stop_gradient(rewards) errors = huber_loss(td_error) weighted_loss = tf.reduce_mean(weights * errors) return weighted_loss, td_error @tf.function() def train(self, obses_t, actions, rewards, weights): # print(f'obses_t.shape {obses_t.shape}') td_errors = [] loss = [] with tf.GradientTape() as tape: for a in self.agent_ids: loss_a, td_error = self.loss(obses_t[:, a], actions[:, a], rewards[:, a], weights[:, a], a) loss.append(loss_a) td_errors.append(td_error) sum_loss = tf.reduce_sum(loss) sum_td_error = tf.reduce_sum(td_errors) # aggregate the TD errors of all agents, matching sum_loss print(f'sum_loss is {sum_loss}, loss is {loss}') param = self.model.trainable_variables for a in self.agent_ids: param += self.agent_heads[a].trainable_variables # print(f'param {param}') grads = tape.gradient(sum_loss, param) if self.config.grad_norm_clipping: clipped_grads = [] for grad in grads: clipped_grads.append(tf.clip_by_norm(grad, self.config.grad_norm_clipping)) grads = clipped_grads grads_and_vars = list(zip(grads, param)) self.optimizer.apply_gradients(grads_and_vars) return sum_loss.numpy(), sum_td_error.numpy() @tf.function(autograph=False) def 
update_target(self): for var, var_target in zip(self.model.trainable_variables, self.target_model.trainable_variables): var_target.assign(var) vars, target_vars = [], [] for a in self.agent_ids: vars.extend(self.agent_heads[a].trainable_variables) target_vars.extend(self.target_agent_heads[a].trainable_variables) for var, var_target in zip(vars, target_vars): var_target.assign(var) @tf.function(autograph=False) def soft_update_target(self): # Polyak averaging; assumes self.tau (the soft-update rate) has been set on the agent, e.g. from the config for var, var_target in zip(self.model.trainable_variables, self.target_model.trainable_variables): var_target.assign(self.tau * var + (1.0 - self.tau) * var_target) vars, target_vars = [], [] for a in self.agent_ids: vars.extend(self.agent_heads[a].trainable_variables) target_vars.extend(self.target_agent_heads[a].trainable_variables) for var, var_target in zip(vars, target_vars): var_target.assign(self.tau * var + (1.0 - self.tau) * var_target) def save(self, save_path): self.model.save_weights(f'{save_path}/value_network.h5') self.target_model.save_weights(f'{save_path}/target_network.h5') for a in self.agent_ids: self.agent_heads[a].save_weights(f'{save_path}/agent_{a}_head.h5') self.target_agent_heads[a].save_weights(f'{save_path}/target_agent_{a}_head.h5') def load(self, load_path): self.model.load_weights(f'{load_path}/value_network.h5') self.target_model.load_weights(f'{load_path}/target_network.h5') for a in self.agent_ids: self.agent_heads[a].load_weights(f'{load_path}/agent_{a}_head.h5') self.target_agent_heads[a].load_weights(f'{load_path}/target_agent_{a}_head.h5') def learn(self): episode_rewards = [0.0] saved_mean_reward = None obs = self.env.reset() done = False # Start total timer tstart = time.time() for t in range(self.config.num_timesteps): update_eps = tf.constant(self.exploration.value(t)) if t % self.config.print_freq == 0: time_1000_step = time.time() nseconds = time_1000_step - tstart tstart = time_1000_step print(f'time spent to perform {t - self.config.print_freq} to {t} steps is {nseconds}') print('eps update', self.exploration.value(t)) mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], [] # mb_states = states epinfos = [] for _ in range(self.config.n_steps): actions = self.choose_action(tf.constant(obs), update_eps=update_eps) # print(f'actions is {actions}') mb_obs.append(obs.copy()) mb_actions.append(actions) mb_dones.append([float(done) for _ in self.agent_ids]) obs1, rews, done, info = self.env.step(actions) if self.config.same_reward_for_agents: rews = [np.max(rews) for _ in range(len(rews))] # in cooperative settings, give every agent the same reward mb_rewards.append(rews) obs = obs1 maybeepinfo = info.get('episode') if maybeepinfo: epinfos.append(maybeepinfo) episode_rewards[-1] += np.max(rews) if done: episode_rewards.append(0.0) obs = self.env.reset() mb_dones.append([float(done) for _ in self.agent_ids]) # swap axes to have lists in shape of (num_agents, num_steps, ...) 
mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1) mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1) mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(0, 1) mb_masks = mb_dones[:, -1] mb_dones = mb_dones[:, 1:] # print(f'before discount mb_rewards is {mb_rewards}') if self.config.gamma > 0.0: # Discount/bootstrap off value fn last_values = self.value(tf.constant(obs1)) # print(f'last_values {last_values}') for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.config.gamma) mb_rewards[n] = rewards # print(f'after discount mb_rewards is {mb_rewards}') if self.config.replay_buffer is not None: self.replay_memory.add((mb_obs, mb_actions, mb_rewards, obs1, mb_masks)) if t > self.config.learning_starts and t % self.config.train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if self.config.prioritized_replay: experience = self.replay_memory.sample(self.config.batch_size, beta=self.beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = self.replay_memory.sample(self.config.batch_size) weights, batch_idxes = np.ones_like(rewards), None obses_t = tf.constant(obses_t) actions = tf.constant(actions) rewards = tf.constant(rewards) weights = tf.constant(weights) loss, td_errors = self.train(obses_t, actions, rewards, weights) if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0: # Update target network periodically. self.soft_update_target() if t % self.config.playing_test == 0 and t != 0: self.play_test_games() mean_100ep_reward = np.mean(episode_rewards[-101:-1]) num_episodes = len(episode_rewards) if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0: print(f'mean reward over last 100 episodes is {mean_100ep_reward} after {num_episodes} episodes') logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t))) logger.dump_tabular() def play_test_games(self): num_tests = self.config.num_tests test_env = init_env(self.config, mode='test') test_rewards = np.zeros(num_tests) for i in range(num_tests): test_done = False test_obs_all = test_env.reset() # print(np.asarray(test_obs_all).shape) while not test_done: test_obs_all = tf.constant(test_obs_all) test_action_list = self.choose_action(test_obs_all, stochastic=False) test_new_obs_list, test_rew_list, test_done, _ = test_env.step(test_action_list) test_obs_all = test_new_obs_list if test_done: print(f'test rewards for test game {i}: {test_rew_list}') test_rewards[i] = np.mean(test_rew_list) print(f'mean reward of {num_tests} tests is {np.mean(test_rewards)}') test_env.close()
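# Hedged usage sketch for the multi-agent Agent class (illustrative only). The config fields
# mirror the self.config.* attributes read by the class; every value below is an assumed
# placeholder, and init_env is the same helper the class already uses to build its test
# environments. 'tau' is attached to the agent explicitly because soft_update_target reads
# self.tau, which is not set in __init__.
from types import SimpleNamespace

def _example_run_agent():
    example_config = SimpleNamespace(
        num_agents=2, num_actions=5, lr=5e-4, gamma=0.99,
        dueling=True, double_q=True, grad_norm_clipping=10,
        num_timesteps=100000, n_steps=8,
        exploration_fraction=0.1, exploration_final_eps=0.02,
        replay_buffer=True, prioritized_replay=False, buffer_size=50000,
        learning_starts=1000, train_freq=1, batch_size=32,
        target_network_update_freq=500, tau=0.01,
        same_reward_for_agents=True, playing_test=10000, num_tests=5,
        print_freq=100, load_path=None,
    )
    env = init_env(example_config, mode='train')  # 'train' mode is an assumption
    agent = Agent(example_config, env)
    agent.tau = example_config.tau  # soft_update_target expects self.tau
    agent.learn()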