def init_demo_buffer(self, demoDataFile, update_stats=True):  # function that initializes the demo buffer
    demoData = np.load(demoDataFile)  # load the demonstration data from data file
    info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
    info_values = [np.empty((self.T - 1, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]

    demo_data_obs = demoData['obs']
    demo_data_acs = demoData['acs']
    demo_data_info = demoData['info']

    global DEMO_BUFFER
    for epsd in range(self.num_demo):  # we initialize the whole demo buffer at the start of the training
        obs, acts, goals, achieved_goals = [], [], [], []
        i = 0
        for transition in range(self.T - 1):
            obs.append([demo_data_obs[epsd][transition].get('observation')])
            acts.append([demo_data_acs[epsd][transition]])
            goals.append([demo_data_obs[epsd][transition].get('desired_goal')])
            achieved_goals.append([demo_data_obs[epsd][transition].get('achieved_goal')])
            for idx, key in enumerate(info_keys):
                info_values[idx][transition, i] = demo_data_info[epsd][transition][key]

        obs.append([demo_data_obs[epsd][self.T - 1].get('observation')])
        achieved_goals.append([demo_data_obs[epsd][self.T - 1].get('achieved_goal')])

        episode = dict(o=obs, u=acts, g=goals, ag=achieved_goals)
        for key, value in zip(info_keys, info_values):
            episode['info_{}'.format(key)] = value

        episode = convert_episode_to_batch_major(episode)
        DEMO_BUFFER.store_episode(episode)  # create the observation dict and append it to the demonstration buffer
        logger.debug("Demo buffer size currently ", DEMO_BUFFER.get_current_size())  # print out the demonstration buffer size

        if update_stats:
            # add transitions to normalizer to normalize the demo data as well
            episode['o_2'] = episode['o'][:, 1:, :]
            episode['ag_2'] = episode['ag'][:, 1:, :]
            num_normalizing_transitions = transitions_in_episode_batch(episode)
            transitions = self.sample_transitions(episode, num_normalizing_transitions)

            o, g, ag = transitions['o'], transitions['g'], transitions['ag']
            transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
            # No need to preprocess o_2 and g_2 since they are only used for stats

            self.o_stats.update(transitions['o'])
            self.g_stats.update(transitions['g'])

            self.o_stats.recompute_stats()
            self.g_stats.recompute_stats()
        episode.clear()

    logger.info("Demo buffer size: ", DEMO_BUFFER.get_current_size())  # print out the demonstration buffer size
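# NOTE: illustrative only. init_demo_buffer() above expects a file loadable by np.load() with
# 'obs', 'acs', and 'info' entries indexed as [episode][timestep]. The sketch below shows one
# way such a demo file could be produced; the helper name save_demo_file and the default path
# are assumptions, not taken from the code above.
import numpy as np

def save_demo_file(observations, actions, infos, path='demo_data.npz'):
    # observations[episode][t] is a dict with 'observation', 'desired_goal', 'achieved_goal';
    # actions[episode][t] is the action taken; infos[episode][t] is a dict of per-step info keys.
    np.savez_compressed(path, obs=observations, acs=actions, info=infos)
    # Newer numpy versions require np.load(path, allow_pickle=True) to read these object arrays.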
def step(self, action, project=True):
    # Only step the underlying OpenSim environment every `frameskip` calls;
    # otherwise return the cached result from the most recent real step.
    if self.step_num % self.frameskip == 0:
        opensim_action = openai_to_opensim_action(action)
        observation_dict, reward, done, info = self.env.step(opensim_action, project=False)
        observation_dict, observation_projection = transform_observation(
            observation_dict,
            reward_shaping=self.reward_shaping,
            reward_shaping_x=0,
            feature_embellishment=self.feature_embellishment,
            relative_x_pos=self.relative_x_pos,
            relative_z_pos=self.relative_z_pos)
        if done:
            logger.debug(" eval: reward:{:>6.1f}".format(reward))
        self.prev_step = observation_dict, observation_projection, reward, done, info
    else:
        observation_dict, observation_projection, reward, done, info = self.prev_step
    self.step_num += 1
    if project:
        return observation_projection, reward, done, info
    else:
        return observation_dict, reward, done, info
def shaped_reward(observation_dict, reward, done, reward_shaping_x):
    torso_xaxis_rwd = torso_xaxis_lean_reward(observation_dict) * reward_shaping_x
    torso_zaxis_rwd = torso_zaxis_lean_reward(observation_dict) * reward_shaping_x
    legs_xaxis_rwd = femurs_xaxis_lean_reward(observation_dict) * reward_shaping_x
    legs_zaxis_rwd = femurs_zaxis_lean_reward(observation_dict) * reward_shaping_x
    knees_rwd = knees_flexion_reward(observation_dict) * reward_shaping_x
    tibias_rwd = tibias_pos_reward(observation_dict) * reward_shaping_x

    torso_xaxis_lean = observation_dict["z_torso_xaxis_lean"]
    torso_zaxis_lean = observation_dict["z_torso_zaxis_lean"]
    z_femur_l_xaxis_lean = observation_dict["z_femur_l_xaxis_lean"]
    z_femur_l_zaxis_lean = observation_dict["z_femur_l_zaxis_lean"]
    z_femur_r_xaxis_lean = observation_dict["z_femur_r_xaxis_lean"]
    z_femur_r_zaxis_lean = observation_dict["z_femur_r_zaxis_lean"]
    knees_flexion = observation_dict["z_knees_flexion"]

    shaped_reward = (reward + torso_xaxis_rwd + torso_zaxis_rwd + legs_xaxis_rwd +
                     legs_zaxis_rwd + knees_rwd + tibias_rwd)

    if done:
        logger.debug("train: reward:{:>6.1f} shaped reward:{:>6.1f} torso:{:>6.1f} ({:>8.3f}) "
                     "legs:{:>6.1f} ({:>8.3f}, {:>8.3f}) knee flex:{:>6.1f} ({:>8.3f})".format(
                         reward, shaped_reward, torso_xaxis_rwd, torso_xaxis_lean,
                         legs_xaxis_rwd, z_femur_l_xaxis_lean, z_femur_r_xaxis_lean,
                         knees_rwd, knees_flexion))
    return shaped_reward
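# NOTE: the component reward functions used above (torso_xaxis_lean_reward, knees_flexion_reward,
# etc.) are defined elsewhere in the repo. Purely as an illustration of the pattern, a shaping
# term could penalize squared deviation of a lean feature from an upright target; the function
# name, target, and scale below are assumptions, not the repo's actual constants.
def example_lean_reward(observation_dict, key="z_torso_xaxis_lean", target=0.0, scale=1.0):
    lean = observation_dict[key]
    return -scale * (lean - target) ** 2  # 0 when upright, increasingly negative as the lean grows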
def init_HV_buffer(self, HVDataFile, update_stats=True):
    #print('[init_HV_buffer @ DDPG] -> Step 1: Reference passed correctly.')
    #hv_buffer = pickle.load(open(HVDataFile, "rb"))
    hv_buffer = joblib.load(HVDataFile)
    self.HV_BUFFER.store_buffer(hv_buffer)
    logger.debug("HV buffer size currently ", self.HV_BUFFER.get_current_size())  # print out the HV buffer size

    if update_stats:
        # add transitions to the normalizer to normalize the HV data as well
        transitions = self.HV_BUFFER.buffers

        o, g, ag = transitions['o'], transitions['g'], transitions['ag']
        transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
        # No need to preprocess o_2 and g_2 since they are only used for stats

        self.o_stats.update(transitions['o'])
        self.g_stats.update(transitions['g'])

        self.o_stats.recompute_stats()
        self.g_stats.recompute_stats()
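# NOTE: illustrative counterpart to the joblib.load() call above: the HV data file could simply
# be produced by dumping an existing buffer object with joblib. The helper name and default path
# are assumptions based only on how the file is read back in init_HV_buffer().
import joblib

def save_hv_buffer(hv_buffer, path='hv_buffer.joblib'):
    joblib.dump(hv_buffer, path)  # later restored via joblib.load(path) in init_HV_buffer()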
def main():
    args = parse_args()
    logger.configure()

    gamma = 0.99
    tau = 0.01
    normalize_returns = False
    normalize_observations = True
    batch_size = 64
    action_noise = None
    stddev = 0.2
    param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev), desired_action_stddev=float(stddev))
    critic_l2_reg = 1e-2
    actor_lr = 1e-4
    critic_lr = 1e-3
    popart = False
    clip_norm = None
    reward_scale = 1.

    env = prosthetics_env.Wrapper(osim_env.ProstheticsEnv(visualize=False),
                                  frameskip=4,
                                  reward_shaping=True,
                                  reward_shaping_x=1,
                                  feature_embellishment=True,
                                  relative_x_pos=True,
                                  relative_z_pos=True)

    top_model_dir = 'top-models/'

    # create tf sessions and graphs, one per model file
    sess_list = []
    graph_list = []
    for i in range(len(args.model_files)):
        graph_list.append(tf.Graph())
        sess_list.append(tf.Session(graph=graph_list[i]))

    ddpg_agents = []
    for i in range(len(args.model_files)):
        model_name = args.model_files[i]
        sess = sess_list[i]
        graph = graph_list[i]
        l_size = args.layer_sizes[i]
        with sess.as_default():
            #with U.make_session(num_cpu=1, graph=g) as sess:
            with graph.as_default():
                #tf.global_variables_initializer()
                # restore agents from model files and store them in ddpg_agents
                print("Restoring from..." + model_name)

                # Configure components.
                memory = Memory(limit=int(1e6), action_shape=env.action_space.shape,
                                observation_shape=env.observation_space.shape)
                critic = Critic(layer_norm=True, activation='relu', layer_sizes=[l_size, l_size])
                actor = Actor(env.action_space.shape[-1], layer_norm=True, activation='relu',
                              layer_sizes=[l_size, l_size])

                agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
                             gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                             normalize_observations=normalize_observations, batch_size=batch_size,
                             action_noise=action_noise, param_noise=param_noise,
                             critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,
                             enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale)

                # restore adam state and param noise
                restore_model_path = top_model_dir + model_name
                saver = tf.train.Saver(max_to_keep=500)

                # restore network weights
                saver.restore(sess, restore_model_path)

                adam_optimizer_store = pickle.load(open(restore_model_path + ".pkl", "rb"))
                agent.actor_optimizer.m = adam_optimizer_store['actor_optimizer']['m']
                agent.actor_optimizer.v = adam_optimizer_store['actor_optimizer']['v']
                agent.actor_optimizer.t = adam_optimizer_store['actor_optimizer']['t']
                agent.critic_optimizer.m = adam_optimizer_store['critic_optimizer']['m']
                agent.critic_optimizer.v = adam_optimizer_store['critic_optimizer']['v']
                agent.critic_optimizer.t = adam_optimizer_store['critic_optimizer']['t']
                if 'param_noise' in adam_optimizer_store:
                    agent.param_noise = adam_optimizer_store['param_noise']

                # initialize and prepare the agent session.
                agent.initialize(sess)
                #sess.graph.finalize()
                agent.reset()
                ddpg_agents.append(agent)

    agent = BlendedAgent(ddpg_agents, sess_list, graph_list)

    if args.evaluation:
        # set up the eval env
        eval_env = prosthetics_env.EvaluationWrapper(osim_env.ProstheticsEnv(visualize=False),
                                                     frameskip=4,
                                                     reward_shaping=True,
                                                     reward_shaping_x=1,
                                                     feature_embellishment=True,
                                                     relative_x_pos=True,
                                                     relative_z_pos=True)
        eval_env.change_model(model=('3D').upper(), prosthetic=True, difficulty=0, seed=0)
        eval_env = bench.Monitor(eval_env, os.path.join(logger.get_dir(), 'gym_eval'))

        nb_eval_steps = 1000
        # reward, mean_q, final_steps = evaluate_one_episode(eval_env, ddpg_agents, sess_list, graph_list,
        #                                                    nb_eval_steps=nb_eval_steps,
        #                                                    render=False)
        reward, mean_q, final_steps = evaluate_one_episode(eval_env, agent, nb_eval_steps, render=False)
        print("Reward: " + str(reward))
        print("Mean Q: " + str(mean_q))
        print("Final num steps: " + str(final_steps))

    # Submit to crowdai competition. What a hack. :)
    # if crowdai_client is not None and crowdai_token is not None and eval_env is not None:
    crowdai_submit_count = 0
    if args.crowdai_submit:
        remote_base = "http://grader.crowdai.org:1729"
        crowdai_client = Client(remote_base)
        eval_obs_dict = crowdai_client.env_create(args.crowdai_token, env_id="ProstheticsEnv")
        eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
            eval_obs_dict,
            reward_shaping=True,
            reward_shaping_x=1.,
            feature_embellishment=True,
            relative_x_pos=True,
            relative_z_pos=True)
        while True:
            action, _ = agent.pi(eval_obs_projection, apply_noise=False, compute_Q=False)
            submit_action = prosthetics_env.openai_to_crowdai_submit_action(action)
            clipped_submit_action = np.clip(submit_action, 0., 1.)
            actions_equal = clipped_submit_action == submit_action
            if not np.all(actions_equal):
                logger.debug("crowdai_submit_count:", crowdai_submit_count)
                logger.debug(" openai-action:", action)
                logger.debug(" submit-action:", submit_action)
            crowdai_submit_count += 1
            [eval_obs_dict, reward, done, info] = crowdai_client.env_step(clipped_submit_action.tolist(), True)
            # [eval_obs_dict, reward, done, info] = crowdai_client.env_step(agent.pi(eval_obs_projection, apply_noise=False, compute_Q=False), True)
            eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
                eval_obs_dict,
                reward_shaping=True,
                reward_shaping_x=1.,
                feature_embellishment=True,
                relative_x_pos=True,
                relative_z_pos=True)
            if done:
                logger.debug("done: crowdai_submit_count:", crowdai_submit_count)
                eval_obs_dict = crowdai_client.env_reset()
                if not eval_obs_dict:
                    break
                logger.debug("done: eval_obs_dict exists after reset")
                eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
                    eval_obs_dict,
                    reward_shaping=True,
                    reward_shaping_x=1.,
                    feature_embellishment=True,
                    relative_x_pos=True,
                    relative_z_pos=True)
        crowdai_client.submit()

    for i in range(len(sess_list)):
        sess_list[i].close()
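# NOTE: BlendedAgent is used above but not shown in this excerpt. A minimal sketch of what such
# an ensemble could look like is given below, assuming it averages each restored agent's action
# after switching to that agent's session and graph; the real class may blend actions or
# Q-values differently, so treat this as illustrative rather than the repo's implementation.
import numpy as np

class BlendedAgentSketch(object):
    def __init__(self, agents, sess_list, graph_list):
        self.agents = agents
        self.sess_list = sess_list
        self.graph_list = graph_list

    def pi(self, obs, apply_noise=False, compute_Q=False):
        actions, qs = [], []
        for agent, sess, graph in zip(self.agents, self.sess_list, self.graph_list):
            with sess.as_default(), graph.as_default():
                action, q = agent.pi(obs, apply_noise=apply_noise, compute_Q=compute_Q)
            actions.append(action)
            qs.append(q)
        mean_action = np.mean(actions, axis=0)          # simple ensemble average of actions
        mean_q = np.mean(qs, axis=0) if compute_Q else None
        return mean_action, mean_q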
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
          ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details)
    seed: int or None
        prng seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress. Set to None to disable printing.
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored at the end of
        the training. If you do not wish to restore the best version at the end of the training,
        set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial value to 1.0.
        If set to None, equals total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } logger.log('act_params:' + str(act_params)) act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False logger.info('start Loading model from {}'.format(load_path)) if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.info('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None and os.path.exists(load_path): load_variables(load_path) logger.info('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True logger.debug('feature obs:' + str(np.array(obs)[None])) action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. 
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            logger.debug('feature replay_buffer:')
            logger.debug(str(obs))
            logger.debug(str(action))
            logger.debug(str(rew))
            logger.debug(str(new_obs))
            logger.debug(str(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            # show deepq learning information
            env.render()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
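# NOTE: illustrative only. A minimal call to the learn() function above on a small discrete-action
# gym environment (older gym API where reset() returns just the observation). The function name
# example_run and the environment/hyperparameter choices are assumptions, not part of the excerpt;
# note that this learn() variant also calls env.render() every step.
import gym

def example_run():
    env = gym.make("CartPole-v0")
    act = learn(
        env,
        network='mlp',            # one of the registered model names in baselines.common.models
        lr=1e-3,
        total_timesteps=20000,
        buffer_size=10000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
    )
    obs = env.reset()
    action = act(obs[None])[0]    # use the returned ActWrapper to select an action
    return action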
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render,
          param_noise, actor, critic, normalize_returns, normalize_observations,
          critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm,
          nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,
          saved_model_basename, restore_model_name, crowdai_client, crowdai_token,
          reward_shaping, feature_embellishment, relative_x_pos, relative_z_pos,
          tau=0.01, eval_env=None, param_noise_adaption_interval=50):
    rank = MPI.COMM_WORLD.Get_rank()

    assert (np.abs(env.action_space.low) == env.action_space.high).all()  # we assume symmetric actions.
    max_action = env.action_space.high
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
                 actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart,
                 clip_norm=clip_norm, reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))

    # Set up logging stuff only for a single worker.
    saved_model_dir = 'saved-models/'
    if saved_model_basename is None:
        saved_model_basename = ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))
    saved_model_path = saved_model_dir + saved_model_basename
    if restore_model_name:
        restore_model_path = restore_model_name
        if not pathlib.Path(restore_model_path + '.index').is_file():
            restore_model_path = saved_model_dir + restore_model_name
    max_to_keep = 500
    eval_reward_threshold_to_keep = 300
    saver = tf.train.Saver(max_to_keep=max_to_keep)
    adam_optimizer_store = dict()
    adam_optimizer_store['actor_optimizer'] = dict()
    adam_optimizer_store['critic_optimizer'] = dict()

    #eval_episode_rewards_history = deque(maxlen=100)
    #episode_rewards_history = deque(maxlen=100)

    with U.single_threaded_session() as sess:
        try:
            if restore_model_name:
                logger.info("Restoring from model at", restore_model_path)
                #saver.restore(sess, tf.train.latest_checkpoint(model_path))
                saver.restore(sess, restore_model_path)
            else:
                logger.info("Creating new model")
                sess.run(tf.global_variables_initializer())  # this should happen here and not in the agent right?
        except InvalidArgumentError as exc:
            if "Assign requires shapes of both tensors to match." in str(exc):
                print("Unable to restore model from {:s}.".format(restore_model_path))
                print("Chances are you're trying to restore a model with reward embellishment into an "
                      "environment without reward embellishment (or vice versa). Unfortunately this "
                      "isn't supported (yet).")
                print(exc.message)
                sys.exit()
            else:
                raise exc

        # Prepare everything.
        agent.initialize(sess)
        sess.graph.finalize()
        agent.reset()

        # restore adam optimizer
        try:
            if restore_model_name:
                logger.info("Restoring pkl file with adam state", restore_model_path)
                #saver.restore(sess, tf.train.latest_checkpoint(model_path))
                adam_optimizer_store = pickle.load(open(restore_model_path + ".pkl", "rb"))
                agent.actor_optimizer.m = adam_optimizer_store['actor_optimizer']['m']
                agent.actor_optimizer.v = adam_optimizer_store['actor_optimizer']['v']
                agent.actor_optimizer.t = adam_optimizer_store['actor_optimizer']['t']
                agent.critic_optimizer.m = adam_optimizer_store['critic_optimizer']['m']
                agent.critic_optimizer.v = adam_optimizer_store['critic_optimizer']['v']
                agent.critic_optimizer.t = adam_optimizer_store['critic_optimizer']['t']
                if 'param_noise' in adam_optimizer_store:
                    agent.param_noise = adam_optimizer_store['param_noise']
        except Exception:
            print("Unable to restore adam state from {:s}.".format(restore_model_path))

        obs = env.reset()
        done = False
        episode_reward = 0.
        #episode_step = 0
        #episodes = 0
        #t = 0

        #epoch_episode_steps = []
        #epoch_episode_eval_rewards = []
        #epoch_episode_eval_steps = []
        #epoch_start_time = time.time()
        #epoch_actions = []
        #epoch_episodes = 0

        for epoch in range(nb_epochs):
            start_time = time.time()
            epoch_episode_rewards = []
            epoch_qs = []
            eval_episode_rewards = []
            eval_qs = []
            eval_steps = []
            epoch_actor_losses = []
            epoch_critic_losses = []
            worth_keeping = False
            for cycle in range(nb_epoch_cycles):
                # Perform rollouts.
                for t_rollout in range(nb_rollout_steps):
                    # Predict next action.
                    action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
                    assert action.shape == env.action_space.shape

                    # Execute next action.
                    if rank == 0 and render:
                        env.render()
                    assert max_action.shape == action.shape
                    #new_obs, r, done, info = env.step(max_action * action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                    new_obs, r, done, info = env.step(action)
                    #t += 1
                    if rank == 0 and render:
                        env.render()
                    episode_reward += r
                    #episode_step += 1

                    # Book-keeping.
                    #epoch_actions.append(action)
                    epoch_qs.append(q)
                    agent.store_transition(obs, action, r, new_obs, done)
                    obs = new_obs

                    if done:
                        # Episode done.
                        epoch_episode_rewards.append(episode_reward)
                        #episode_rewards_history.append(episode_reward)
                        #epoch_episode_steps.append(episode_step)
                        episode_reward = 0.
                        #episode_step = 0
                        #epoch_episodes += 1
                        #episodes += 1
                        agent.reset()
                        obs = env.reset()

                # Train.
                #epoch_adaptive_distances = []
                for t_train in range(nb_train_steps):
                    # Adapt param noise, if necessary.
                    if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
                        distance = agent.adapt_param_noise()
                        #epoch_adaptive_distances.append(distance)

                    cl, al = agent.train()
                    epoch_critic_losses.append(cl)
                    epoch_actor_losses.append(al)
                    agent.update_target_net()

            # Submit to crowdai competition. What a hack. :)
            #if crowdai_client is not None and crowdai_token is not None and eval_env is not None:
            crowdai_submit_count = 0
            if crowdai_client is not None and crowdai_token is not None:
                eval_obs_dict = crowdai_client.env_create(crowdai_token, env_id="ProstheticsEnv")
                eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
                    eval_obs_dict,
                    reward_shaping=reward_shaping,
                    reward_shaping_x=1.,
                    feature_embellishment=feature_embellishment,
                    relative_x_pos=relative_x_pos,
                    relative_z_pos=relative_z_pos)
                while True:
                    action, _ = agent.pi(eval_obs_projection, apply_noise=False, compute_Q=False)
                    submit_action = prosthetics_env.openai_to_crowdai_submit_action(action)
                    clipped_submit_action = np.clip(submit_action, 0., 1.)
                    actions_equal = clipped_submit_action == submit_action
                    if not np.all(actions_equal):
                        logger.debug("crowdai_submit_count:", crowdai_submit_count)
                        logger.debug(" openai-action:", action)
                        logger.debug(" submit-action:", submit_action)
                    crowdai_submit_count += 1
                    [eval_obs_dict, reward, done, info] = crowdai_client.env_step(clipped_submit_action.tolist(), True)
                    #[eval_obs_dict, reward, done, info] = crowdai_client.env_step(agent.pi(eval_obs_projection, apply_noise=False, compute_Q=False), True)
                    eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
                        eval_obs_dict,
                        reward_shaping=reward_shaping,
                        reward_shaping_x=1.,
                        feature_embellishment=feature_embellishment,
                        relative_x_pos=relative_x_pos,
                        relative_z_pos=relative_z_pos)
                    if done:
                        logger.debug("done: crowdai_submit_count:", crowdai_submit_count)
                        eval_obs_dict = crowdai_client.env_reset()
                        if not eval_obs_dict:
                            break
                        logger.debug("done: eval_obs_dict exists after reset")
                        eval_obs_dict, eval_obs_projection = prosthetics_env.transform_observation(
                            eval_obs_dict,
                            reward_shaping=reward_shaping,
                            reward_shaping_x=1.,
                            feature_embellishment=feature_embellishment,
                            relative_x_pos=relative_x_pos,
                            relative_z_pos=relative_z_pos)
                crowdai_client.submit()
                return  # kids, don't try any of these (expedient hacks) at home!

            if eval_env:
                eval_episode_reward_mean, eval_q_mean, eval_step_mean = evaluate_n_episodes(
                    3, eval_env, agent, nb_eval_steps, render_eval)
                if eval_episode_reward_mean >= eval_reward_threshold_to_keep:
                    worth_keeping = True

            mpi_size = MPI.COMM_WORLD.Get_size()

            # Log stats.
            # XXX shouldn't call np.mean on variable length lists
            duration = time.time() - start_time
            if nb_epochs and nb_epoch_cycles and nb_train_steps > 0:
                #stats = agent.get_stats()
                #combined_stats = stats.copy()
                combined_stats = {}
                combined_stats['train/epoch_episode_reward_mean'] = np.mean(epoch_episode_rewards)
                #combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
                #combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
                #combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
                combined_stats['train/epoch_Q_mean'] = np.mean(epoch_qs)
                combined_stats['train/epoch_loss_actor'] = np.mean(epoch_actor_losses)
                combined_stats['train/epoch_loss_critic'] = np.mean(epoch_critic_losses)
                #combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
                combined_stats['train/epoch_duration'] = duration
                #combined_stats['epoch/steps_per_second'] = float(t) / float(duration)
                #combined_stats['total/episodes'] = episodes
                #combined_stats['rollout/episodes'] = epoch_episodes
                #combined_stats['rollout/actions_std'] = np.std(epoch_actions)
                #combined_stats['memory/rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            else:
                combined_stats = {}

            # Evaluation statistics.
            if eval_env:
                combined_stats['eval/epoch_episode_reward_mean'] = eval_episode_reward_mean  # np.mean(eval_episode_rewards)
                #combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
                #combined_stats['eval/epoch_episode_reward_std'] = np.std(eval_episode_rewards)
                combined_stats['eval/epoch_Q_mean'] = eval_q_mean  # np.mean(eval_qs)
                #combined_stats['eval/episodes'] = len(eval_episode_rewards)
                combined_stats['eval/steps_mean'] = eval_step_mean  # np.mean(eval_steps)

            def as_scalar(x):
                if isinstance(x, np.ndarray):
                    assert x.size == 1
                    return x[0]
                elif np.isscalar(x):
                    return x
                else:
                    raise ValueError('expected scalar, got %s' % x)

            combined_stats_sums = MPI.COMM_WORLD.allreduce(
                np.array([as_scalar(x) for x in combined_stats.values()]))
            combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}

            # Total statistics.
            #combined_stats['total/epochs'] = epoch + 1
            #combined_stats['total/steps'] = t

            for key in sorted(combined_stats.keys()):
                logger.record_tabular(key, combined_stats[key])
            logger.info('')
            logger.info('Epoch', epoch)
            logger.dump_tabular()
            logdir = logger.get_dir()

            if worth_keeping and rank == 0 and nb_epochs and nb_epoch_cycles and nb_train_steps and nb_rollout_steps:
                logger.info('Saving model to', saved_model_dir + saved_model_basename + '-' + str(epoch))
                saver.save(sess, saved_model_path, global_step=epoch, write_meta_graph=False)
                adam_optimizer_store['actor_optimizer']['m'] = agent.actor_optimizer.m
                adam_optimizer_store['actor_optimizer']['v'] = agent.actor_optimizer.v
                adam_optimizer_store['actor_optimizer']['t'] = agent.actor_optimizer.t
                adam_optimizer_store['critic_optimizer']['m'] = agent.critic_optimizer.m
                adam_optimizer_store['critic_optimizer']['v'] = agent.critic_optimizer.v
                adam_optimizer_store['critic_optimizer']['t'] = agent.critic_optimizer.t
                adam_optimizer_store['param_noise'] = agent.param_noise
                pickle.dump(adam_optimizer_store,
                            open((saved_model_path + "-" + str(epoch) + ".pkl"), "wb"))
                old_epoch = epoch - max_to_keep
                if old_epoch >= 0:
                    try:
                        os.remove(saved_model_path + "-" + str(old_epoch) + ".pkl")
                    except OSError:
                        pass

            if rank == 0 and logdir:
                if hasattr(env, 'get_state'):
                    with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
                        pickle.dump(env.get_state(), f)
                if eval_env and hasattr(eval_env, 'get_state'):
                    with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
                        pickle.dump(eval_env.get_state(), f)
def update_lr(self, new_actor_lr=None, new_critic_lr=None):
    if new_actor_lr:
        self.actor_lr = new_actor_lr
    if new_critic_lr:
        self.critic_lr = new_critic_lr
    logger.debug('Updated learning rates.')
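# NOTE: illustrative only. One way update_lr() could be used during training is an exponential
# decay of both learning rates once per epoch; the helper name, base rates, and decay factor
# below are assumptions, not values taken from the code above.
def decay_learning_rates(agent, epoch, base_actor_lr=1e-4, base_critic_lr=1e-3, decay=0.97):
    # shrink both rates geometrically with the epoch index
    agent.update_lr(new_actor_lr=base_actor_lr * (decay ** epoch),
                    new_critic_lr=base_critic_lr * (decay ** epoch))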