def setup_critic_optimizer(self):
    logger.info('setting up critic optimizer')
    normalized_critic_target_tf = tf.clip_by_value(
        normalize(self.critic_target, self.ret_rms),
        self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(
        tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        critic_reg_vars = [
            var for var in self.critic.trainable_vars
            if 'kernel' in var.name and 'output' not in var.name
        ]
        for var in critic_reg_vars:
            logger.info(' regularizing: {}'.format(var.name))
        logger.info(' applying l2 regularization with {}'.format(
            self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars)
        self.critic_loss += critic_reg
    critic_shapes = [
        var.get_shape().as_list() for var in self.critic.trainable_vars
    ]
    critic_nb_params = sum(
        [reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info(' critic shapes: {}'.format(critic_shapes))
    logger.info(' critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss,
                                   self.critic.trainable_vars,
                                   clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
                                    beta1=0.9, beta2=0.999, epsilon=1e-08,
                                    single_train=self.single_train)
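# A minimal sketch (not code from this repo) of the normalize/denormalize
# convention the critic loss above relies on: returns are scaled by a running
# mean/std (ret_rms) before the squared error is taken, then clipped to
# return_range (which the DDPG constructor defaults to (-inf, inf)). The
# _ReturnStats stand-in and the numbers are illustrative only.
import numpy as np

class _ReturnStats:
    """Stand-in for RunningMeanStd: just exposes the mean/std used for scaling."""
    mean, std = -0.5, 0.25

def normalize(x, stats):
    return (x - stats.mean) / stats.std

def denormalize(x, stats):
    return x * stats.std + stats.mean

raw_target = np.array([-0.9, -0.1])
scaled = np.clip(normalize(raw_target, _ReturnStats), -np.inf, np.inf)
assert np.allclose(denormalize(scaled, _ReturnStats), raw_target)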
def __init__(self, skills):
    self.skillset = []
    self.params_start_idx = []
    param_idx = 0
    for skill in skills:
        self.params_start_idx.append(param_idx)
        self.skillset.append(DDPGSkill(**skill))
        param_idx += self.skillset[-1].num_params
    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.info("Skill set init!\n" + "#" * 50)
def get_target_updates(vars, target_vars, tau):
    logger.info('setting up target updates ...')
    soft_updates = []
    init_updates = []
    assert len(vars) == len(target_vars)
    for var, target_var in zip(vars, target_vars):
        logger.info(' {} <- {}'.format(target_var.name, var.name))
        init_updates.append(tf.assign(target_var, var))
        soft_updates.append(
            tf.assign(target_var, (1. - tau) * target_var + tau * var))
    assert len(init_updates) == len(vars)
    assert len(soft_updates) == len(vars)
    return tf.group(*init_updates), tf.group(*soft_updates)
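# A sketch of how the two op groups returned above are typically driven: the
# hard copy runs once right after variable initialization, and the Polyak/soft
# update runs after every training step. The session handling and op names
# here are illustrative, not code from this repo.
import tensorflow as tf

def run_target_updates(sess, init_updates, soft_updates, n_train_steps=3):
    sess.run(tf.global_variables_initializer())
    sess.run(init_updates)      # target <- online (exact copy, once)
    for _ in range(n_train_steps):
        # ... one actor/critic gradient step would go here ...
        sess.run(soft_updates)  # target <- (1 - tau) * target + tau * online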
def restore_skill(self, path, sess):
    self.sess = sess
    print('Restore path : ', path)
    # checkpoint = tf.train.get_checkpoint_state(path)
    # if checkpoint and checkpoint.model_checkpoint_path:
    model_checkpoint_path = read_checkpoint_local(osp.join(path, "model"))
    if model_checkpoint_path:
        # model_checkpoint_path = osp.join(path, osp.basename(checkpoint.model_checkpoint_path))
        self.loader.restore(U.get_session(), model_checkpoint_path)
        if MPI.COMM_WORLD.Get_rank() == 0:
            logger.info("Successfully loaded %s skill" % self.skill_name)
def get_perturbed_actor_updates(actor, perturbed_actor, param_noise_stddev):
    assert len(actor.vars) == len(perturbed_actor.vars)
    assert len(actor.perturbable_vars) == len(perturbed_actor.perturbable_vars)

    updates = []
    for var, perturbed_var in zip(actor.vars, perturbed_actor.vars):
        if var in actor.perturbable_vars:
            logger.info(' {} <- {} + noise'.format(perturbed_var.name, var.name))
            updates.append(
                tf.assign(perturbed_var,
                          var + tf.random_normal(tf.shape(var), mean=0.,
                                                 stddev=param_noise_stddev)))
        else:
            logger.info(' {} <- {}'.format(perturbed_var.name, var.name))
            updates.append(tf.assign(perturbed_var, var))
    assert len(updates) == len(actor.vars)
    return tf.group(*updates)
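# The stddev fed into the ops above is adapted outside the graph. A sketch of
# that rule in the spirit of AdaptiveParamNoiseSpec (the coefficient and the
# example distances are assumptions): shrink the noise when the perturbed
# policy drifts further from the unperturbed one than desired, grow it otherwise.
def adapt_param_noise_stddev(current_stddev, measured_distance,
                             desired_action_stddev=0.2, coefficient=1.01):
    if measured_distance > desired_action_stddev:
        return current_stddev / coefficient
    return current_stddev * coefficient

stddev = 0.2
for distance in (0.35, 0.30, 0.15):  # e.g. values of adaptive_policy_distance
    stddev = adapt_param_noise_stddev(stddev, distance)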
def setup_param_noise(self, normalized_obs0):
    assert self.param_noise is not None

    # Configure perturbed actor.
    param_noise_actor = copy(self.actor)
    param_noise_actor.name = 'param_noise_actor'
    self.perturbed_actor_tf = param_noise_actor(normalized_obs0)
    logger.info('setting up param noise')
    self.perturb_policy_ops = get_perturbed_actor_updates(
        self.actor, param_noise_actor, self.param_noise_stddev)

    # Configure separate copy for stddev adoption.
    adaptive_param_noise_actor = copy(self.actor)
    adaptive_param_noise_actor.name = 'adaptive_param_noise_actor'
    adaptive_actor_tf = adaptive_param_noise_actor(normalized_obs0)
    self.perturb_adaptive_policy_ops = get_perturbed_actor_updates(
        self.actor, adaptive_param_noise_actor, self.param_noise_stddev)
    self.adaptive_policy_distance = tf.sqrt(
        tf.reduce_mean(tf.square(self.actor_tf - adaptive_actor_tf)))
def restore_skill(self, path, sess):
    self.sess = sess
    print('Restore path : ', path)
    model_checkpoint_path = read_checkpoint_local(osp.join(path, "model"))
    if model_checkpoint_path:
        self.loader_ddpg.restore(U.get_session(), model_checkpoint_path)
        if MPI.COMM_WORLD.Get_rank() == 0:
            logger.info("Successfully loaded %s skill" % self.skill_name)

    model_checkpoint_path = read_checkpoint_local(osp.join(path, "pred_model"))
    if model_checkpoint_path:
        self.loader_successor_model.restore(U.get_session(),
                                            model_checkpoint_path)
        if MPI.COMM_WORLD.Get_rank() == 0:
            logger.info("Successfully loaded pred model for %s skill" %
                        self.skill_name)
def run(env_id, seed, evaluation, **kwargs):
    # Create envs.
    env = gym.make(env_id)
    # print(env.action_space.shape)
    logger.info("Env info")
    logger.info(env.__doc__)
    logger.info("-" * 20)
    gym.logger.setLevel(logging.WARN)

    if kwargs['skillset']:
        skillset_file = __import__("HER.skills.%s" % kwargs['skillset'],
                                   fromlist=[''])
        my_skill_set = SkillSet(skillset_file.skillset)
    else:
        my_skill_set = None

    set_global_seeds(seed)
    env.seed(seed)

    model_path = os.path.join(kwargs['restore_dir'], "model")
    testing.testing(env, model_path, my_skill_set, kwargs['render_eval'],
                    kwargs['commit_for'], kwargs['nb_eval_episodes'])
    env.close()
def setup_actor_optimizer(self):
    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.info('setting up actor optimizer')

    ## as used in Hindsight Experience Replay to stop saturation in tanh
    if self.actor_reg:
        preactivation = tf.get_default_graph().get_tensor_by_name(
            'actor/preactivation:0')
        self.actor_loss = -tf.reduce_mean(
            self.critic_with_actor_tf) + tf.reduce_mean(
                tf.square(preactivation))
    else:
        self.actor_loss = -tf.reduce_mean(self.critic_with_actor_tf)

    actor_shapes = [
        var.get_shape().as_list() for var in self.actor.trainable_vars
    ]
    actor_nb_params = sum(
        [reduce(lambda x, y: x * y, shape) for shape in actor_shapes])
    logger.info(' actor shapes: {}'.format(actor_shapes))
    logger.info(' actor params: {}'.format(actor_nb_params))
    self.actor_grads = U.flatgrad(self.actor_loss,
                                  self.actor.trainable_vars,
                                  clip_norm=self.clip_norm)
    self.actor_optimizer = MpiAdam(var_list=self.actor.trainable_vars,
                                   beta1=0.9, beta2=0.999, epsilon=1e-08)
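# Why the extra tf.square(preactivation) term helps: penalizing large pre-tanh
# activations keeps the actor out of the flat region of tanh where gradients
# vanish. A quick numpy illustration (standalone, not part of the graph above):
import numpy as np

pre = np.array([0.5, 3.0, 8.0])
grad_through_tanh = 1.0 - np.tanh(pre) ** 2  # derivative of tanh at the preactivation
penalty = np.square(pre)                     # the regularizer grows with |preactivation|
# At pre = 8 the tanh gradient is ~4.5e-7 while the penalty is 64, so the
# regularized loss pushes preactivations back toward the responsive region.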
def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape, dtype=int)
    totalvec = np.zeros(n * 2 + 1, 'float64')
    addvec = np.concatenate([
        x.sum(axis=axis).ravel(),
        np.square(x).sum(axis=axis).ravel(),
        np.array([x.shape[axis]], dtype='float64')
    ])
    try:
        MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
        # logger.info("moment without error")
        # logger.info(addvec)
        # logger.info(type(addvec))
        # logger.info("-"*50)
    except MPI.Exception as err:
        logger.info("moment error")
        logger.info(err)
        logger.info(addvec)
        logger.info(type(addvec))
        logger.info("-" * 50)
    sum = totalvec[:n]
    sumsq = totalvec[n:2 * n]
    count = totalvec[2 * n]
    if count == 0:
        mean = np.empty(newshape)
        mean[:] = np.nan
        std = np.empty(newshape)
        std[:] = np.nan
    else:
        mean = sum / count
        std = np.sqrt(np.maximum(sumsq / count - np.square(mean), 0))
    return mean, std, count
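# Typical use of mpi_moments: each MPI worker passes its local episode returns
# and every rank gets back the same global mean/std/count. A small sketch; the
# return values here are synthetic.
import numpy as np
from mpi4py import MPI

local_returns = np.random.randn(16)            # this worker's rollout returns
mean, std, count = mpi_moments(local_returns)  # identical result on every rank
if MPI.COMM_WORLD.Get_rank() == 0:
    print("global return mean %.3f over %d episodes" % (mean, count))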
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):
    # Configure things.
    rank = MPI.COMM_WORLD.Get_rank()
    if rank != 0:
        logger.set_level(logger.DISABLED)

    # Create envs.
    env = gym.make(env_id)
    logger.debug("Env info")
    logger.debug(env.__doc__)
    logger.debug("-" * 20)
    gym.logger.setLevel(logging.WARN)

    if evaluation and rank == 0:
        if kwargs['eval_env_id']:
            eval_env_id = kwargs['eval_env_id']
        else:
            eval_env_id = env_id
        eval_env = gym.make(eval_env_id)
        # del eval_env_id from kwargs
        del kwargs['eval_env_id']
    else:
        eval_env = None

    # Parse noise_type
    action_noise = None
    param_noise = None
    nb_actions = env.action_space.shape[-1]
    for current_noise_type in noise_type.split(','):
        current_noise_type = current_noise_type.strip()
        if current_noise_type == 'none':
            pass
        elif 'adaptive-param' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            param_noise = AdaptiveParamNoiseSpec(
                initial_stddev=float(stddev),
                desired_action_stddev=float(stddev))
        elif 'normal' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = NormalActionNoise(
                mu=np.zeros(nb_actions),
                sigma=float(stddev) * np.ones(nb_actions))
        elif 'ou' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = OrnsteinUhlenbeckActionNoise(
                mu=np.zeros(nb_actions),
                sigma=float(stddev) * np.ones(nb_actions))
        elif 'epsnorm' in current_noise_type:
            _, stddev, epsilon = current_noise_type.split('_')
            action_noise = EpsilonNormalActionNoise(
                mu=np.zeros(nb_actions),
                sigma=float(stddev) * np.ones(nb_actions),
                epsilon=float(epsilon))
        else:
            raise RuntimeError(
                'unknown noise type "{}"'.format(current_noise_type))

    # Configure components.
    memory = Memory(limit=int(1e6),
                    action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape)
    critic = Critic(layer_norm=layer_norm)
    actor = Actor(nb_actions, layer_norm=layer_norm)

    # Seed everything to make things reproducible.
    seed = seed + 1000000 * rank
    tf.reset_default_graph()

    # importing the current skill configs
    if kwargs['look_ahead'] and kwargs['skillset']:
        skillset_file = __import__("HER.skills.%s" % kwargs['skillset'],
                                   fromlist=[''])
        my_skill_set = SkillSet(skillset_file.skillset)
    else:
        my_skill_set = None

    set_global_seeds(seed)
    env.seed(seed)
    if eval_env is not None:
        eval_env.seed(seed)

    # Disable logging for rank != 0 to avoid noise.
    if rank == 0:
        logger.info('rank {}: seed={}, logdir={}'.format(
            rank, seed, logger.get_dir()))
        start_time = time.time()
    training.train(env=env,
                   eval_env=eval_env,
                   param_noise=param_noise,
                   action_noise=action_noise,
                   actor=actor,
                   critic=critic,
                   memory=memory,
                   my_skill_set=my_skill_set,
                   **kwargs)
    env.close()
    if eval_env is not None:
        eval_env.close()
    if rank == 0:
        logger.info('total runtime: {}s'.format(time.time() - start_time))
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):
    # Configure things.
    rank = MPI.COMM_WORLD.Get_rank()
    if rank != 0:
        logger.set_level(logger.DISABLED)

    # Create envs.
    env = gym.make(env_id)
    env = bench.Monitor(
        env,
        logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
    gym.logger.setLevel(logging.WARN)

    if evaluation and rank == 0:
        eval_env = gym.make(env_id)
        eval_env = bench.Monitor(eval_env,
                                 os.path.join(logger.get_dir(), 'gym_eval'))
        # env = bench.Monitor(env, None)
    else:
        eval_env = None

    # Parse noise_type
    action_noise = None
    param_noise = None
    nb_actions = env.action_space.shape[-1]
    for current_noise_type in noise_type.split(','):
        current_noise_type = current_noise_type.strip()
        if current_noise_type == 'none':
            pass
        elif 'adaptive-param' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            param_noise = AdaptiveParamNoiseSpec(
                initial_stddev=float(stddev),
                desired_action_stddev=float(stddev))
        elif 'normal' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = NormalActionNoise(
                mu=np.zeros(nb_actions),
                sigma=float(stddev) * np.ones(nb_actions))
        elif 'ou' in current_noise_type:
            _, stddev = current_noise_type.split('_')
            action_noise = OrnsteinUhlenbeckActionNoise(
                mu=np.zeros(nb_actions),
                sigma=float(stddev) * np.ones(nb_actions))
        else:
            raise RuntimeError(
                'unknown noise type "{}"'.format(current_noise_type))

    # Configure components.
    memory = Memory(limit=int(1e6),
                    action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape)
    critic = Critic(layer_norm=layer_norm)
    actor = Actor(nb_actions, layer_norm=layer_norm)

    # Seed everything to make things reproducible.
    seed = seed + 1000000 * rank
    logger.info('rank {}: seed={}, logdir={}'.format(rank, seed,
                                                     logger.get_dir()))
    tf.reset_default_graph()
    set_global_seeds(seed)
    env.seed(seed)
    if eval_env is not None:
        eval_env.seed(seed)

    # Disable logging for rank != 0 to avoid noise.
    if rank == 0:
        start_time = time.time()
    training.train(env=env,
                   eval_env=eval_env,
                   param_noise=param_noise,
                   action_noise=action_noise,
                   actor=actor,
                   critic=critic,
                   memory=memory,
                   **kwargs)
    env.close()
    if eval_env is not None:
        eval_env.close()
    if rank == 0:
        logger.info('total runtime: {}s'.format(time.time() - start_time))
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs): # Configure things. rank = MPI.COMM_WORLD.Get_rank() if rank != 0: logger.set_level(logger.DISABLED) # Create envs. env = gym.make(env_id) # print(env.action_space.shape) logger.info("Env info") logger.info(env.__doc__) logger.info("-" * 20) gym.logger.setLevel(logging.WARN) if evaluation and rank == 0: if kwargs['eval_env_id']: eval_env_id = kwargs['eval_env_id'] else: eval_env_id = env_id eval_env = gym.make(eval_env_id) # del eval_env_id from kwargs del kwargs['eval_env_id'] else: eval_env = None # Parse noise_type action_noise = None param_noise = None tf.reset_default_graph() ## this is a HACK if kwargs['skillset']: # import HER.skills.set2 as skillset_file skillset_file = __import__("HER.skills.%s" % kwargs['skillset'], fromlist=['']) my_skill_set = SkillSet(skillset_file.skillset) nb_actions = my_skill_set.params + my_skill_set.len else: nb_actions = env.action_space.shape[-1] for current_noise_type in noise_type.split(','): current_noise_type = current_noise_type.strip() if current_noise_type == 'none': pass elif 'adaptive-param' in current_noise_type: _, stddev = current_noise_type.split('_') param_noise = AdaptiveParamNoiseSpec( initial_stddev=float(stddev), desired_action_stddev=float(stddev)) elif 'normal' in current_noise_type: _, stddev = current_noise_type.split('_') action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions)) elif 'ou' in current_noise_type: _, stddev = current_noise_type.split('_') action_noise = OrnsteinUhlenbeckActionNoise( mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions)) elif 'epsnorm' in current_noise_type: _, stddev, epsilon = current_noise_type.split('_') action_noise = EpsilonNormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions), epsilon=float(epsilon)) elif 'pepsnorm' in current_noise_type: _, stddev, epsilon = current_noise_type.split('_') action_noise = EpsilonNormalParameterizedActionNoise( mu=np.zeros(my_skill_set.num_params), sigma=float(stddev) * np.ones(my_skill_set.num_params), epsilon=float(epsilon), discrete_actions_dim=my_skill_set.len) else: raise RuntimeError( 'unknown noise type "{}"'.format(current_noise_type)) # Configure components. memory = Memory(limit=int(1e6), action_shape=(nb_actions, ), observation_shape=env.observation_space.shape) if kwargs['newarch']: critic = Critic(layer_norm=layer_norm, hidden_unit_list=[400, 300]) elif kwargs['newcritic']: critic = NewCritic(layer_norm=layer_norm) else: critic = Critic(layer_norm=layer_norm) if kwargs['skillset'] is None: if kwargs['newarch']: actor = Actor(discrete_action_size=env.env.discrete_action_size, cts_action_size=nb_actions - env.env.discrete_action_size, layer_norm=layer_norm, hidden_unit_list=[400, 300]) else: actor = Actor(discrete_action_size=env.env.discrete_action_size, cts_action_size=nb_actions - env.env.discrete_action_size, layer_norm=layer_norm) my_skill_set = None else: # pass # get the skillset and make actor accordingly if kwargs['newarch']: actor = Actor(discrete_action_size=my_skill_set.len, cts_action_size=nb_actions - my_skill_set.len, layer_norm=layer_norm, hidden_unit_list=[400, 300]) else: actor = Actor(discrete_action_size=my_skill_set.len, cts_action_size=nb_actions - my_skill_set.len, layer_norm=layer_norm) # Seed everything to make things reproducible. 
    seed = seed + 1000000 * rank
    logger.info('rank {}: seed={}, logdir={}'.format(rank, seed,
                                                     logger.get_dir()))
    set_global_seeds(seed)
    env.seed(seed)
    if eval_env is not None:
        eval_env.seed(seed)

    # Disable logging for rank != 0 to avoid noise.
    if rank == 0:
        start_time = time.time()
    training.train(env=env,
                   eval_env=eval_env,
                   param_noise=param_noise,
                   action_noise=action_noise,
                   actor=actor,
                   critic=critic,
                   memory=memory,
                   my_skill_set=my_skill_set,
                   **kwargs)
    env.close()
    if eval_env is not None:
        eval_env.close()
    if rank == 0:
        logger.info('total runtime: {}s'.format(time.time() - start_time))
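# The noise_type argument parsed above is a comma-separated list of
# "<kind>_<stddev>" tokens (epsnorm and pepsnorm additionally carry an
# epsilon). A standalone illustration of the format; the numbers are examples,
# not the project defaults.
for spec in "adaptive-param_0.2,ou_0.2,epsnorm_0.2_0.1".split(','):
    parts = spec.strip().split('_')
    kind, params = parts[0], [float(p) for p in parts[1:]]
    print(kind, params)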
    boolean_flag(parser, 'newarch', default=False)
    boolean_flag(parser, 'newcritic', default=False)
    boolean_flag(parser, 'select-action', default=False)
    args = parser.parse_args()
    # we don't directly specify timesteps for this script, so make sure that if we do specify them
    # they agree with the other parameters
    if args.num_timesteps is not None:
        assert (args.num_timesteps ==
                args.nb_epochs * args.nb_epoch_cycles * args.nb_rollout_steps)
    dict_args = vars(args)
    del dict_args['num_timesteps']
    return dict_args


if __name__ == '__main__':
    args = parse_args()
    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.configure(dir=args["log_dir"])
        logger.info(str(args))
    # Run actual script.
    try:
        run(**args)
    except KeyboardInterrupt:
        print("Exiting!")
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_episodes, batch_size, memory, tau=0.05, eval_env=None, param_noise_adaption_interval=50, **kwargs): rank = MPI.COMM_WORLD.Get_rank() assert (np.abs(env.action_space.low) == env.action_space.high ).all() # we assume symmetric actions. max_action = env.action_space.high if "dologging" in kwargs: dologging = kwargs["dologging"] else: dologging = True if "tf_sum_logging" in kwargs: tf_sum_logging = kwargs["tf_sum_logging"] else: tf_sum_logging = False if "invert_grad" in kwargs: invert_grad = kwargs["invert_grad"] else: invert_grad = False if "actor_reg" in kwargs: actor_reg = kwargs["actor_reg"] else: actor_reg = False if dologging: logger.debug( 'scaling actions by {} before executing in env'.format(max_action)) if kwargs['look_ahead']: look_ahead = True look_ahead_planner = Planning_with_memories( skillset=kwargs['my_skill_set'], env=env, num_samples=kwargs['num_samples']) exploration = LinearSchedule(schedule_timesteps=int(nb_epochs * nb_epoch_cycles), initial_p=1.0, final_p=kwargs['exploration_final_eps']) else: look_ahead = False if kwargs['skillset']: action_shape = (kwargs['my_skill_set'].len + kwargs['my_skill_set'].num_params, ) else: action_shape = env.action_space.shape agent = DDPG(actor, critic, memory, env.observation_space.shape, action_shape, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, inverting_grad=invert_grad, actor_reg=actor_reg) if dologging and MPI.COMM_WORLD.Get_rank() == 0: logger.debug('Using agent with the following configuration:') logger.debug(str(agent.__dict__.items())) # should have saver for all thread to restore. But dump only using 1 saver saver = tf.train.Saver(keep_checkpoint_every_n_hours=2, max_to_keep=20, save_relative_paths=True) save_freq = kwargs["save_freq"] # step = 0 global_t = 0 eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) ## get the session with the current graph => identical graph is used for each session with U.single_threaded_session() as sess: # Set summary saver if dologging and tf_sum_logging and rank == 0: tf.summary.histogram("actor_grads", agent.actor_grads) tf.summary.histogram("critic_grads", agent.critic_grads) actor_trainable_vars = actor.trainable_vars for var in actor_trainable_vars: tf.summary.histogram(var.name, var) critic_trainable_vars = critic.trainable_vars for var in critic_trainable_vars: tf.summary.histogram(var.name, var) tf.summary.histogram("actions_out", agent.actor_tf) tf.summary.histogram("critic_out", agent.critic_tf) tf.summary.histogram("target_Q", agent.target_Q) summary_var = tf.summary.merge_all() writer_t = tf.summary.FileWriter( osp.join(logger.get_dir(), 'train'), sess.graph) else: summary_var = tf.no_op() # Prepare everything. 
agent.initialize(sess) sess.graph.finalize() ## restore if kwargs['skillset']: ## restore skills my_skill_set = kwargs['my_skill_set'] my_skill_set.restore_skillset(sess=sess) ## restore current controller if kwargs["restore_dir"] is not None: restore_dir = osp.join(kwargs["restore_dir"], "model") if (restore_dir is not None) and rank == 0: print('Restore path : ', restore_dir) model_checkpoint_path = read_checkpoint_local(restore_dir) if model_checkpoint_path: saver.restore(U.get_session(), model_checkpoint_path) logger.info("checkpoint loaded:" + str(model_checkpoint_path)) tokens = model_checkpoint_path.split("-")[-1] # set global step global_t = int(tokens) print(">>> global step set:", global_t) agent.reset() obs = env.reset() # maintained across epochs episodes = 0 t = 0 start_time = time.time() # creating vars. this is done to keep the syntax for deleting the list simple a[:] = [] epoch_episode_rewards = [] epoch_episode_steps = [] epoch_actions = [] epoch_actor_losses = [] epoch_critic_losses = [] if param_noise is not None: epoch_adaptive_distances = [] eval_episode_rewards = [] eval_episode_success = [] # for each episode done = False episode_reward = 0. episode_step = 0 ## containers for hierarchical hindsight if kwargs["her"]: logger.debug("-" * 50 + '\nWill create HER\n' + "-" * 50) # per episode states, pactions, sub_states = [], [], [] print("Ready to go!") for epoch in range(global_t, nb_epochs): # stat containers epoch_episodes = 0. epoch_start_time = time.time() epoch_episode_rewards[:] = [] epoch_episode_steps[:] = [] epoch_actions[:] = [ ] # action mean: don't know if this indicates anything epoch_actor_losses[:] = [] epoch_critic_losses[:] = [] if param_noise is not None: epoch_adaptive_distances[:] = [] eval_episode_rewards[:] = [] eval_episode_success[:] = [] for cycle in range(nb_epoch_cycles): # Perform rollouts. for t_rollout in range( int(nb_rollout_steps / MPI.COMM_WORLD.Get_size())): # print(rank, t_rollout) # Predict next action. # exploration check if kwargs['look_ahead'] and (np.random.rand( ) < exploration.value(epoch * nb_epoch_cycles + cycle)): paction, planner_info = look_ahead_planner.create_plan( obs) else: paction, _ = agent.pi(obs, apply_noise=True, compute_Q=True) if (my_skill_set): ## break actions into primitives and their params primitives_prob = paction[:kwargs['my_skill_set'].len] primitive_id = np.argmax(primitives_prob) # print("skill chosen", primitive_id) r = 0. skill_obs = obs.copy() if kwargs['her']: curr_sub_states = [skill_obs.copy()] for _ in range(kwargs['commit_for']): action = my_skill_set.pi( primitive_id=primitive_id, obs=skill_obs.copy(), primitive_params=paction[my_skill_set.len:]) # Execute next action. if rank == 0 and render: sleep(0.1) env.render() assert max_action.shape == action.shape new_obs, skill_r, done, info = env.step( max_action * action ) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) r += skill_r if kwargs['her']: curr_sub_states.append(new_obs.copy()) skill_obs = new_obs if done or my_skill_set.termination( new_obs, primitive_id, primitive_params=paction[my_skill_set. len:]): break # assuming the skill is trained from different reward signal r = skill_r else: action = paction # Execute next action. 
if rank == 0 and render: env.render() assert max_action.shape == action.shape new_obs, r, done, info = env.step( max_action * action ) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) assert action.shape == env.action_space.shape t += 1 episode_reward += r episode_step += 1 # Book-keeping. epoch_actions.append(paction) agent.store_transition(obs, paction, r, new_obs, done) # storing info for hindsight if kwargs['her']: states.append(obs.copy()) pactions.append(paction.copy()) sub_states.append(curr_sub_states) # print(planner_info['next_state'][:6], new_obs[:6]) obs = new_obs if done: # Episode done. # update stats epoch_episode_rewards.append(episode_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(episode_step) epoch_episodes += 1 episodes += 1 # reinit episode_reward = 0. episode_step = 0 agent.reset() obs = env.reset() if kwargs["her"]: # logger.info("-"*50 +'\nCreating HER\n' + "-"*50) # create hindsight experience replay if kwargs['skillset']: her_states, her_rewards = env.apply_hierarchical_hindsight( states, pactions, new_obs.copy(), sub_states) else: her_states, her_rewards = env.apply_hindsight( states, pactions, new_obs.copy()) ## store her transitions: her_states: n+1, her_rewards: n for her_i in range(len(her_states) - 2): agent.store_transition(her_states[her_i], pactions[her_i], her_rewards[her_i], her_states[her_i + 1], False) #store last transition agent.store_transition(her_states[-2], pactions[-1], her_rewards[-1], her_states[-1], True) ## refresh the storage containers states[:], pactions[:] = [], [] if kwargs['skillset']: sub_states[:] = [] # print(rank, "Training!") # Train. for t_train in range(nb_train_steps): # print(rank, t_train) # Adapt param noise, if necessary. if (memory.nb_entries >= batch_size) and ( t % param_noise_adaption_interval == 0) and (param_noise is not None): distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) cl, al, current_summary = agent.train(summary_var) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() if dologging and tf_sum_logging and rank == 0: writer_t.add_summary( current_summary, epoch * nb_epoch_cycles * nb_train_steps + cycle * nb_train_steps + t_train) # print("Evaluating!") # Evaluate after training is done. if (eval_env is not None) and rank == 0: for _ in range(nb_eval_episodes): eval_episode_reward = 0. eval_obs = eval_env.reset() eval_obs_start = eval_obs.copy() eval_done = False while (not eval_done): eval_paction, _ = agent.pi(eval_obs, apply_noise=False, compute_Q=False) if (kwargs['skillset']): ## break actions into primitives and their params eval_primitives_prob = eval_paction[:kwargs[ 'my_skill_set'].len] eval_primitive_id = np.argmax(eval_primitives_prob) eval_r = 0. eval_skill_obs = eval_obs.copy() for _ in range(kwargs['commit_for']): eval_action = my_skill_set.pi( primitive_id=eval_primitive_id, obs=eval_skill_obs.copy(), primitive_params=eval_paction[my_skill_set. len:]) eval_new_obs, eval_skill_r, eval_done, eval_info = eval_env.step( max_action * eval_action ) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) if render_eval: eval_env.render() eval_r += eval_skill_r # check for skill termination or episode termination eval_terminate_skill = my_skill_set.termination( eval_new_obs, eval_primitive_id, primitive_params=eval_paction[my_skill_set. 
len:]) if eval_done or eval_terminate_skill: break eval_skill_obs = eval_new_obs # hack assuming the skills are trained from diff reward signal eval_r = eval_skill_r else: eval_action, _ = eval_paction, eval_pq eval_new_obs, eval_r, eval_done, eval_info = eval_env.step( max_action * eval_action) eval_episode_reward += eval_r eval_obs = eval_new_obs eval_episode_rewards.append(eval_episode_reward) eval_episode_rewards_history.append(eval_episode_reward) eval_episode_success.append( eval_info["done"] == "goal reached") if (eval_info["done"] == "goal reached"): logger.info( "success, training epoch:%d,starting config:" % epoch, eval_obs_start, 'final state', eval_obs) if dologging and rank == 0: print("Logging!") # Log stats. epoch_train_duration = time.time() - epoch_start_time duration = time.time() - start_time stats = agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] = normal_mean(stats[key]) # Rollout statistics. combined_stats['rollout/return'] = normal_mean( epoch_episode_rewards) if len(episode_rewards_history) > 0: combined_stats['rollout/return_history'] = normal_mean( np.mean(episode_rewards_history)) else: combined_stats['rollout/return_history'] = 0. combined_stats['rollout/episode_steps'] = normal_mean( epoch_episode_steps) combined_stats['rollout/episodes'] = np.sum(epoch_episodes) combined_stats['rollout/actions_mean'] = normal_mean( epoch_actions) combined_stats['rollout/actions_std'] = normal_std( epoch_actions) # Train statistics. combined_stats['train/loss_actor'] = normal_mean( epoch_actor_losses) combined_stats['train/loss_critic'] = normal_mean( epoch_critic_losses) if param_noise is not None: combined_stats['train/param_noise_distance'] = normal_mean( epoch_adaptive_distances) if kwargs['look_ahead']: combined_stats['train/exploration'] = exploration.value( epoch * nb_epoch_cycles + cycle) # Evaluation statistics. if eval_env is not None: combined_stats['eval/return'] = normal_mean( eval_episode_rewards) combined_stats['eval/success'] = normal_mean( eval_episode_success) if len(eval_episode_rewards_history) > 0: combined_stats['eval/return_history'] = normal_mean( np.mean(eval_episode_rewards_history)) else: combined_stats['eval/return_history'] = 0. combined_stats['eval/episodes'] = normal_mean( len(eval_episode_rewards)) # Total statistics. combined_stats['total/duration'] = normal_mean(duration) combined_stats['total/rollout_per_second'] = normal_mean( float(t) / float(duration)) combined_stats['total/episodes'] = normal_mean(episodes) combined_stats['total/epochs'] = epoch + 1 combined_stats['total/steps'] = t for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() # if rank == 0 and logdir: # print("Dumping progress!") # if hasattr(env, 'get_state'): # with open(osp.join(logdir, 'env_state.pkl'), 'wb') as f: # pickle.dump(env.get_state(), f) # if eval_env and hasattr(eval_env, 'get_state'): # with open(osp.join(logdir, 'eval_env_state.pkl'), 'wb') as f: # pickle.dump(eval_env.get_state(), f) ## save tf model if rank == 0 and (epoch + 1) % save_freq == 0: print("Saving the model!") os.makedirs(osp.join(logdir, "model"), exist_ok=True) saver.save(U.get_session(), logdir + "/model/ddpg", global_step=epoch)
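# env.apply_hindsight / apply_hierarchical_hindsight are environment-specific
# and not shown in this listing. As a rough sketch of the idea behind the HER
# storage above: replay the episode pretending the finally achieved state was
# the goal, so the relabeled rewards become informative. The observation
# layout (goal in the last goal_dim slots, achieved position in the first
# goal_dim slots) and the sparse -1/0 reward are assumptions for illustration.
import numpy as np

def apply_hindsight_sketch(states, actions, final_state, goal_dim=3, eps=0.05):
    achieved_goal = final_state[-goal_dim:]
    her_states = []
    for s in states + [final_state]:
        relabeled = s.copy()
        relabeled[-goal_dim:] = achieved_goal
        her_states.append(relabeled)
    her_rewards = []
    for s in her_states[1:]:
        reached = np.linalg.norm(s[:goal_dim] - achieved_goal) < eps
        her_rewards.append(0.0 if reached else -1.0)
    return her_states, her_rewards  # her_states: n+1, her_rewards: n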
def run(env_id, seed, noise_type, layer_norm, evaluation, memory_size, factor, **kwargs): # Configure things. rank = 0 if rank != 0: logger.set_level(logger.DISABLED) dologging = kwargs["dologging"] # Create envs. env = gym.make(env_id) gym.logger.setLevel(logging.WARN) if evaluation and rank == 0: eval_env = gym.make(env_id) else: eval_env = None # Parse noise_type action_noise = None param_noise = None nb_actions = env.action_space.shape[-1] for current_noise_type in noise_type.split(','): current_noise_type = current_noise_type.strip() if current_noise_type == 'none': pass elif 'adaptive-param' in current_noise_type: _, stddev = current_noise_type.split('_') param_noise = AdaptiveParamNoiseSpec( initial_stddev=float(stddev), desired_action_stddev=float(stddev)) elif 'normal' in current_noise_type: _, stddev = current_noise_type.split('_') action_noise = NormalActionNoise(mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions)) elif 'ou' in current_noise_type: _, stddev = current_noise_type.split('_') action_noise = OrnsteinUhlenbeckActionNoise( mu=np.zeros(nb_actions), sigma=float(stddev) * np.ones(nb_actions)) else: raise RuntimeError( 'unknown noise type "{}"'.format(current_noise_type)) # Configure components. single_train = False ospace = env.observation_space has_image = (not hasattr(ospace, 'shape')) or (not ospace.shape) if has_image: assert isinstance(env.observation_space, gym.spaces.Tuple) env.observation_space.shape = [ x.shape for x in env.observation_space.spaces ] #eval_env.observation_space.shape = [x.shape for x in eval_env.observation_space.spaces] if rank == 0 or not single_train: memory = Memory(limit=memory_size, action_shape=env.action_space.shape, observation_shape=env.observation_space.shape) else: memory = None if has_image: ignore = False if ignore: critic = IgnoreDepthCritic(layer_norm=layer_norm) actor = IgnoreDepthActor(nb_actions, layer_norm=layer_norm) else: critic = DepthCritic(layer_norm=layer_norm) if factor: actor = FactoredDepthActor(nb_actions, layer_norm=layer_norm) else: actor = DepthActor(nb_actions, layer_norm=layer_norm) else: critic = Critic(layer_norm=layer_norm) actor = Actor(nb_actions, layer_norm=layer_norm) # Seed everything to make things reproducible. seed = seed + 1000000 * rank logger.info('rank {}: seed={}, logdir={}'.format(rank, seed, logger.get_dir())) tf.reset_default_graph() set_global_seeds(seed) env.seed(seed) if eval_env is not None: eval_env.seed(6) # Disable logging for rank != 0 to avoid noise. if rank == 0: start_time = time.time() testing.test(env=env, eval_env=eval_env, param_noise=param_noise, action_noise=action_noise, actor=actor, critic=critic, memory=memory, **kwargs) env.close() if eval_env is not None: eval_env.close() if rank == 0: logger.info('total runtime: {}s'.format(time.time() - start_time))
def run(env_id, seed, noise_type, layer_norm, evaluation, **kwargs):
    # Configure things.
    rank = 0
    if rank != 0:
        logger.set_level(logger.DISABLED)
    dologging = kwargs["dologging"]

    # Create envs.
    env = gym.make(env_id)
    gym.logger.setLevel(logging.WARN)

    if evaluation and rank == 0:
        eval_env = gym.make(env_id)
    else:
        eval_env = None

    tf.reset_default_graph()

    if kwargs['skillset']:
        skillset_file = __import__("HER.skills.%s" % kwargs['skillset'],
                                   fromlist=[''])
        my_skill_set = SkillSet(skillset_file.skillset)
        nb_actions = my_skill_set.params + my_skill_set.len
    else:
        nb_actions = env.action_space.shape[-1]

    # Configure components.
    memory = Memory(limit=int(1e6),
                    action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape)
    critic = Critic(layer_norm=layer_norm)
    if kwargs['skillset'] is None:
        actor = Actor(discrete_action_size=env.env.discrete_action_size,
                      cts_action_size=nb_actions -
                      env.env.discrete_action_size,
                      layer_norm=layer_norm)
        my_skill_set = None
    else:
        # get the skillset and make actor accordingly
        actor = Actor(discrete_action_size=my_skill_set.len,
                      cts_action_size=nb_actions - my_skill_set.len,
                      layer_norm=layer_norm)

    # Seed everything to make things reproducible.
    seed = seed + 1000000 * rank
    logger.info('rank {}: seed={}, logdir={}'.format(rank, seed,
                                                     logger.get_dir()))
    set_global_seeds(seed)
    env.seed(seed)
    if eval_env is not None:
        eval_env.seed(seed)

    # Disable logging for rank != 0 to avoid noise.
    if rank == 0:
        start_time = time.time()
    testing.test(env=env,
                 eval_env=eval_env,
                 param_noise=None,
                 action_noise=None,
                 actor=actor,
                 critic=critic,
                 memory=memory,
                 my_skill_set=my_skill_set,
                 **kwargs)
    env.close()
    if eval_env is not None:
        eval_env.close()
    if rank == 0:
        logger.info('total runtime: {}s'.format(time.time() - start_time))
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, additional_critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.05, eval_env=None, param_noise_adaption_interval=50, nb_eval_episodes=20, **kwargs): rank = MPI.COMM_WORLD.Get_rank() assert (np.abs(env.action_space.low) == env.action_space.high ).all() # we assume symmetric actions. max_action = env.action_space.high if "dologging" in kwargs: dologging = kwargs["dologging"] else: dologging = True if "tf_sum_logging" in kwargs: tf_sum_logging = kwargs["tf_sum_logging"] else: tf_sum_logging = False if "invert_grad" in kwargs: invert_grad = kwargs["invert_grad"] else: invert_grad = False if "actor_reg" in kwargs: actor_reg = kwargs["actor_reg"] else: actor_reg = False if dologging: logger.info( 'scaling actions by {} before executing in env'.format(max_action)) agent = CDQ(actor, critic, additional_critic, memory, env.observation_space.shape, env.action_space.shape, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, inverting_grad=invert_grad, actor_reg=actor_reg) if dologging: logger.debug('Using agent with the following configuration:') if dologging: logger.debug(str(agent.__dict__.items())) # Set up logging stuff only for a single worker. if rank != -1: saver = tf.train.Saver(keep_checkpoint_every_n_hours=2, max_to_keep=5, save_relative_paths=True) save_freq = kwargs["save_freq"] else: saver = None # step = 0 global_t = 0 episode = 0 eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: # Set summary saver if dologging and tf_sum_logging and rank == 0: tf.summary.histogram("actor_grads", agent.actor_grads) tf.summary.histogram("critic_grads", agent.critic_grads) actor_trainable_vars = actor.trainable_vars for var in actor_trainable_vars: tf.summary.histogram(var.name, var) critic_trainable_vars = critic.trainable_vars for var in critic_trainable_vars: tf.summary.histogram(var.name, var) tf.summary.histogram("actions_out", agent.actor_tf) tf.summary.histogram("critic_out", agent.critic_tf) tf.summary.histogram("target_Q", agent.target_Q) summary_var = tf.summary.merge_all() writer_t = tf.summary.FileWriter( osp.join(logger.get_dir(), 'train'), sess.graph) else: summary_var = tf.no_op() # Prepare everything. agent.initialize(sess) sess.graph.finalize() #set_trace() ## restore if kwargs["restore_dir"] is not None: restore_dir = osp.join(kwargs["restore_dir"], "model") if (restore_dir is not None): print('Restore path : ', restore_dir) # checkpoint = tf.train.get_checkpoint_state(restore_dir) # if checkpoint and checkpoint.model_checkpoint_path: model_checkpoint_path = read_checkpoint_local(restore_dir) if model_checkpoint_path: saver.restore(U.get_session(), model_checkpoint_path) print("checkpoint loaded:", model_checkpoint_path) logger.info("checkpoint loaded:" + str(model_checkpoint_path)) tokens = model_checkpoint_path.split("-")[-1] # set global step global_t = int(tokens) print(">>> global step set:", global_t) agent.reset() obs = env.reset() done = False episode_reward = 0. 
episode_step = 0 episodes = 0 t = 0 epoch = 0 start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_episode_eval_rewards = [] epoch_episode_eval_steps = [] epoch_start_time = time.time() epoch_actions = [] epoch_qs = [] epoch_episodes = 0 ## containers for hindsight if kwargs["her"]: # logger.info("-"*50 +'\nWill create HER\n' + "-"*50) states, actions = [], [] print("Ready to go!") for epoch in range(global_t, nb_epochs): # stat containers epoch_actor_losses = [] epoch_critic_losses = [] epoch_adaptive_distances = [] eval_episode_rewards = [] eval_qs = [] eval_episode_success = [] for cycle in range(nb_epoch_cycles): # print("cycle:%d"%cycle) # Perform rollouts. for t_rollout in range( int(nb_rollout_steps / MPI.COMM_WORLD.Get_size())): # print(rank, t_rollout) # Predict next action. action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape == env.action_space.shape # Execute next action. if rank == 0 and render: env.render() assert max_action.shape == action.shape new_obs, r, done, info = env.step( max_action * action ) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) #if((t+1)%100) == 0: # print(max_action*action, new_obs, r) t += 1 if rank == 0 and render: env.render() sleep(0.1) episode_reward += r episode_step += 1 # Book-keeping. epoch_actions.append(action) epoch_qs.append(q) agent.store_transition(obs, action, r, new_obs, done) ## storing info for hindsight states.append(obs.copy()) actions.append(action.copy()) obs = new_obs if done: # Episode done. epoch_episode_rewards.append(episode_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(episode_step) episode_reward = 0. episode_step = 0 epoch_episodes += 1 episodes += 1 if kwargs["her"]: # logger.info("-"*50 +'\nCreating HER\n' + "-"*50) ## create hindsight experience replay her_states, her_rewards = env.env.apply_hindsight( states, actions, new_obs.copy()) ## store her transitions: her_states: n+1, her_rewards: n for her_i in range(len(her_states) - 2): agent.store_transition(her_states[her_i], actions[her_i], her_rewards[her_i], her_states[her_i + 1], False) #store last transition agent.store_transition(her_states[-2], actions[-1], her_rewards[-1], her_states[-1], True) ## refresh the storage containers del states, actions states, actions = [], [] agent.reset() obs = env.reset() #print(obs) # print(rank, "Training!") # Train. for t_train in range(nb_train_steps): # print(rank, t_train) # Adapt param noise, if necessary. if memory.nb_entries >= batch_size and t % param_noise_adaption_interval == 0: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) cl, al, current_summary = agent.train(summary_var) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() if dologging and tf_sum_logging and rank == 0: writer_t.add_summary( current_summary, epoch * nb_epoch_cycles * nb_train_steps + cycle * nb_train_steps + t_train) # print("Evaluating!") # Evaluate. if (eval_env is not None) and rank == 0: for _ in range(nb_eval_episodes): eval_episode_reward = 0. 
eval_obs = eval_env.reset() eval_obs_start = eval_obs.copy() eval_done = False while (not eval_done): eval_action, eval_q = agent.pi(eval_obs, apply_noise=False, compute_Q=True) eval_obs, eval_r, eval_done, eval_info = eval_env.step( max_action * eval_action ) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1]) if render_eval: sleep(0.1) print("Render!") eval_env.render() print("rendered!") eval_episode_reward += eval_r eval_qs.append(eval_q) eval_episode_rewards.append(eval_episode_reward) eval_episode_rewards_history.append(eval_episode_reward) eval_episode_success.append( eval_info["done"] == "goal reached") if (eval_info["done"] == "goal reached"): logger.info( "success, training epoch:%d,starting config:" % epoch, eval_obs_start, 'final state', eval_obs) if dologging and rank == 0: print("Logging!") # Log stats. epoch_train_duration = time.time() - epoch_start_time duration = time.time() - start_time stats = agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] = normal_mean(stats[key]) # Rollout statistics. combined_stats['rollout/return'] = normal_mean( epoch_episode_rewards) if len(episode_rewards_history) > 0: combined_stats['rollout/return_history'] = normal_mean( np.mean(episode_rewards_history)) else: combined_stats['rollout/return_history'] = 0. combined_stats['rollout/episode_steps'] = normal_mean( epoch_episode_steps) combined_stats['rollout/episodes'] = np.sum(epoch_episodes) combined_stats['rollout/actions_mean'] = normal_mean( epoch_actions) combined_stats['rollout/actions_std'] = normal_std( epoch_actions) combined_stats['rollout/Q_mean'] = normal_mean(epoch_qs) # Train statistics. combined_stats['train/loss_actor'] = normal_mean( epoch_actor_losses) combined_stats['train/loss_critic'] = normal_mean( epoch_critic_losses) combined_stats['train/param_noise_distance'] = normal_mean( epoch_adaptive_distances) # Evaluation statistics. if eval_env is not None: combined_stats['eval/return'] = normal_mean( eval_episode_rewards) combined_stats['eval/success'] = normal_mean( eval_episode_success) if len(eval_episode_rewards_history) > 0: combined_stats['eval/return_history'] = normal_mean( np.mean(eval_episode_rewards_history)) else: combined_stats['eval/return_history'] = 0. combined_stats['eval/Q'] = normal_mean(eval_qs) combined_stats['eval/episodes'] = normal_mean( len(eval_episode_rewards)) # Total statistics. combined_stats['total/duration'] = normal_mean(duration) combined_stats['total/steps_per_second'] = normal_mean( float(t) / float(duration)) combined_stats['total/episodes'] = normal_mean(episodes) combined_stats['total/epochs'] = epoch + 1 combined_stats['total/steps'] = t for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0 and logdir: print("Dumping progress!") if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f: pickle.dump(eval_env.get_state(), f) ## save tf model if rank == 0 and (epoch + 1) % save_freq == 0: print("Saving the model!") os.makedirs(osp.join(logdir, "model"), exist_ok=True) saver.save(U.get_session(), logdir + "/model/cdq", global_step=epoch)
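# The CDQ agent above is constructed with a second critic (additional_critic);
# its internals are not shown in this listing. Presumably the target follows
# the clipped double-Q idea, taking the minimum of the two target critics to
# curb overestimation. A numpy sketch of such a target, stated as an assumption:
import numpy as np

def clipped_double_q_target(rewards, terminals, q1_next, q2_next, gamma=0.99):
    q_next = np.minimum(q1_next, q2_next)
    return rewards + (1.0 - terminals) * gamma * q_next

target = clipped_double_q_target(rewards=np.array([-1.0, 0.0]),
                                 terminals=np.array([0.0, 1.0]),
                                 q1_next=np.array([-4.0, -2.0]),
                                 q2_next=np.array([-3.5, -2.5]))
# target -> [-1.0 + 0.99 * (-4.0), 0.0] = [-4.96, 0.0]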
def run(env_id, seed, evaluation, **kwargs):
    # Create envs.
    env = gym.make(env_id)
    # print(env.action_space.shape)
    logger.info("Env info")
    logger.info(env.__doc__)
    logger.info("-" * 20)
    gym.logger.setLevel(logging.WARN)

    if evaluation:
        if kwargs['eval_env_id']:
            eval_env_id = kwargs['eval_env_id']
        else:
            eval_env_id = env_id
        eval_env = gym.make(eval_env_id)
        # del eval_env_id from kwargs
        del kwargs['eval_env_id']
    else:
        eval_env = None

    if kwargs['skillset']:
        skillset_file = __import__("HER.skills.%s" % kwargs['skillset'],
                                   fromlist=[''])
        my_skill_set = SkillSet(skillset_file.skillset)

    model = models.mlp([64])

    # Seed everything to make things reproducible.
    logger.info('seed={}, logdir={}'.format(seed, logger.get_dir()))
    set_global_seeds(seed)
    env.seed(seed)
    if eval_env is not None:
        eval_env.seed(seed)

    start_time = time.time()
    training.train(env=env,
                   eval_env=eval_env,
                   q_func=model,
                   lr=kwargs['lr'],
                   max_timesteps=kwargs['num_timesteps'],
                   buffer_size=50000,
                   exploration_fraction=0.1,
                   exploration_final_eps=0.002,
                   train_freq=1,
                   batch_size=kwargs['batch_size'],
                   print_freq=100,
                   checkpoint_freq=kwargs['save_freq'],
                   learning_starts=max(50, kwargs['batch_size']),
                   target_network_update_freq=100,
                   prioritized_replay=kwargs['prioritized_replay'],
                   prioritized_replay_alpha=0.6,
                   prioritized_replay_beta0=0.4,
                   prioritized_replay_beta_iters=None,
                   prioritized_replay_eps=1e-6,
                   param_noise=False,
                   gamma=kwargs['gamma'],
                   log_dir=kwargs['log_dir'],
                   my_skill_set=my_skill_set,
                   num_eval_episodes=kwargs['num_eval_episodes'],
                   render=kwargs['render'],
                   render_eval=kwargs['render_eval'],
                   commit_for=kwargs['commit_for'])
    env.close()
    if eval_env is not None:
        eval_env.close()
    logger.info('total runtime: {}s'.format(time.time() - start_time))
def __init__(self, actor, critic, memory, observation_shape, action_shape, single_train, param_noise=None, action_noise=None, gamma=0.99, tau=0.001, normalize_returns=False, enable_popart=False, normalize_observations=True, batch_size=128, observation_range=(-5., 5.), action_range=(-1., 1.), return_range=(-np.inf, np.inf), adaptive_param_noise=True, adaptive_param_noise_policy_threshold=.1, critic_l2_reg=0., actor_lr=1e-4, critic_lr=1e-3, clip_norm=None, reward_scale=1., inverting_grad=False, actor_reg=True): logger.info("DDPG params") logger.info(str(locals())) logger.info("-" * 20) # Inputs. self.single_train = single_train #is the observation space a Tuple space? self.tuple_obs = (isinstance(observation_shape[0], list) or isinstance(observation_shape[0], tuple)) if self.tuple_obs: self.obs0 = [ tf.placeholder(tf.float32, shape=(None, ) + observation_shape_, name='obs0') for observation_shape_ in observation_shape ] self.obs1 = [ tf.placeholder(tf.float32, shape=(None, ) + observation_shape_, name='obs1') for observation_shape_ in observation_shape ] else: self.obs0 = tf.placeholder(tf.float32, shape=(None, ) + observation_shape, name='obs0') self.obs1 = tf.placeholder(tf.float32, shape=(None, ) + observation_shape, name='obs1') self.terminals1 = tf.placeholder(tf.float32, shape=(None, 1), name='terminals1') self.rewards = tf.placeholder(tf.float32, shape=(None, 1), name='rewards') self.actions = tf.placeholder(tf.float32, shape=(None, ) + action_shape, name='actions') self.critic_target = tf.placeholder(tf.float32, shape=(None, 1), name='critic_target') self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev') # Parameters. self.gamma = gamma self.tau = tau self.memory = memory self.normalize_observations = normalize_observations self.normalize_returns = normalize_returns self.action_noise = action_noise self.param_noise = param_noise self.action_range = action_range self.return_range = return_range self.observation_range = observation_range self.critic = critic self.actor = actor self.actor_lr = actor_lr self.critic_lr = critic_lr self.clip_norm = clip_norm self.enable_popart = enable_popart self.reward_scale = reward_scale self.batch_size = batch_size self.stats_sample = None self.critic_l2_reg = critic_l2_reg self.inverting_grad = inverting_grad self.actor_reg = actor_reg self.total_recv = 0 self.buffers = [] # Observation normalization. if self.normalize_observations: with tf.variable_scope('obs_rms'): obs_shape = observation_shape[ 0] if self.tuple_obs else observation_shape self.obs_rms = RunningMeanStd(shape=obs_shape) else: self.obs_rms = None if self.tuple_obs: #normalize only the first item normalized_obs0 = self.obs0 normalized_obs1 = self.obs1 normalized_obs0[0] = tf.clip_by_value( normalize(self.obs0[0], self.obs_rms), self.observation_range[0], self.observation_range[1]) normalized_obs1[0] = tf.clip_by_value( normalize(self.obs1[0], self.obs_rms), self.observation_range[0], self.observation_range[1]) else: normalized_obs0 = tf.clip_by_value( normalize(self.obs0, self.obs_rms), self.observation_range[0], self.observation_range[1]) normalized_obs1 = tf.clip_by_value( normalize(self.obs1, self.obs_rms), self.observation_range[0], self.observation_range[1]) # Return normalization. if self.normalize_returns: with tf.variable_scope('ret_rms'): self.ret_rms = RunningMeanStd() else: self.ret_rms = None # Create target networks. 
target_actor = copy(actor) target_actor.name = 'target_actor' self.target_actor = target_actor target_critic = copy(critic) target_critic.name = 'target_critic' self.target_critic = target_critic # Create networks and core TF parts that are shared across setup parts. self.normalized_critic_tf = critic(normalized_obs0, self.actions) self.critic_tf = denormalize( tf.clip_by_value(self.normalized_critic_tf, self.return_range[0], self.return_range[1]), self.ret_rms) Q_obs1 = denormalize( target_critic(normalized_obs1, target_actor(normalized_obs1)), self.ret_rms) self.target_Q = self.rewards + (1. - self.terminals1) * gamma * Q_obs1 # clip the target Q value self.target_Q = tf.clip_by_value(self.target_Q, -1 / (1 - gamma), 0) self.actor_tf = actor(normalized_obs0) if inverting_grad: actor_tf_clone_with_invert_grad = my_op.py_func( my_op.my_identity_func, [self.actor_tf, -1., 1.], self.actor_tf.dtype, name="MyIdentity", grad=my_op._custom_identity_grad) self.actor_tf = tf.reshape(actor_tf_clone_with_invert_grad, tf.shape(self.actor_tf)) self.normalized_critic_with_actor_tf = critic(normalized_obs0, self.actor_tf, reuse=True) self.critic_with_actor_tf = denormalize( tf.clip_by_value(self.normalized_critic_with_actor_tf, self.return_range[0], self.return_range[1]), self.ret_rms) # Set up parts. if self.param_noise is not None: self.setup_param_noise(normalized_obs0) self.setup_actor_optimizer() self.setup_critic_optimizer() if self.normalize_returns and self.enable_popart: self.setup_popart() self.setup_stats() self.setup_target_network_updates()
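# On the target_Q clipping above: if per-step rewards lie in [-1, 0] (the
# sparse-reward convention these environments appear to use; an assumption,
# since the reward functions are not shown), the discounted return is bounded
# by [-1 / (1 - gamma), 0], which is exactly the clip range. A quick check:
gamma = 0.99
worst_case_return = sum(-1.0 * gamma ** k for k in range(10000))
assert abs(worst_case_return - (-1.0 / (1.0 - gamma))) < 1e-2  # both ~= -100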
def train(env, eval_env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, my_skill_set=None, log_dir = None, num_eval_episodes=10, render=False, render_eval = False, commit_for = 1 ): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model if my_skill_set: assert commit_for>=1, "commit_for >= 1" save_idx = 0 with U.single_threaded_session() as sess: ## restore if my_skill_set: action_shape = my_skill_set.len else: action_shape = env.action_space.n # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return U.BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=action_shape, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': action_shape, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() # sess.run(tf.variables_initializer(new_variables)) # sess.run(tf.global_variables_initializer()) update_target() if my_skill_set: ## restore skills my_skill_set.restore_skillset(sess=sess) episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True model_saved = False model_file = os.path.join(log_dir, "model", "deepq") # save the initial act model print("Saving the starting model") os.makedirs(os.path.dirname(model_file), exist_ok=True) act.save(model_file + '.pkl') for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True paction = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] if(my_skill_set): skill_obs = obs.copy() primitive_id = paction rew = 0. 
for _ in range(commit_for): ## break actions into primitives and their params action = my_skill_set.pi(primitive_id=primitive_id, obs = skill_obs.copy(), primitive_params=None) new_obs, skill_rew, done, _ = env.step(action) if render: # print(action) env.render() sleep(0.1) rew += skill_rew skill_obs = new_obs terminate_skill = my_skill_set.termination(new_obs) if done or terminate_skill: break else: action= paction env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) if render: env.render() sleep(0.1) # Store transition in the replay buffer for the outer env replay_buffer.add(obs, paction, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True print("Time:%d, episodes:%d"%(t,len(episode_rewards))) # add hindsight experience if t > learning_starts and t % train_freq == 0: # print('Training!') # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() # print(len(episode_rewards), episode_rewards[-11:-1]) mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if (checkpoint_freq is not None and t > learning_starts and num_episodes > 50 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) act.save(model_file + '%d.pkl'%save_idx) save_idx += 1 model_saved = True saved_mean_reward = mean_100ep_reward # else: # print(saved_mean_reward, mean_100ep_reward) if (eval_env is not None) and t > learning_starts and t % target_network_update_freq == 0: # dumping other stats logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("%d time spent exploring", int(100 * exploration.value(t))) print("Testing!") eval_episode_rewards = [] eval_episode_successes = [] for i in range(num_eval_episodes): eval_episode_reward = 0. eval_obs = eval_env.reset() eval_obs_start = eval_obs.copy() eval_done = False while(not eval_done): eval_paction = act(np.array(eval_obs)[None])[0] if(my_skill_set): eval_skill_obs = eval_obs.copy() eval_primitive_id = eval_paction eval_r = 0. 
for _ in range(commit_for): ## break actions into primitives and their params eval_action, _ = my_skill_set.pi(primitive_id=eval_primitive_id, obs = eval_skill_obs.copy(), primitive_params=None) eval_new_obs, eval_skill_rew, eval_done, eval_info = eval_env.step(eval_action) # print('env reward:%f'%eval_skill_rew) if render_eval: print("Render!") eval_env.render() print("rendered!") eval_r += eval_skill_rew eval_skill_obs = eval_new_obs eval_terminate_skill = my_skill_set.termination(eval_new_obs) if eval_done or eval_terminate_skill: break else: eval_action= eval_paction env_action = eval_action reset = False eval_new_obs, eval_r, eval_done, eval_info = eval_env.step(env_action) if render_eval: # print("Render!") eval_env.render() # print("rendered!") eval_episode_reward += eval_r # print("eval_r:%f, eval_episode_reward:%f"%(eval_r, eval_episode_reward)) eval_obs = eval_new_obs eval_episode_success = (eval_info["done"]=="goal reached") if(eval_episode_success): logger.info("success, training epoch:%d,starting config:"%t) eval_episode_rewards.append(eval_episode_reward) eval_episode_successes.append(eval_episode_success) combined_stats = {} # print(eval_episode_successes, np.mean(eval_episode_successes)) combined_stats['eval/return'] = normal_mean(eval_episode_rewards) combined_stats['eval/success'] = normal_mean(eval_episode_successes) combined_stats['eval/episodes'] = (len(eval_episode_rewards)) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) print("dumping the stats!") logger.dump_tabular() if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) U.load_state(model_file)
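# The update_param_noise_threshold expression used in the training loop above
# maps an eps-greedy exploration rate onto a distance threshold for
# parameter-space noise (Plappert et al., 2017). A standalone evaluation of
# that formula; num_actions=6 is just an example.
import numpy as np

def param_noise_threshold(eps, num_actions):
    return -np.log(1.0 - eps + eps / float(num_actions))

for eps in (1.0, 0.5, 0.1, 0.002):
    print(eps, param_noise_threshold(eps, num_actions=6))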