def learn(network, env,
          seed=None,
          total_timesteps=None,
          nb_epochs=None,  # with default settings, perform 1M steps total
          nb_epoch_cycles=20,
          nb_rollout_steps=100,
          reward_scale=1.0,
          render=False,
          render_eval=False,
          noise_type='adaptive-param_0.2',
          normalize_returns=False,
          normalize_observations=True,
          critic_l2_reg=1e-2,
          actor_lr=1e-4,
          critic_lr=1e-3,
          popart=False,
          gamma=0.99,
          clip_norm=None,
          nb_train_steps=50,  # per epoch cycle and MPI worker
          nb_eval_steps=100,
          batch_size=64,  # per MPI worker
          tau=0.01,
          eval_env=None,
          param_noise_adaption_interval=50,
          **network_kwargs):

    set_global_seeds(seed)

    if total_timesteps is not None:
        assert nb_epochs is None
        nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)
    else:
        nb_epochs = 500

    rank = MPI.COMM_WORLD.Get_rank()
    nb_actions = env.action_space.shape[-1]
    assert (np.abs(env.action_space.low) == env.action_space.high).all()  # we assume symmetric actions.

    memory = Memory(limit=int(1e6), action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape)
    critic = Critic(network=network, **network_kwargs)
    actor = Actor(nb_actions, network=network, **network_kwargs)

    action_noise = None
    param_noise = None
    if noise_type is not None:
        for current_noise_type in noise_type.split(','):
            current_noise_type = current_noise_type.strip()
            if current_noise_type == 'none':
                pass
            elif 'adaptive-param' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev),
                                                     desired_action_stddev=float(stddev))
            elif 'normal' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                                 sigma=float(stddev) * np.ones(nb_actions))
            elif 'ou' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                            sigma=float(stddev) * np.ones(nb_actions))
            else:
                raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))

    max_action = env.action_space.high
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise,
                 critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,
                 enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))

    eval_episode_rewards_history = deque(maxlen=100)
    episode_rewards_history = deque(maxlen=100)
    sess = U.get_session()

    # Prepare everything.
    agent.initialize(sess)
    sess.graph.finalize()
    agent.reset()

    obs = env.reset()
    if eval_env is not None:
        eval_obs = eval_env.reset()
    nenvs = obs.shape[0]

    episode_reward = np.zeros(nenvs, dtype=np.float32)  # vector
    episode_step = np.zeros(nenvs, dtype=int)  # vector
    episodes = 0  # scalar
    t = 0  # scalar
    epoch = 0

    start_time = time.time()

    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_qs = []
    epoch_episodes = 0
    for epoch in range(nb_epochs):
        for cycle in range(nb_epoch_cycles):
            # Perform rollouts.
            if nenvs > 1:
                # if simulating multiple envs in parallel, impossible to reset agent at
                # the end of the episode in each of the environments, so resetting here instead
                agent.reset()
            for t_rollout in range(nb_rollout_steps):
                # Predict next action.
                action, q, _, _ = agent.step(obs, apply_noise=True, compute_Q=True)

                # Execute next action.
                if rank == 0 and render:
                    env.render()

                # max_action is of dimension A, whereas action is dimension (nenvs, A) -
                # the multiplication gets broadcasted to the batch
                new_obs, r, done, info = env.step(max_action * action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                # note these outputs are batched from vecenv
                t += 1
                if rank == 0 and render:
                    env.render()
                episode_reward += r
                episode_step += 1

                # Book-keeping.
                epoch_actions.append(action)
                epoch_qs.append(q)
                agent.store_transition(obs, action, r, new_obs, done)  # the batched data will be unrolled in memory.py's append.

                obs = new_obs

                for d in range(len(done)):
                    if done[d]:
                        # Episode done.
                        epoch_episode_rewards.append(episode_reward[d])
                        episode_rewards_history.append(episode_reward[d])
                        epoch_episode_steps.append(episode_step[d])
                        episode_reward[d] = 0.
                        episode_step[d] = 0
                        epoch_episodes += 1
                        episodes += 1
                        if nenvs == 1:
                            agent.reset()

            # Train.
            epoch_actor_losses = []
            epoch_critic_losses = []
            epoch_adaptive_distances = []
            for t_train in range(nb_train_steps):
                # Adapt param noise, if necessary.
                if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
                    distance = agent.adapt_param_noise()
                    epoch_adaptive_distances.append(distance)

                cl, al = agent.train()
                epoch_critic_losses.append(cl)
                epoch_actor_losses.append(al)
                agent.update_target_net()

            # Evaluate.
            eval_episode_rewards = []
            eval_qs = []
            if eval_env is not None:
                nenvs_eval = eval_obs.shape[0]
                eval_episode_reward = np.zeros(nenvs_eval, dtype=np.float32)
                for t_rollout in range(nb_eval_steps):
                    eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)
                    eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                    if render_eval:
                        eval_env.render()
                    eval_episode_reward += eval_r

                    eval_qs.append(eval_q)
                    for d in range(len(eval_done)):
                        if eval_done[d]:
                            eval_episode_rewards.append(eval_episode_reward[d])
                            eval_episode_rewards_history.append(eval_episode_reward[d])
                            eval_episode_reward[d] = 0.0

        mpi_size = MPI.COMM_WORLD.Get_size()

        # Log stats.
        # XXX shouldn't call np.mean on variable length lists
        duration = time.time() - start_time
        stats = agent.get_stats()
        combined_stats = stats.copy()
        combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
        combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
        combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
        combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
        combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
        combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
        combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
        combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
        combined_stats['total/duration'] = duration
        combined_stats['total/steps_per_second'] = float(t) / float(duration)
        combined_stats['total/episodes'] = episodes
        combined_stats['rollout/episodes'] = epoch_episodes
        combined_stats['rollout/actions_std'] = np.std(epoch_actions)

        # Evaluation statistics.
        if eval_env is not None:
            combined_stats['eval/return'] = eval_episode_rewards
            combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
            combined_stats['eval/Q'] = eval_qs
            combined_stats['eval/episodes'] = len(eval_episode_rewards)

        def as_scalar(x):
            if isinstance(x, np.ndarray):
                assert x.size == 1
                return x[0]
            elif np.isscalar(x):
                return x
            else:
                raise ValueError('expected scalar, got %s' % x)

        combined_stats_sums = MPI.COMM_WORLD.allreduce(
            np.array([np.array(x).flatten()[0] for x in combined_stats.values()]))
        combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}

        # Total statistics.
        combined_stats['total/epochs'] = epoch + 1
        combined_stats['total/steps'] = t

        for key in sorted(combined_stats.keys()):
            logger.record_tabular(key, combined_stats[key])
        if rank == 0:
            logger.dump_tabular()
        logger.info('')
        logdir = logger.get_dir()
        if rank == 0 and logdir:
            if hasattr(env, 'get_state'):
                with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
                    pickle.dump(env.get_state(), f)
            if eval_env and hasattr(eval_env, 'get_state'):
                with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
                    pickle.dump(eval_env.get_state(), f)

    return agent
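# Hedged usage sketch (not part of the original source): the learn() above reads
# obs.shape[0] as the number of parallel environments, so a single gym env would
# typically be wrapped in a vectorized env first. The env id, timestep budget, and
# the DummyVecEnv/gym imports below are assumptions consistent with baselines-style
# code; treat this as a minimal illustration, not the author's launch script.
import gym
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv


def _make_env_sketch():
    # Any env with a symmetric Box action space satisfies the assert in learn().
    return gym.make('Pendulum-v0')


if __name__ == '__main__':
    vec_env = DummyVecEnv([_make_env_sketch])
    trained_agent = learn(network='mlp', env=vec_env,
                          total_timesteps=10000,
                          noise_type='adaptive-param_0.2')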
# Fragment: assumes nb_action, network, action_shape, observation_shape,
# action_noise, param_noise, popart, and load_path are defined earlier in the
# enclosing scope.
memory = Memory(limit=int(1e6), action_shape=action_shape,
                observation_shape=observation_shape)
critic = Critic(network=network)
actor = Actor(nb_action, network=network)

agent = DDPG(actor, critic, memory, observation_shape, action_shape,
             gamma=0.99, tau=0.01, normalize_returns=False,
             normalize_observations=True, batch_size=32,
             action_noise=action_noise, param_noise=param_noise,
             critic_l2_reg=1e-2, actor_lr=1e-4, critic_lr=1e-3,
             enable_popart=popart, clip_norm=None, reward_scale=1)
sess = U.get_session()
agent.initialize(sess)
sess.graph.finalize()
agent.reset()

if load_path is None:
    epoch_actor_losses = []
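# Hedged helper sketch (assumption, not in the original source): the fragment above
# relies on action_noise and param_noise being built beforehand. Every learn()
# variant in this file builds them by parsing a 'name_stddev' spec string; factored
# out, that parsing looks roughly like this. The function name is hypothetical and
# the noise classes are assumed to come from baselines.ddpg.noise, as elsewhere here.
import numpy as np
from baselines.ddpg.noise import (AdaptiveParamNoiseSpec, NormalActionNoise,
                                  OrnsteinUhlenbeckActionNoise)


def _parse_noise_sketch(noise_type, nb_actions):
    action_noise, param_noise = None, None
    if noise_type is None:
        return action_noise, param_noise
    for current in (s.strip() for s in noise_type.split(',')):
        if current == 'none':
            continue
        name, _, stddev = current.partition('_')
        stddev = float(stddev)
        if name == 'adaptive-param':
            param_noise = AdaptiveParamNoiseSpec(initial_stddev=stddev,
                                                 desired_action_stddev=stddev)
        elif name == 'normal':
            action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                             sigma=stddev * np.ones(nb_actions))
        elif name == 'ou':
            action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                        sigma=stddev * np.ones(nb_actions))
        else:
            raise RuntimeError('unknown noise type "{}"'.format(current))
    return action_noise, param_noise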
def __init__(self, env, gamma, total_timesteps,
             network='mlp',
             nb_rollout_steps=100,
             reward_scale=1.0,
             noise_type='adaptive-param_0.2',
             normalize_returns=False,
             normalize_observations=False,
             critic_l2_reg=1e-2,
             actor_lr=1e-4,
             critic_lr=1e-3,
             popart=False,
             clip_norm=None,
             nb_train_steps=50,  # per epoch cycle and MPI worker <- HERE!
             nb_eval_steps=100,
             buffer_size=1000000,
             batch_size=64,  # per MPI worker
             tau=0.01,
             param_noise_adaption_interval=50,
             **network_kwargs):

    # Adjust the hyper-parameters to account for the number of option policies to learn.
    num_options = env.get_number_of_options()
    buffer_size = num_options * buffer_size
    batch_size = num_options * batch_size

    observation_space = env.option_observation_space
    action_space = env.option_action_space

    nb_actions = action_space.shape[-1]
    assert (np.abs(action_space.low) == action_space.high).all()  # we assume symmetric actions.

    memory = Memory(limit=buffer_size, action_shape=action_space.shape,
                    observation_shape=observation_space.shape)
    critic = Critic(network=network, **network_kwargs)
    actor = Actor(nb_actions, network=network, **network_kwargs)

    action_noise = None
    param_noise = None
    if noise_type is not None:
        for current_noise_type in noise_type.split(','):
            current_noise_type = current_noise_type.strip()
            if current_noise_type == 'none':
                pass
            elif 'adaptive-param' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev),
                                                     desired_action_stddev=float(stddev))
            elif 'normal' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                                 sigma=float(stddev) * np.ones(nb_actions))
            elif 'ou' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                            sigma=float(stddev) * np.ones(nb_actions))
            else:
                raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))

    max_action = action_space.high
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    agent = DDPG(actor, critic, memory, observation_space.shape, action_space.shape,
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise,
                 critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,
                 enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))

    sess = U.get_session()
    # Prepare everything.
    agent.initialize(sess)
    sess.graph.finalize()
    agent.reset()

    # Variables that are used during learning
    self.agent = agent
    self.memory = memory
    self.max_action = max_action
    self.batch_size = batch_size
    self.nb_train_steps = nb_train_steps
    self.nb_rollout_steps = nb_rollout_steps
    self.param_noise_adaption_interval = param_noise_adaption_interval
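# Hedged helper sketch (assumption, not in the original source): the __init__ above
# only stores the agent, memory, and training hyper-parameters, so the actual update
# step presumably lives elsewhere. The standalone helper below mirrors the inner
# training loop used by the learn() functions in this file; its name and the choice
# to return mean losses are assumptions. numpy is assumed imported as np.
import numpy as np


def _train_ddpg_sketch(agent, memory, batch_size, nb_train_steps,
                       param_noise_adaption_interval):
    epoch_actor_losses, epoch_critic_losses, epoch_adaptive_distances = [], [], []
    for t_train in range(nb_train_steps):
        # Adapt parameter-space noise on the usual interval.
        if (memory.nb_entries >= batch_size
                and t_train % param_noise_adaption_interval == 0):
            epoch_adaptive_distances.append(agent.adapt_param_noise())
        cl, al = agent.train()
        epoch_critic_losses.append(cl)
        epoch_actor_losses.append(al)
        agent.update_target_net()
    return np.mean(epoch_critic_losses), np.mean(epoch_actor_losses)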
def learn(network, env,
          data_path='',
          model_path='./model/',
          model_name='ddpg_none_fuzzy_150',
          file_name='test',
          model_based=False,
          memory_extend=False,
          model_type='linear',
          restore=False,
          dyna_learning=False,
          seed=None,
          nb_epochs=5,  # with default settings, perform 1M steps total
          nb_sample_cycle=5,
          nb_epoch_cycles=150,
          nb_rollout_steps=400,
          nb_model_learning=10,
          nb_sample_steps=50,
          nb_samples_extend=5,
          reward_scale=1.0,
          noise_type='normal_0.2',  # 'adaptive-param_0.2', ou_0.2, normal_0.2
          normalize_returns=False,
          normalize_observations=True,
          critic_l2_reg=1e-2,
          actor_lr=1e-4,
          critic_lr=1e-3,
          popart=False,
          gamma=0.99,
          clip_norm=None,
          nb_train_steps=50,  # per epoch cycle and MPI worker
          batch_size=32,  # per MPI worker
          tau=0.01,
          param_noise_adaption_interval=50,
          **network_kwargs):

    nb_actions = env.action_space.shape[0]
    memory = Memory(limit=int(1e5), action_shape=env.action_space.shape[0],
                    observation_shape=env.observation_space.shape)

    if model_based:
        """ store fake_data """
        fake_memory = Memory(limit=int(1e5), action_shape=env.action_space.shape[0],
                             observation_shape=env.observation_space.shape)

        """ select model or not """
        if model_type == 'gp':
            kernel = ConstantKernel(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
            dynamic_model = GaussianProcessRegressor(kernel=kernel)
            reward_model = GaussianProcessRegressor(kernel=kernel)
        elif model_type == 'linear':
            dynamic_model = LinearRegression()
            reward_model = LinearRegression()
        elif model_type == 'mlp':
            dynamic_model = MLPRegressor(hidden_layer_sizes=(100, ), activation='relu',
                                         solver='adam', alpha=0.0001, batch_size='auto',
                                         learning_rate='constant', learning_rate_init=0.001,
                                         power_t=0.5, max_iter=200, shuffle=True,
                                         random_state=None, tol=0.0001, verbose=False,
                                         warm_start=False, momentum=0.9,
                                         nesterovs_momentum=True, early_stopping=False,
                                         validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                                         epsilon=1e-08)
            reward_model = MLPRegressor(hidden_layer_sizes=(100, ), activation='relu',
                                        solver='adam', alpha=0.0001, batch_size='auto',
                                        learning_rate='constant', learning_rate_init=0.001,
                                        power_t=0.5, max_iter=200, shuffle=True,
                                        random_state=None, tol=0.0001, verbose=False,
                                        warm_start=False, momentum=0.9,
                                        nesterovs_momentum=True, early_stopping=False,
                                        validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                                        epsilon=1e-08)
        else:
            logger.info("You need to give the model_type to fit the dynamic and reward!!!")

    critic = Critic(network=network, **network_kwargs)
    actor = Actor(nb_actions, network=network, **network_kwargs)

    """ set noise """
    action_noise = None
    param_noise = None
    if noise_type is not None:
        for current_noise_type in noise_type.split(','):
            current_noise_type = current_noise_type.strip()
            if current_noise_type == 'none':
                pass
            elif 'adaptive-param' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev),
                                                     desired_action_stddev=float(stddev))
            elif 'normal' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                                 sigma=float(stddev) * np.ones(nb_actions))
            elif 'ou' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                            sigma=float(stddev) * np.ones(nb_actions))
            else:
                raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))

    """ action scale """
    max_action = env.action_high_bound
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    """ agent ddpg """
    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape[0],
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise,
                 critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,
                 enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))

    sess = U.get_session()
    if restore:
        agent.restore(sess, model_path, model_name)
    else:
        agent.initialize(sess)
        sess.graph.finalize()
    agent.reset()

    episodes = 0
    epochs_rewards = np.zeros((nb_epochs, nb_epoch_cycles), dtype=np.float32)
    epochs_times = np.zeros((nb_epochs, nb_epoch_cycles), dtype=np.float32)
    epochs_steps = np.zeros((nb_epochs, nb_epoch_cycles), dtype=np.float32)
    epochs_states = []
    for epoch in range(nb_epochs):
        logger.info("======================== The {} epoch start !!! ========================="
                    .format(epoch))
        epoch_episode_rewards = []
        epoch_episode_steps = []
        epoch_episode_times = []
        epoch_actions = []
        epoch_episode_states = []
        epoch_qs = []
        epoch_episodes = 0
        for cycle in range(nb_epoch_cycles):
            start_time = time.time()
            obs, state, done = env.reset()
            obs_reset = cp.deepcopy(obs)
            episode_reward = 0.
            episode_step = 0
            episode_states = []
            logger.info("================== The {} episode start !!! ==================="
                        .format(cycle))
            for t_rollout in range(nb_rollout_steps):
                logger.info("================== The {} steps finish !!! ==================="
                            .format(t_rollout))

                """ Predict next action """
                action, q, _, _ = agent.step(obs, stddev, apply_noise=True, compute_Q=True)

                new_obs, next_state, r, done, safe_or_not, final_action = env.step(
                    max_action * action, t_rollout)

                if safe_or_not is False:
                    break

                episode_reward += r
                episode_step += 1
                episode_states.append([cp.deepcopy(state),
                                       cp.deepcopy(final_action),
                                       np.array(cp.deepcopy(r)),
                                       cp.deepcopy(next_state)])

                epoch_actions.append(action)
                epoch_qs.append(q)
                agent.store_transition(obs, action, r, new_obs, done)
                obs = new_obs
                state = next_state

                if done:
                    break

            """ extend the memory """
            if model_based and cycle > (nb_model_learning + 1) and memory_extend:
                pred_x = np.zeros((1, 18), dtype=np.float32)
                for j in range(nb_samples_extend):
                    m_action, _, _, _ = agent.step(obs, stddev, apply_noise=True, compute_Q=False)
                    pred_x[:, :12] = obs
                    pred_x[:, 12:] = m_action
                    m_new_obs = dynamic_model.predict(pred_x)[0]
                    """ get real reward """
                    # state = env.inverse_state(m_new_obs)
                    # m_reward = env.get_reward(state, m_action)
                    m_reward = reward_model.predict(pred_x)[0]
                    agent.store_transition(obs, m_action, m_reward, m_new_obs, done)

            """ generate new data and fit model """
            if model_based and cycle > nb_model_learning:
                logger.info("============================== Model Fit !!! ===============================")
                input_x = np.concatenate((memory.observations0.data[:memory.nb_entries],
                                          memory.actions.data[:memory.nb_entries]), axis=1)
                input_y_obs = memory.observations1.data[:memory.nb_entries]
                input_y_reward = memory.rewards.data[:memory.nb_entries]
                dynamic_model.fit(input_x, input_y_obs)
                reward_model.fit(input_x, input_y_reward)

                if dyna_learning:
                    logger.info("========================= Collect data !!! =================================")
                    pred_obs = np.zeros((1, 18), dtype=np.float32)
                    for sample_index in range(nb_sample_cycle):
                        fake_obs = obs_reset
                        for t_episode in range(nb_sample_steps):
                            fake_action, _, _, _ = agent.step(fake_obs, stddev,
                                                              apply_noise=True, compute_Q=False)
                            pred_obs[:, :12] = fake_obs
                            pred_obs[:, 12:] = fake_action
                            next_fake_obs = dynamic_model.predict(pred_obs)[0]
                            fake_reward = reward_model.predict(pred_obs)[0]
                            # next_fake_obs = dynamic_model.predict(np.concatenate((fake_obs, fake_action)))[0]
                            # fake_reward = reward_model.predict(np.concatenate((fake_obs, fake_action)))[0]
                            fake_obs = next_fake_obs
                            fake_terminals = False
                            fake_memory.append(fake_obs, fake_action, fake_reward,
                                               next_fake_obs, fake_terminals)

            """ noise decay """
            stddev = float(stddev) * 0.95
            duration = time.time() - start_time
            epoch_episode_rewards.append(episode_reward)
            epoch_episode_steps.append(episode_step)
            epoch_episode_times.append(cp.deepcopy(duration))
            epoch_episode_states.append(cp.deepcopy(episode_states))
            epochs_rewards[epoch, cycle] = episode_reward
            epochs_steps[epoch, cycle] = episode_step
            epochs_times[epoch, cycle] = cp.deepcopy(duration)

            logger.info("============================= The Episode_Times:: {}!!! ============================"
                        .format(epoch_episode_rewards))
            logger.info("============================= The Episode_Times:: {}!!! ============================"
                        .format(epoch_episode_times))

            epoch_episodes += 1
            episodes += 1

            """ Training process """
            epoch_actor_losses = []
            epoch_critic_losses = []
            epoch_adaptive_distances = []
            for t_train in range(nb_train_steps):
                logger.info("")
                # Adapt param noise, if necessary.
                if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
                    distance = agent.adapt_param_noise()
                    epoch_adaptive_distances.append(distance)

                cl, al = agent.train()
                epoch_critic_losses.append(cl)
                epoch_actor_losses.append(al)
                agent.update_target_net()

            """ planning training """
            if model_based and cycle > (nb_model_learning + 1) and dyna_learning:
                for t_train in range(nb_train_steps):
                    # setting for adapt param noise, if necessary.
                    if fake_memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
                        distance = agent.adapt_param_noise()
                        epoch_adaptive_distances.append(distance)
                    batch = fake_memory.sample(batch_size=batch_size)
                    fake_cl, fake_al = agent.train_fake_data(batch)
                    epoch_critic_losses.append(fake_cl)
                    epoch_actor_losses.append(fake_al)
                    agent.update_target_net()

        epochs_states.append(cp.deepcopy(epoch_episode_states))

        # save data
        # NOTE: `algorithm_name` is not defined in this function; it is presumably a
        # module-level name in the original source.
        np.save(data_path + 'train_reward_' + algorithm_name + '_' + noise_type + file_name,
                epochs_rewards)
        np.save(data_path + 'train_step_' + algorithm_name + '_' + noise_type + file_name,
                epochs_steps)
        np.save(data_path + 'train_states_' + algorithm_name + '_' + noise_type + file_name,
                epochs_states)
        np.save(data_path + 'train_times_' + algorithm_name + '_' + noise_type + file_name,
                epochs_times)

    # agent save
    agent.store(model_path + 'train_model_' + algorithm_name + '_' + noise_type + file_name)
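# Hedged helper sketch (assumption, not in the original source): the model-fitting
# block above concatenates the stored observations and actions into one input matrix
# and fits the dynamics/reward regressors on it. Factored out on its own it would
# look roughly like this; the function name is hypothetical, and the .observations0/
# .actions/.observations1/.rewards ring buffers are the baselines Memory attributes
# the code above already relies on.
import numpy as np


def _fit_models_sketch(memory, dynamic_model, reward_model):
    n = memory.nb_entries
    input_x = np.concatenate((memory.observations0.data[:n],
                              memory.actions.data[:n]), axis=1)
    dynamic_model.fit(input_x, memory.observations1.data[:n])
    reward_model.fit(input_x, memory.rewards.data[:n])
    return dynamic_model, reward_model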
def learn(network, env,
          seed=None,
          total_timesteps=None,
          nb_epochs=None,  # with default settings, perform 1M steps total
          nb_epoch_cycles=20,
          nb_rollout_steps=100,
          reward_scale=1.0,
          render=False,
          render_eval=False,
          noise_type='adaptive-param_0.2',
          normalize_returns=False,
          normalize_observations=True,
          critic_l2_reg=1e-2,
          actor_lr=1e-4,
          critic_lr=1e-3,
          popart=False,
          gamma=0.99,
          clip_norm=None,
          nb_train_steps=50,  # per epoch cycle and MPI worker
          nb_eval_steps=100,
          batch_size=64,  # per MPI worker
          tau=0.01,
          eval_env=None,
          param_noise_adaption_interval=50,
          **network_kwargs):

    set_global_seeds(seed)

    if total_timesteps is not None:
        assert nb_epochs is None
        nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)
    else:
        nb_epochs = 500

    if MPI is not None:
        rank = MPI.COMM_WORLD.Get_rank()
    else:
        rank = 0

    nb_actions = env.action_space.shape[-1]
    assert (np.abs(env.action_space.low) == env.action_space.high).all()  # we assume symmetric actions.

    memory = Memory(limit=int(1e6), action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape)
    critic = Critic(network=network, **network_kwargs)
    actor = Actor(nb_actions, network=network, **network_kwargs)

    action_noise = None
    param_noise = None
    if noise_type is not None:
        for current_noise_type in noise_type.split(','):
            current_noise_type = current_noise_type.strip()
            if current_noise_type == 'none':
                pass
            elif 'adaptive-param' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev),
                                                     desired_action_stddev=float(stddev))
            elif 'normal' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                                 sigma=float(stddev) * np.ones(nb_actions))
            elif 'ou' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                            sigma=float(stddev) * np.ones(nb_actions))
            else:
                raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))

    max_action = env.action_space.high
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise,
                 critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,
                 enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))

    eval_episode_rewards_history = deque(maxlen=100)
    episode_rewards_history = deque(maxlen=100)
    sess = U.get_session()

    # Prepare everything.
    agent.initialize(sess)
    sess.graph.finalize()
    agent.reset()

    obs = env.reset()
    if eval_env is not None:
        eval_obs = eval_env.reset()
    nenvs = obs.shape[0]

    episode_reward = np.zeros(nenvs, dtype=np.float32)  # vector
    episode_step = np.zeros(nenvs, dtype=int)  # vector
    episodes = 0  # scalar
    t = 0  # scalar
    epoch = 0

    start_time = time.time()

    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_qs = []
    epoch_episodes = 0
    for epoch in range(nb_epochs):
        for cycle in range(nb_epoch_cycles):
            # Perform rollouts.
            if nenvs > 1:
                # if simulating multiple envs in parallel, impossible to reset agent at the end of the episode in each
                # of the environments, so resetting here instead
                agent.reset()
            for t_rollout in range(nb_rollout_steps):
                # Predict next action.
                action, q, _, _ = agent.step(obs, apply_noise=True, compute_Q=True)

                # Execute next action.
                if rank == 0 and render:
                    env.render()

                # max_action is of dimension A, whereas action is dimension (nenvs, A) - the multiplication gets broadcasted to the batch
                new_obs, r, done, info = env.step(max_action * action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                # note these outputs are batched from vecenv
                t += 1
                if rank == 0 and render:
                    env.render()
                episode_reward += r
                episode_step += 1

                # Book-keeping.
                epoch_actions.append(action)
                epoch_qs.append(q)
                agent.store_transition(obs, action, r, new_obs, done)  # the batched data will be unrolled in memory.py's append.

                obs = new_obs

                for d in range(len(done)):
                    if done[d]:
                        # Episode done.
                        epoch_episode_rewards.append(episode_reward[d])
                        episode_rewards_history.append(episode_reward[d])
                        epoch_episode_steps.append(episode_step[d])
                        episode_reward[d] = 0.
                        episode_step[d] = 0
                        epoch_episodes += 1
                        episodes += 1
                        if nenvs == 1:
                            agent.reset()

            # Train.
            epoch_actor_losses = []
            epoch_critic_losses = []
            epoch_adaptive_distances = []
            for t_train in range(nb_train_steps):
                # Adapt param noise, if necessary.
                if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
                    distance = agent.adapt_param_noise()
                    epoch_adaptive_distances.append(distance)

                cl, al = agent.train()
                epoch_critic_losses.append(cl)
                epoch_actor_losses.append(al)
                agent.update_target_net()

            # Evaluate.
            eval_episode_rewards = []
            eval_qs = []
            if eval_env is not None:
                nenvs_eval = eval_obs.shape[0]
                eval_episode_reward = np.zeros(nenvs_eval, dtype=np.float32)
                for t_rollout in range(nb_eval_steps):
                    eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)
                    eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action)  # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
                    if render_eval:
                        eval_env.render()
                    eval_episode_reward += eval_r

                    eval_qs.append(eval_q)
                    for d in range(len(eval_done)):
                        if eval_done[d]:
                            eval_episode_rewards.append(eval_episode_reward[d])
                            eval_episode_rewards_history.append(eval_episode_reward[d])
                            eval_episode_reward[d] = 0.0

        if MPI is not None:
            mpi_size = MPI.COMM_WORLD.Get_size()
        else:
            mpi_size = 1

        # Log stats.
        # XXX shouldn't call np.mean on variable length lists
        duration = time.time() - start_time
        stats = agent.get_stats()
        combined_stats = stats.copy()
        combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
        combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
        combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
        combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
        combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
        combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
        combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
        combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
        combined_stats['total/duration'] = duration
        combined_stats['total/steps_per_second'] = float(t) / float(duration)
        combined_stats['total/episodes'] = episodes
        combined_stats['rollout/episodes'] = epoch_episodes
        combined_stats['rollout/actions_std'] = np.std(epoch_actions)

        # Evaluation statistics.
        if eval_env is not None:
            combined_stats['eval/return'] = eval_episode_rewards
            combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
            combined_stats['eval/Q'] = eval_qs
            combined_stats['eval/episodes'] = len(eval_episode_rewards)

        def as_scalar(x):
            if isinstance(x, np.ndarray):
                assert x.size == 1
                return x[0]
            elif np.isscalar(x):
                return x
            else:
                raise ValueError('expected scalar, got %s' % x)

        combined_stats_sums = np.array([np.array(x).flatten()[0] for x in combined_stats.values()])
        if MPI is not None:
            combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums)
        combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}

        # Total statistics.
        combined_stats['total/epochs'] = epoch + 1
        combined_stats['total/steps'] = t

        for key in sorted(combined_stats.keys()):
            logger.record_tabular(key, combined_stats[key])
        if rank == 0:
            logger.dump_tabular()
        logger.info('')
        logdir = logger.get_dir()
        if rank == 0 and logdir:
            if hasattr(env, 'get_state'):
                with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
                    pickle.dump(env.get_state(), f)
            if eval_env and hasattr(eval_env, 'get_state'):
                with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
                    pickle.dump(eval_env.get_state(), f)

    return agent
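# Hedged helper sketch (assumption, not in the original source): the stats block
# above sums each scalar across MPI workers with allreduce and divides by the world
# size to report per-worker averages. Factored out, with the same MPI-optional
# behavior, it would look roughly like this; the function name is hypothetical.
import numpy as np
try:
    from mpi4py import MPI
except ImportError:
    MPI = None


def _mpi_average_stats_sketch(combined_stats):
    sums = np.array([np.array(x).flatten()[0] for x in combined_stats.values()])
    if MPI is not None:
        sums = MPI.COMM_WORLD.allreduce(sums)
        size = MPI.COMM_WORLD.Get_size()
    else:
        size = 1
    return {k: v / size for k, v in zip(combined_stats.keys(), sums)}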
class DDPG_Policy(PolicyOpt):
    """Policy optimization via DDPG."""

    def __init__(self, hyperparams, dX, dU):
        """Initializes the policy.

        Args:
            hyperparams: Dictionary of hyperparameters.
            dX: Dimension of state space.
            dU: Dimension of action space.
        """
        PolicyOpt.__init__(self, hyperparams, dX, dU)
        self.dX = dX
        self.dU = dU
        self.epochs = hyperparams['epochs']
        self.param_noise_adaption_interval = hyperparams['param_noise_adaption_interval']
        set_global_seeds(hyperparams['seed'])

        # Initialize DDPG policy
        self.pol = DDPG(Actor(dU, network=hyperparams['network'],
                              **hyperparams['network_kwargs']),
                        Critic(network=hyperparams['network'],
                               **hyperparams['network_kwargs']),
                        Memory(limit=hyperparams['memory_limit'],
                               action_shape=(dU, ),
                               observation_shape=(dX, )),
                        observation_shape=(dX, ),
                        action_shape=(dU, ),
                        param_noise=AdaptiveParamNoiseSpec(initial_stddev=0.2,
                                                           desired_action_stddev=0.2),
                        **hyperparams['ddpg_kwargs'])
        sess = get_session()
        self.pol.initialize(sess)
        sess.graph.finalize()

        self.policy = self  # Act method is contained in this class

    def update(self, X, U, cs, **kwargs):
        """Perform DDPG update step."""
        M, N, T, _ = X.shape

        # Store samples in memory
        self.pol.store_transition(
            X[:, :, :-1].reshape(M * N * (T - 1), self.dX),
            U[:, :, :-1].reshape(M * N * (T - 1), self.dU),
            -cs[:, :, :-1].reshape(M * N * (T - 1)),
            X[:, :, 1:].reshape(M * N * (T - 1), self.dX),
            np.zeros((M * N * (T - 1))),
        )

        # Train DDPG
        losses = np.zeros((self.epochs, 2))
        pbar = tqdm(range(self.epochs))
        for epoch in pbar:
            if self.pol.memory.nb_entries >= self.pol.batch_size and epoch % self.param_noise_adaption_interval == 0:
                self.pol.adapt_param_noise()
            losses[epoch] = self.pol.train()
            self.pol.update_target_net()
            pbar.set_description("Loss: %.6f/%.6f" % (losses[epoch, 0], losses[epoch, 1]))

        # Visualize training loss
        from gps.visualization import visualize_loss
        visualize_loss(
            self._data_files_dir + 'plot_gps_training-%02d' % (self.iteration_count),
            losses,
            labels=['critic', 'actor'],
        )

    def act(self, x, _, t, noise):
        """Decides an action for the given state/observation at the current timestep.

        Args:
            x: State vector.
            obs: Observation vector.
            t: Time step.
            noise: A dU-dimensional noise vector.

        Returns:
            A dU dimensional action vector.
        """
        if t == 0:
            self.pol.reset()
        u = self.pol.step(x, apply_noise=np.any(noise), compute_Q=False)[0][0]
        return u
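# Hedged configuration sketch (not part of the original source): the keys below are
# exactly the ones DDPG_Policy.__init__ and update() read from `hyperparams`; the
# values, and the contents of 'network_kwargs'/'ddpg_kwargs', are assumptions shown
# only to illustrate the expected structure.
EXAMPLE_DDPG_HYPERPARAMS = {
    'epochs': 200,
    'param_noise_adaption_interval': 50,
    'seed': 0,
    'network': 'mlp',
    'network_kwargs': {'num_layers': 2, 'num_hidden': 64},
    'memory_limit': int(1e6),
    # Forwarded to the DDPG constructor; valid keys depend on that class.
    'ddpg_kwargs': {'gamma': 0.99, 'tau': 0.01, 'batch_size': 64},
}
# Example construction (dX/dU are placeholders for the state/action dimensions):
# pol = DDPG_Policy(EXAMPLE_DDPG_HYPERPARAMS, dX=32, dU=7)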
def learn_setup(network, env,
                seed=None,
                total_timesteps=None,
                iterations=None,
                nb_epochs=None,  # with default settings, perform 1M steps total
                nb_epoch_cycles=None,
                nb_rollout_steps=100,
                n_episodes=None,
                logspace=True,
                n_steps_per_episode=None,
                reward_threshold=0,
                reward_scale=1.0,
                render=False,
                render_eval=False,
                noise_type='adaptive-param_0.2',
                noise_level="0.2",
                normalize_returns=False,
                normalize_observations=True,
                critic_l2_reg=1e-2,
                exp_name="test",
                actor_lr=1e-4,
                critic_lr=1e-3,
                popart=False,
                gamma=0.99,
                clip_norm=None,
                nb_train_steps=50,  # per epoch cycle and MPI worker
                nb_eval_steps=100,
                batch_size=64,  # per MPI worker
                tau=0.01,
                eval_env=None,
                param_noise_adaption_interval=50,
                **network_kwargs):

    if logspace:
        actor_lr = 10**-actor_lr
        critic_lr = 10**-critic_lr
        batch_size = 2**int(batch_size)
        if seed is None:
            seed = 17
        seed = int(seed)
        tau = 10**-tau
    set_global_seeds(seed)

    if nb_epoch_cycles is None:
        nb_epoch_cycles = n_episodes
        nb_rollout_steps = n_steps_per_episode
    else:
        input("Not using automated interface? ")

    if total_timesteps is not None:
        assert nb_epochs is None
        nb_epochs = int(total_timesteps) // (nb_epoch_cycles * nb_rollout_steps)

    if MPI is not None:
        rank = MPI.COMM_WORLD.Get_rank()
    else:
        rank = 0

    nb_actions = env.action_space.shape[-1]
    assert (np.abs(env.action_space.low) == env.action_space.high).all()  # we assume symmetric actions.

    memory = Memory(limit=int(1e5), action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape)
    critic = Critic(network=network, **network_kwargs)
    actor = Actor(nb_actions, network=network, **network_kwargs)

    action_noise = None
    param_noise = None
    if noise_type is not None:
        for current_noise_type in noise_type.split(','):
            current_noise_type = current_noise_type.strip()
            if current_noise_type == 'none':
                pass
            elif 'adaptive-param' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                stddev = noise_level
                param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev),
                                                     desired_action_stddev=float(stddev))
            elif 'normal' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                                 sigma=float(stddev) * np.ones(nb_actions))
            elif 'ou' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                            sigma=float(stddev) * np.ones(nb_actions))
            else:
                raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))

    max_action = env.action_space.high
    # print("actual max action", max_action)
    max_action = 1
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise,
                 critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,
                 enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale)
    logger.info('Using agent with the following configuration:')

    eval_episode_rewards_history = deque(maxlen=100)
    episode_rewards_history = deque(maxlen=100)
    sess = U.get_session()

    # Prepare everything.
    agent.initialize(sess)
    sess.graph.finalize()
    agent.reset()

    obs = env.reset()
    if eval_env is not None:
        eval_obs = eval_env.reset()
    nenvs = obs.shape[0]

    episode_reward = np.zeros(nenvs, dtype=np.float32)  # vector
    episode_step = np.zeros(nenvs, dtype=int)  # vector
    episodes = 0  # scalar
    t = 0  # scalar
    epoch = 0

    start_time = time.time()

    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_qs = []
    epoch_episodes = 0

    local_variables = {
        "epoch_episode_rewards": epoch_episode_rewards,
        "epoch_episode_steps": epoch_episode_steps,
        "batch_size": batch_size,
        "eval_env": eval_env,
        "reward_threshold": reward_threshold,
        "epoch_actions": epoch_actions,
        "nb_train_steps": nb_train_steps,
        "epoch_qs": epoch_qs,
        "start_time": start_time,
        "epoch_episodes": [epoch_episodes],
        "nb_epoch_cycles": nb_epoch_cycles,
        "nb_rollout_steps": nb_rollout_steps,
        "agent": agent,
        "memory": memory,
        "max_action": max_action,
        "env": env,
        "nenvs": nenvs,
        "obs": [obs],  # Forgive me 6.031
        "t": [t],
        "episode_reward": episode_reward,
        "episode_rewards_history": episode_rewards_history,
        "episode_step": episode_step,
        "episodes": [episodes],
        "rank": rank,
        "param_noise_adaption_interval": param_noise_adaption_interval,
        "noise_type": noise_type,
        "render": render
    }
    return local_variables
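# Hedged usage sketch (assumption, not in the original source): learn_setup() hands
# its working state back as a dict, with mutable scalars wrapped in one-element
# lists (see "obs": [obs] and "t": [t]). A companion per-episode routine would
# presumably unpack and update that dict roughly like this; the function name below
# is hypothetical.
def run_one_episode_sketch(lv):
    agent, env, max_action = lv["agent"], lv["env"], lv["max_action"]
    obs = lv["obs"][0]
    for _ in range(lv["nb_rollout_steps"]):
        action, q, _, _ = agent.step(obs, apply_noise=True, compute_Q=True)
        new_obs, r, done, _ = env.step(max_action * action)
        agent.store_transition(obs, action, r, new_obs, done)
        obs = new_obs
        lv["t"][0] += 1
    lv["obs"][0] = obs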
def learn(network, env,
          seed=None,
          total_timesteps=None,
          nb_epochs=None,  # with default settings, perform 1M steps total
          nb_epoch_cycles=20,
          nb_rollout_steps=100,
          reward_scale=1.0,
          render=False,
          render_eval=False,
          noise_type='adaptive-param_0.2',
          normalize_returns=False,
          normalize_observations=True,
          actor_l2_reg=0.0,
          critic_l2_reg=1e-2,
          actor_lr=1e-4,
          critic_lr=1e-3,
          popart=False,
          gamma=0.99,
          clip_norm=None,
          nb_train_steps=50,  # per epoch cycle and MPI worker
          nb_eval_steps=1000,
          batch_size=64,  # per MPI worker
          tau=0.01,
          eval_env=None,
          param_noise_adaption_interval=50,
          rb_size=1e6,
          save_interval=1,
          pretrain_epochs=0,
          load_path=None,
          demos_path=None,
          bc_teacher_lambda=0.0,
          use_qfilter=False,
          **network_kwargs):
    """Learns policy using DDPG, with vectorized environments.

    If we pass other arguments that aren't specified here, they are considered
    as network_kwargs.

    Parameters
    ----------
    noise_type: for noise to be added to the behavior policy. They are NOT using
        the noise type from the paper but 'AdaptiveParamNoiseSpec'. I _think_ that
        if one does the OU process, we get action noise, but not parameter noise.
        Also, be sure to use `name_stdev` in that convention, as the code will
        split the argument at the underscores.
    actor_lr: 1e-4 (matches paper)
    critic_lr: 1e-3 (matches paper)
    critic_l2: 1e-2 (matches paper)
    gamma: 0.99 (matches paper)
    batch_size: 64 (matches paper for lower-dim env obs/states)
    tau: 0.01 for soft target updates of actor and critic nets. Paper used 0.001.
    nb_epoch_cycles: number of times we go through this cycle of: (1) get rollouts
        with noise added to policy and apply to replay buffer, (2) gradient updates
        for actor/critic, (3) evaluation rollouts (if any). AFTER all of these
        cycles happen, THEN we log statistics.
    nb_rollout_steps: number of steps in each parallel env we take with exploration
        policy without training, so this is just to populate the replay buffer.
        More parallel envs *should* mean that we get more samples in the buffer
        between each gradient update of the network, so this might need to be
        environment *and* machine (# of CPUs) specific.
    nb_train_steps: after doing `nb_rollout_steps` in each parallel env, we do this
        many updates; each involves sampling from the replay buffer and updating
        the actor and critic (via lagged target updates).
    nb_eval_steps: 1000, I changed from the 100 as default. Using 1000 ensures that
        fixed length envs like Ant-v2 can get one full episode (assuming one
        parallel env) during evaluation stages.
    eval_env: A separate environment for evaluation only, where no noise is
        applied, similar to how rlkit does it.
    save_interval: Frequency between saving.
    """
    set_global_seeds(seed)

    # Daniel: this helps to maintain compatibility with PPO2 code. For now
    # we're ignoring it, but we should check that we're always clipping. I
    # changed the nb_epochs to match with PPO2 in that we divide by nenvs.
    if 'limit_act_range' in network_kwargs:
        network_kwargs.pop('limit_act_range')
    nenvs = env.num_envs

    nbatchsize = nenvs * nb_epoch_cycles * nb_rollout_steps
    if total_timesteps is not None:
        assert nb_epochs is None
        nb_epochs = int(total_timesteps) // nbatchsize
    else:
        nb_epochs = 500

    if MPI is not None:
        rank = MPI.COMM_WORLD.Get_rank()
    else:
        rank = 0

    # we assume symmetric actions.
    nb_actions = env.action_space.shape[-1]
    assert (np.abs(env.action_space.low) == env.action_space.high).all()

    # Form XP (1M steps, same as in paper), and critic/actor networks.
    # Daniel: force dtype here so we can use uint8 type images.
    assert env.observation_space.low.dtype == env.observation_space.high.dtype
    memory = Memory(limit=int(rb_size),
                    action_shape=env.action_space.shape,
                    observation_shape=env.observation_space.shape,
                    dtype=env.observation_space.low.dtype)
    critic = Critic(network=network, **network_kwargs)
    actor = Actor(nb_actions, network=network, **network_kwargs)

    action_noise = None
    param_noise = None
    if noise_type is not None:
        for current_noise_type in noise_type.split(','):
            current_noise_type = current_noise_type.strip()
            if current_noise_type == 'none':
                pass
            elif 'adaptive-param' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(stddev),
                                                     desired_action_stddev=float(stddev))
            elif 'normal' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                # action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                #                                  sigma=float(stddev)*np.ones(nb_actions))
                # if nenvs > 1:
                # Daniel: adding this to replace the former.
                action_noise = NormalActionNoise(mu=np.zeros(nb_actions),
                                                 sigma=float(stddev) * np.ones(nb_actions),
                                                 shape=(nenvs, nb_actions))
            elif 'ou' in current_noise_type:
                _, stddev = current_noise_type.split('_')
                action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(nb_actions),
                                                            sigma=float(stddev) * np.ones(nb_actions))
            else:
                raise RuntimeError('unknown noise type "{}"'.format(current_noise_type))

    max_action = env.action_space.high
    logger.info('scaling actions by {} before executing in env'.format(max_action))

    # The `learn` defaults above have priority over defaults in DDPG class.
    agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
                 gamma=gamma, tau=tau, normalize_returns=normalize_returns,
                 normalize_observations=normalize_observations, batch_size=batch_size,
                 action_noise=action_noise, param_noise=param_noise,
                 actor_l2_reg=actor_l2_reg, critic_l2_reg=critic_l2_reg,
                 actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart,
                 clip_norm=clip_norm, reward_scale=reward_scale,
                 bc_teacher_lambda=bc_teacher_lambda, use_qfilter=use_qfilter)
    logger.info('Using agent with the following configuration:')
    logger.info(str(agent.__dict__.items()))

    # Prepare everything.
    sess = U.get_session()
    agent.initialize(sess)
    # --------------------------------------------------------------------------
    # Daniel: similar as PPO2 code as `agent` is similar to `model` but has to
    # be initialized explicitly above. Must call after `agent.load` gets
    # created. Not sure if this works with parameter space noise or with
    # normalization, but I don't plan to resume training (for now). It also has
    # to be *before* the `graph.finalize()` because otherwise we get an error.
    # --------------------------------------------------------------------------
    if load_path is not None:
        logger.info("\nInside ddpg, loading model from: {}".format(load_path))
        agent.load(load_path)
    # --------------------------------------------------------------------------
    sess.graph.finalize()
    agent.reset()

    # --------------------------------------------------------------------------
    # Daniel: populate replay buffer, followed by pretraining stage.
    # But if load_path is not None, then doesn't make sense -- we want to load.
    # We also don't need to do this if timesteps is 0 (e.g., for playing policy).
    # --------------------------------------------------------------------------
    if total_timesteps == 0:
        return agent
    assert seed == 1500, 'We normally want seed 1500, yet: {}'.format(seed)

    if (demos_path is not None and load_path is None):
        _ddpg_demos(demos_path, agent, memory)
        assert memory.nb_entries == memory.nb_teach_entries, memory.nb_entries
        checkdir = osp.join(logger.get_dir(), 'checkpoints')
        os.makedirs(checkdir, exist_ok=True)

        # Pretrain, based on their training code for some # of minibatches.
        pt_actor_losses = []
        pt_critic_losses = []
        batches_per_ep = int(memory.nb_entries / batch_size)
        logger.info('Running pre-training for {} epochs'.format(pretrain_epochs))
        logger.info('  data size in memory: {}'.format(memory.nb_entries))
        logger.info('  each batch: {}, epoch mbs: {}'.format(batch_size, batches_per_ep))
        pt_start = time.time()

        for epoch in range(1, pretrain_epochs + 1):
            c_losses = []
            a_losses = []
            for _ in range(batches_per_ep):
                cl, al = agent.train(during_pretrain=True)
                agent.update_target_net()
                c_losses.append(cl)
                a_losses.append(al)
            pt_critic_losses.append(np.mean(c_losses))
            pt_actor_losses.append(np.mean(a_losses))

            # Check and save model occasionally.
            if epoch == 1 or epoch % 5 == 0:
                pt_time = (time.time() - pt_start) / 60.
                logger.info('  epoch done: {}, loss over past epoch: {:.4f}'.format(
                    str(epoch).zfill(4), pt_actor_losses[-1]))
                logger.info('  critic loss over past epoch: {:.4f}'.format(pt_critic_losses[-1]))
                logger.info('  elapsed time: {:.1f}m'.format(pt_time))
                savepath = osp.join(checkdir, 'pretrain_epoch_{}'.format(str(epoch).zfill(4)))
                logger.info('Saving model checkpoint to: ', savepath)
                agent.save(savepath)

        pt_time = (time.time() - pt_start) / 60.
        logger.info('losses a: {}'.format(np.array(pt_actor_losses)))
        logger.info('losses c: {}'.format(np.array(pt_critic_losses)))
        logger.info('Finished loading teacher samples + pre-training.')
        logger.info('Pre-training took {:.1f}m.\n'.format(pt_time))
    # --------------------------------------------------------------------------

    # Back to their code. For cloth, `env.reset()` takes a while so we put it here.
    obs = env.reset()
    if eval_env is not None:
        eval_obs = eval_env.reset()
    nenvs = obs.shape[0]

    # Daniel: Debugging/sanity checks.
    _variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    U.display_var_info(_variables)
    logger.info("\nInside DDPG, about to start epochs")
    logger.info("nbatchsize: {}, get this in buffer before DDPG updates".format(nbatchsize))
    logger.info("  i.e.: (nenv {}) * (cycles {}) * (nsteps {})".format(
        nenvs, nb_epoch_cycles, nb_rollout_steps))
    logger.info("nb_epochs: {}, number of cycles to use".format(nb_epochs))
    logger.info("eval_env None? {}".format(eval_env is None))
    logger.info("(end of debugging messages)\n")

    # File paths.
    checkdir = osp.join(logger.get_dir(), 'checkpoints')
    action_dir = osp.join(logger.get_dir(), 'actions')
    episode_dir = osp.join(logger.get_dir(), 'ep_all_infos')
    os.makedirs(checkdir, exist_ok=True)
    os.makedirs(action_dir, exist_ok=True)
    os.makedirs(episode_dir, exist_ok=True)

    # Daniel: use these two to store past 100 episode history. Report these stats!
    eval_episode_rewards_history = deque(maxlen=100)
    episode_rewards_history = deque(maxlen=100)
    all_eval_episode_rewards = []

    # reward/step: cumulative quantities for each episode in vecenv.
    # epoch_{actions,qs} will grow without bound, fyi.
    episode_reward = np.zeros(nenvs, dtype=np.float32)  # vector
    episode_step = np.zeros(nenvs, dtype=int)  # vector
    episodes = 0  # scalar
    t = 0  # scalar
    epoch = 0

    start_time = time.time()

    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_qs = []
    epoch_episodes = 0

    for epoch in range(nb_epochs):
        mb_actions = []
        mb_epinfos = []
        for cycle in range(nb_epoch_cycles):
            # Perform rollouts.
            if nenvs > 1:
                # if simulating multiple envs in parallel, impossible to reset
                # agent at the end of the episode in each of the environments,
                # so resetting here instead
                agent.reset()

            # Daniel: pure data collection (noise added) to populate replay buffer.
            # No training until after this, and note the parallel stepping (VecEnv).
            for t_rollout in range(nb_rollout_steps):
                # Predict next action.
                # action: (#_parallel_envs, ac_dim), q: (#_parallel_envs, 1)
                action, q, _, _ = agent.step(obs, apply_noise=True, compute_Q=True)

                # Execute next action.
                if rank == 0 and render:
                    env.render()

                # max_action is of dimension A, whereas action is dimension
                # (nenvs, A) - the multiplication gets broadcasted to the batch
                # scale for execution in env (as far as DDPG is concerned,
                # every action is in [-1, 1])
                new_obs, r, done, info = env.step(max_action * action)
                r = r.astype(np.float32)
                # note these outputs are batched from vecenv (first dim = batch).
                t += 1
                if rank == 0 and render:
                    env.render()
                episode_reward += r
                episode_step += 1

                # Book-keeping. (Daniel: agent.train() doesn't require these two lists)
                epoch_actions.append(action)
                epoch_qs.append(q)

                # Daniel: Same as PPO2 code.
                mb_actions.append(action)
                for inf in info:
                    maybeepinfo = inf.get('episode')
                    if maybeepinfo:
                        mb_epinfos.append(inf)

                # the batched data will be unrolled in memory.py's append.
                agent.store_transition(obs, action, r, new_obs, done)
                obs = new_obs

                for d in range(len(done)):
                    if done[d]:
                        # Episode done.
                        epoch_episode_rewards.append(episode_reward[d])  # Entire history
                        episode_rewards_history.append(episode_reward[d])  # Last 100 only
                        epoch_episode_steps.append(episode_step[d])
                        episode_reward[d] = 0.
                        episode_step[d] = 0
                        epoch_episodes += 1
                        episodes += 1
                        if nenvs == 1:
                            agent.reset()

            # Train.
            epoch_actor_losses = []
            epoch_critic_losses = []
            epoch_adaptive_distances = []
            for t_train in range(nb_train_steps):
                # Adapt param noise, if necessary.
                if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
                    distance = agent.adapt_param_noise()
                    epoch_adaptive_distances.append(distance)

                cl, al = agent.train()
                epoch_critic_losses.append(cl)
                epoch_actor_losses.append(al)
                agent.update_target_net()

            # Evaluate. (Daniel: note that no noise is applied here.)
            # Also it seems like episodes do not naturally reset before this starts?
            # Also, unlike epoch_episode_reward, here we create eval_episode_reward here ...
            eval_episode_rewards = []
            eval_qs = []
            if eval_env is not None:
                logger.info('Now on the eval_env for {} steps...'.format(nb_eval_steps))
                nenvs_eval = eval_obs.shape[0]
                eval_episode_reward = np.zeros(nenvs_eval, dtype=np.float32)
                for t_rollout in range(nb_eval_steps):
                    eval_action, eval_q, _, _ = agent.step(eval_obs, apply_noise=False, compute_Q=True)
                    # scale for execution in env (for DDPG, every action is in [-1, 1])
                    eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action)
                    if render_eval:
                        eval_env.render()
                    eval_episode_reward += eval_r
                    eval_qs.append(eval_q)
                    for d in range(len(eval_done)):
                        if eval_done[d]:
                            eval_episode_rewards.append(eval_episode_reward[d])
                            eval_episode_rewards_history.append(eval_episode_reward[d])
                            all_eval_episode_rewards.append(eval_episode_reward[d])
                            eval_episode_reward[d] = 0.0  # Daniel: reset for next episode.

        if MPI is not None:
            mpi_size = MPI.COMM_WORLD.Get_size()
        else:
            mpi_size = 1

        # Log stats.
        # XXX shouldn't call np.mean on variable length lists
        duration = time.time() - start_time
        stats = agent.get_stats()
        combined_stats = stats.copy()
        combined_stats['memory/nb_entries'] = memory.nb_entries
        combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
        combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
        combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
        combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
        combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
        combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
        combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
        combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
        combined_stats['total/duration'] = duration
        combined_stats['total/steps_per_second'] = float(t) / float(duration)
        combined_stats['total/episodes'] = episodes
        combined_stats['rollout/episodes'] = epoch_episodes
        combined_stats['rollout/actions_std'] = np.std(epoch_actions)

        # Evaluation statistics. (Daniel: use eval/return_history for plots)
        if eval_env is not None:
            combined_stats['eval/return'] = np.mean(eval_episode_rewards)
            combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
            combined_stats['eval/Q'] = eval_qs
            combined_stats['eval/episodes'] = len(eval_episode_rewards)

        def as_scalar(x):
            if isinstance(x, np.ndarray):
                assert x.size == 1
                return x[0]
            elif np.isscalar(x):
                return x
            else:
                raise ValueError('expected scalar, got %s' % x)

        combined_stats_sums = np.array(
            [np.array(x).flatten()[0] for x in combined_stats.values()])
        if MPI is not None:
            combined_stats_sums = MPI.COMM_WORLD.allreduce(combined_stats_sums)
        combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}

        # Total statistics.
        combined_stats['total/epochs'] = epoch + 1
        combined_stats['total/steps_per_env'] = t

        for key in sorted(combined_stats.keys()):
            logger.record_tabular(key, combined_stats[key])
        if rank == 0:
            logger.dump_tabular()
        logger.info('')
        logdir = logger.get_dir()
        if rank == 0 and logdir:
            if hasattr(env, 'get_state'):
                with open(osp.join(logdir, 'env_state.pkl'), 'wb') as f:
                    pickle.dump(env.get_state(), f)
            if eval_env and hasattr(eval_env, 'get_state'):
                with open(osp.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
                    pickle.dump(eval_env.get_state(), f)

        # Daniel: arguable, we can save all episodes but hard if we don't know the steps.
        # if eval_env:
        #     with open(os.path.join(logdir, 'all_eval_episode_rewards.pkl'), 'wb') as f:
        #         pickle.dump(all_eval_episode_rewards, f)

        # Daniel: we can use cycle or epoch for this if condition ... kind of annoying but w/e.
        if cycle % save_interval == 0:
            logger.info('We are now saving stuff!!')
            savepath = osp.join(checkdir, '%.5i' % epoch)
            logger.info('Saving model checkpoint to: ', savepath)
            agent.save(savepath)
            # ------------------------------------------------------------------
            # Daniel: extra stuff for debugging PPO on cloth, actions and infos for each episode.
            mb_actions = _sf01(np.asarray(mb_actions))
            act_savepath = osp.join(action_dir, 'actions_%.5i.pkl' % epoch)
            epi_savepath = osp.join(episode_dir, 'infos_%.5i.pkl' % epoch)
            with open(act_savepath, 'wb') as fh:
                pickle.dump(mb_actions, fh)
            with open(epi_savepath, 'wb') as fh:
                pickle.dump(mb_epinfos, fh)

        # Daniel: we were not resetting earlier. Actually there are other
        # epoch_stats which we might consider resetting here?
        epoch_episodes = 0

    return agent
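# Hedged helper sketch (assumption, not in the original source): _sf01 is called
# above but not defined in this snippet. In the PPO2-style code this mirrors, the
# helper swaps the time and env axes of a batched rollout array and flattens them
# into a single batch axis; a compatible implementation would be:
import numpy as np


def _sf01(arr):
    """Swap axes 0 and 1, then flatten the first two dims into one batch axis."""
    s = arr.shape
    return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])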