def __init__(self, env, hyper_params, batch_size, update_steps, memory_size, beta,
             model_replace_freq, learning_rate, use_target_model=True,
             memory=Memory_Server, action_space=2, training_episodes=7000,
             test_interval=50):
    # super().__init__(update_steps, memory_size, model_replace_freq, learning_rate,
    #                  beta=0.99, batch_size=32, use_target_model=True)
    self.batch_size = batch_size

    # Network dimensions follow the environment's state vector and action space.
    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len, learning_rate=learning_rate)
    self.target_model = DQNModel(input_len, output_len)

    self.steps = 0
    self.memory = memory
    # self.memory = ReplayBuffer(hyper_params['memory_size'])
    self.prev = 0
    self.next = 0
    self.model_dq = deque()
    self.result = [0] * ((training_episodes // test_interval) + 1)
    self.previous_q_networks = []
    self.result_count = 0
    self.learning_episodes = training_episodes
    self.episode = 0
    self.is_collection_completed = False
    self.evaluator_done = False
    self.batch_num = training_episodes // test_interval
    self.use_target_model = use_target_model
    self.beta = beta
    self.test_interval = test_interval
def __init__(self, env, hyper_params, memory, action_space):
    self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
    self.final_epsilon = hyper_params['final_epsilon']
    self.batch_size = hyper_params['batch_size']
    self.update_steps = hyper_params['update_steps']
    self.beta = hyper_params['beta']
    self.model_replace_freq = hyper_params['model_replace_freq']
    self.learning_rate = hyper_params['learning_rate']
    self.training_episodes = hyper_params['training_episodes']
    self.test_interval = hyper_params['test_interval']
    self.memory = memory

    self.episode = 0
    self.steps = 0
    self.result_count = 0
    self.next = 0
    self.batch_num = self.training_episodes // self.test_interval

    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyper_params['learning_rate'])
    self.target_model = DQNModel(input_len, output_len)

    self.results = [0] * (self.batch_num + 1)
    self.previous_q_networks = []
    self.collector_done = False
    self.evaluator_done = False
def __init__(self, env, hyper_params, action_space=len(ACTION_DICT)):
    self.env = env
    self.max_episode_steps = env._max_episode_steps

    self.beta = hyper_params['beta']
    self.initial_epsilon = 1
    self.final_epsilon = hyper_params['final_epsilon']
    self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']

    self.episode = 0
    self.steps = 0
    self.best_reward = 0
    self.learning = True
    self.action_space = action_space

    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyper_params['learning_rate'])
    self.use_target_model = hyper_params['use_target_model']
    if self.use_target_model:
        self.target_model = DQNModel(input_len, output_len)

    self.memory = ReplayBuffer(hyper_params['memory_size'])
    self.batch_size = hyper_params['batch_size']
    self.update_steps = hyper_params['update_steps']
    self.model_replace_freq = hyper_params['model_replace_freq']
def train(environment, starting_model_path=None, episodes=15000):
    if starting_model_path:
        policy_model = DQNModel.load(starting_model_path)
        target_model = DQNModel.load(starting_model_path)
        print('loaded model {}'.format(starting_model_path))
    else:
        print('starting model from scratch')
        policy_model = DQNModel()
        target_model = DQNModel()
        target_model.set_weights(policy_model.get_weights())

    print('Begin training...')
    replay_memory = []
    epsilon = 1.0  # initial exploration rate, decayed multiplicatively each episode
    for episode_i in range(episodes):
        replay_memory += play_out_episode(policy_model, environment, epsilon)
        replay_memory = replay_memory[-hparams['max_mem_size']:]
        epsilon = max(hparams['min_epsilon'], epsilon * hparams['epsilon_decay'])

        if len(replay_memory) >= hparams['min_mem_size']:
            do_training_step(policy_model, target_model,
                             random.sample(replay_memory, hparams['batch_size']))

        if episode_i % hparams['target_model_update_every'] == 0:
            target_model.set_weights(policy_model.get_weights())

        if episode_i % hparams['evaluation_every'] == 0:
            info = evaluate_model(policy_model, environment)
            print('===================== episode {}, epsilon {}'.format(episode_i, epsilon))
            print(info)
            print('======================================')
            policy_model.save('checkpoint-{}'.format(episode_i))
def __init__(self, env, hyper_params, action_space=len(ACTION_DICT)):
    self.env = env
    self.max_episode_steps = env._max_episode_steps

    """
    beta: The discount factor of the Q-value function.
    (epsilon): The explore-or-exploit policy epsilon.
    initial_epsilon: When 'steps' is 0, epsilon is initial_epsilon, 1.
    final_epsilon: After 'steps' reaches 'epsilon_decay_steps', epsilon is fixed
        at 'final_epsilon'.
    epsilon_decay_steps: Epsilon decreases linearly with the steps from 0 to
        'epsilon_decay_steps'.
    """
    self.beta = hyper_params['beta']
    self.initial_epsilon = 1
    self.final_epsilon = hyper_params['final_epsilon']
    self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']

    """
    episode: Records the training episode.
    steps: Incremented by 1 each time an action is predicted.
    learning: The trigger of agent learning. It is on while training the agent
        and off while testing it.
    action_space: The action space of the current environment, e.g. 2.
    """
    self.episode = 0
    self.steps = 0
    self.best_reward = 0
    self.learning = True
    self.action_space = action_space

    """
    input_len: The input length of the neural network. It equals the length of
        the state vector.
    output_len: The output length of the neural network. It equals the action space.
    eval_model: The model that predicts actions for the agent.
    target_model: The model that calculates the Q-value of next_state to update
        'eval_model'.
    use_target_model: Trigger for turning 'target_model' on/off.
    """
    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyper_params['learning_rate'])
    self.use_target_model = hyper_params['use_target_model']
    if self.use_target_model:
        self.target_model = DQNModel(input_len, output_len)

    # memory: Store and sample experience replay.
    self.memory = ReplayBuffer(hyper_params['memory_size'])

    """
    batch_size: Mini-batch size for training the model.
    update_steps: The frequency of training the model.
    model_replace_freq: The frequency of replacing 'target_model' with 'eval_model'.
    """
    self.batch_size = hyper_params['batch_size']
    self.update_steps = hyper_params['update_steps']
    self.model_replace_freq = hyper_params['model_replace_freq']
    print("agent initialized")
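# The linear schedule described in the docstring above can be written as a small
# helper. This is only a sketch of that description (initial_epsilon = 1, a fixed
# final_epsilon, a linear ramp over epsilon_decay_steps); the helper name is not
# taken from the original agent.
def linear_epsilon(steps, initial_epsilon, final_epsilon, epsilon_decay_steps):
    # Interpolate from initial_epsilon down to final_epsilon, then hold.
    fraction = min(steps / epsilon_decay_steps, 1.0)
    return initial_epsilon + fraction * (final_epsilon - initial_epsilon)

# Example: with final_epsilon=0.1 and epsilon_decay_steps=100000,
# linear_epsilon(50000, 1, 0.1, 100000) == 0.55.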
def learn_and_evaluate(self):
    workers_id = []
    batch_size = self.parms['training_episodes'] // self.parms['workers'][0]
    for _ in range(self.parms['workers'][0]):
        workers_id.append(collecting_worker.remote(self.env, self.model_server,
                                                   self.memory_server, batch_size))

    all_results = []
    if self.parms['do_test']:
        eval_model = DQNModel(len(self.env.reset()), len(ACTION_DICT))
        learn_done, filedir = False, ""
        workers_num = self.parms['workers'][1]
        interval = self.parms['test_interval'] // workers_num
        while not learn_done:
            filedir, learn_done = ray.get(self.memory_server.get_evaluate_filedir.remote())
            if not filedir:
                continue
            eval_model.load(filedir)
            start_time, total_reward = time.time(), 0
            eval_workers = []
            for _ in range(workers_num):
                eval_workers.append(evaluation_worker_test2.remote(
                    self.env, self.memory_server, eval_model, interval))
            avg_reward = sum(ray.get(eval_workers)) / workers_num
            print(filedir, avg_reward, (time.time() - start_time))
            all_results.append(avg_reward)
    return all_results
def __init__(self, env, memory, action_space=2, test_interval=50):
    self.collector_done = False
    self.evaluator_done = False
    self.env = env
    # self.max_episode_steps = env._max_episode_steps
    self.max_episode_steps = 200

    self.beta = hyperparams_CartPole['beta']
    self.initial_epsilon = 1
    self.final_epsilon = hyperparams_CartPole['final_epsilon']
    self.epsilon_decay_steps = hyperparams_CartPole['epsilon_decay_steps']
    self.batch_size = hyperparams_CartPole['batch_size']

    self.episode = 0
    self.steps = 0
    self.best_reward = 0
    self.learning = True
    self.action_space = action_space
    self.previous_q_models = []
    self.results = [0] * (self.batch_size + 1)
    self.result_count = 0
    self.test_interval = test_interval
    self.memory = memory

    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyperparams_CartPole['learning_rate'])
    self.use_target_model = hyperparams_CartPole['use_target_model']
    if self.use_target_model:
        self.target_model = DQNModel(input_len, output_len)

    # # memory: Store and sample experience replay.
    # self.memory = ReplayBuffer(hyper_params['memory_size'])
    self.update_steps = hyperparams_CartPole['update_steps']
    self.model_replace_freq = hyperparams_CartPole['model_replace_freq']
def __init__(self, env, hyper_params, memory_server):
    """
    input_len: The input length of the neural network. It equals the length of
        the state vector.
    output_len: The output length of the neural network. It equals the action space.
    eval_model: The model that predicts actions for the agent.
    target_model: The model that calculates the Q-value of next_state to update
        'eval_model'.
    use_target_model: Trigger for turning 'target_model' on/off.
    """
    self.beta = hyper_params['beta']

    state = env.reset()
    action_space = len(ACTION_DICT)
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyper_params['learning_rate'])
    self.use_target_model = hyper_params['use_target_model']
    if self.use_target_model:
        self.target_model = DQNModel(input_len, output_len)

    self.memory_server = memory_server
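# A minimal sketch (an assumption, not the original update code) of how the
# 'target_model' described in the docstring above typically enters the update:
# TD targets for a sampled batch bootstrap from the target network's Q-values of
# next_state, discounted by beta, and the eval network is regressed toward them.
import numpy as np

def td_targets(rewards, dones, next_q_target, beta):
    # next_q_target: Q-values of next_state from the *target* network,
    # shape (batch, num_actions); dones masks out bootstrapping at terminal states.
    return rewards + beta * (1.0 - dones) * np.max(next_q_target, axis=1)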
def __init__(self, learning_rate, training_episodes, memory, env, test_interval=50,
             batch_size=32, action_space=len(ACTION_DICT), beta=0.99):
    self.env = env
    # self.max_episode_steps = env._max_episode_steps
    self.batch_num = training_episodes // test_interval
    self.steps = 0
    self.collector_done = False
    self.evaluator_done = False
    self.training_episodes = training_episodes
    self.episode = 0
    # self.results = []
    self.batch_size = batch_size
    self.previous_q_models = []
    self.results = [0] * (self.batch_num + 1)
    self.result_count = 0
    self.memory = memory
    self.use_target_model = True

    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len, learning_rate=learning_rate)
    self.target_model = DQNModel(input_len, output_len)

    self.batch_size = hyper_params['batch_size']
    self.update_steps = hyper_params['update_steps']
    self.model_replace_freq = hyper_params['model_replace_freq']
def __init__(self, hyper_params, memory_server, nb_agents, nb_evaluators,
             action_space=len(ACTION_DICT)):
    self.beta = hyper_params['beta']
    self.initial_epsilon = 1
    self.final_epsilon = hyper_params['final_epsilon']
    self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
    self.hyper_params = hyper_params
    self.update_steps = hyper_params['update_steps']
    self.model_replace_freq = hyper_params['model_replace_freq']
    self.action_space = action_space
    self.batch_size = hyper_params['batch_size']
    self.memory_server = memory_server
    self.nb_agents = nb_agents
    self.nb_evaluators = nb_evaluators

    env = CartPoleEnv()
    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyper_params['learning_rate'])
    self.target_model = DQNModel(input_len, output_len)

    self.agents = [
        DQN_agent_remote.remote(CartPoleEnv(), memory_server, hyper_params,
                                action_space, i)
        for i in range(nb_agents)
    ]
    self.evaluators = [
        EvalWorker.remote(self.eval_model, CartPoleEnv(),
                          hyper_params['max_episode_steps'],
                          hyper_params['eval_trials'], i)
        for i in range(nb_evaluators)
    ]
def __init__(self, env, hyper_params, memo_server):
    self.memory_server = memo_server
    self.env = env
    self.max_episode_steps = env._max_episode_steps
    self.beta = hyper_params['beta']
    self.training_episodes = hyper_params['training_episodes']
    self.test_interval = hyper_params['test_interval']

    action_space = len(ACTION_DICT)
    self.episode = 0
    self.steps = 0
    self.best_reward = 0
    self.learning = True
    self.action_space = action_space

    state = env.reset()
    input_len = len(state)
    output_len = action_space
    self.eval_model = DQNModel(input_len, output_len,
                               learning_rate=hyper_params['learning_rate'])
    self.use_target_model = hyper_params['use_target_model']
    if self.use_target_model:
        self.target_model = DQNModel(input_len, output_len)

    self.batch_size = hyper_params['batch_size']
    self.update_steps = hyper_params['update_steps']
    self.model_replace_freq = hyper_params['model_replace_freq']

    self.collector_done = False
    self.results = []
    self.initial_epsilon = 1
    self.final_epsilon = hyper_params['final_epsilon']
    self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
    self.replace_target_cnt = 0
    self.epsilon = 1
    self.eval_models_seq = 1
def evaluation_worker(env, mem_server, trials):
    eval_model = DQNModel(len(env.reset()), len(ACTION_DICT))
    learn_done, filedir = False, ""
    while not learn_done:
        filedir, learn_done = ray.get(mem_server.get_evaluate_filedir.remote())
        if not filedir:
            continue
        eval_model.load(filedir)
        start_time, total_reward = time.time(), 0
        for _ in range(trials):
            state, done, steps = env.reset(), False, 0
            while steps < env._max_episode_steps and not done:
                steps += 1
                state, reward, done, _ = env.step(eval_model.predict(state))
                total_reward += reward
        mem_server.add_results.remote(total_reward / trials)
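# Hedged usage sketch, not from the snippet above. It assumes evaluation_worker is
# run as a Ray task, that 'MemoryServer' is a Ray actor exposing
# get_evaluate_filedir/add_results, and that 'make_env' builds a gym-style env;
# all three names are placeholders introduced here for illustration.
#
#   ray.init()
#   mem_server = MemoryServer.remote()
#   worker = ray.remote(evaluation_worker).remote(make_env(), mem_server, 10)
#   ray.get(worker)  # runs until the memory server reports learn_done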
def __init__(self, name):
    """
    :param name: name of the rl_component
    """
    # name of the rl_component
    self.name = name
    # True if the model was set up
    self.is_model_init = False
    # Service for communicating the activations
    self._get_activation_service = rospy.Service(
        name + 'GetActivation', GetActivation,
        self._get_activation_state_callback)
    # choose appropriate model
    self.model = DQNModel(self.name)
    # save the last state
    self.last_state = None
    # the dimensions of the model
    self.number_outputs = -1
    self.number_inputs = -1
    self._unregistered = False
    rospy.on_shutdown(self.unregister)  # cleanup hook, also for saving the model.
def train_main(exp_prefix="", fc_units=[128, 64, 64], env_list=[], num_envs=10,
               num_obstacls_ratio=[0.2, 0.3, 0.3, 0.2], n_step=1, max_episodes=10000,
               max_steps=120, per_num_envs=8, replay_buffer_len=400, no_replay=False,
               batch_size=64, learning_rate=1e-4, epsilon_min=0.05, epsilon_max=0.10,
               gamma=0.98, without_map_info=False, save_interval=1000, show=False):
    # create envs
    if len(env_list) == 0:
        env_list = create_or_load_envs(num_envs, num_obstacls_ratio)

    # create model
    if without_map_info:
        state_dims = 2 + 1
    else:
        state_dims = 4 * (2 + 2) + 6 + 2 + 2
    act_dims = 5
    model = DQNModel(state_dims=state_dims, act_dims=act_dims, fc_units=fc_units)
    print("create model done")

    # optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

    # create replay buffer
    buffer = ReplayBuffer(replay_buffer_len)
    print("create buffer done")

    # construct save path suffix
    weight_dir = os.path.join("weights", exp_prefix)
    dir_util.mkpath(weight_dir)
    log_dir = os.path.join("logs", exp_prefix)
    dir_util.mkpath(log_dir)
    summary_writer = tf.summary.create_file_writer(log_dir)

    # run simulations
    mean_loss_vals = []
    mean_ep_rewards = []
    last_save_ep_idx = 0
    for ep in range(max_episodes // per_num_envs):
        if no_replay:
            buffer.clear()
        num_new_samples = 0
        ep_rewards = []

        # randomly select an env and run rollout
        envs = np.random.choice(env_list, size=(per_num_envs))
        env_indices = np.random.randint(len(env_list), size=(per_num_envs))
        for roll_idx, env_idx in enumerate(env_indices):
            env = env_list[env_idx]
            episode_index = ep * per_num_envs + roll_idx
            epsilon = epsilon_max - (epsilon_max - epsilon_min) / max_episodes * episode_index
            (ship_state_trace, input_states, action_list, reward_list, done_list,
             is_random_act_list, qvals) = run_one_episodes(env, model, epsilon,
                                                           max_steps, without_map_info)
            # td_errors = (reward_list + qvals[1:] * gamma) - qvals[:-1]
            td_errors = get_n_step_estimated_qvals(reward_list, qvals[1:], gamma, n_step) - qvals[:-1]
            buffer.add_items(input_states, action_list, reward_list, done_list, td_errors)
            num_new_samples += len(input_states)
            ep_rewards.append(np.sum(reward_list))
            print("episode {:4d}, env-{:03d}, epsilon: {:4.2f}, episode length: {:3d}, ep_reward: {:8.2f}"
                  .format(episode_index, env_idx, epsilon, len(input_states), np.sum(reward_list)))

            tot_ep_reward = np.sum(reward_list)
            avg_ep_reward = np.mean(reward_list)
            with summary_writer.as_default():
                tf.summary.scalar('tot_ep_reward_trn', tot_ep_reward, step=episode_index)
                tf.summary.scalar('avg_ep_reward_trn', avg_ep_reward, step=episode_index)

            if episode_index % 100 == 0:
                # run an evaluation
                (eval_ship_state_trace, eval_input_states, eval_action_list,
                 eval_reward_list, eval_done_list, eval_is_random_act_list,
                 eval_qval_list) = run_one_episodes(env, model, 0, max_steps,
                                                    without_map_info)
                # log episode reward
                with summary_writer.as_default():
                    eval_tot_ep_reward = np.sum(eval_reward_list)
                    eval_avg_ep_reward = np.mean(eval_reward_list)
                    tf.summary.scalar('tot_ep_reward_evl', eval_tot_ep_reward, step=episode_index)
                    tf.summary.scalar('avg_ep_reward_evl', eval_avg_ep_reward, step=episode_index)

                # eval the loss
                eval_states_curr = np.array(eval_input_states[:-1])
                eval_states_next = np.array(eval_input_states[1:])
                eval_qvals_next = model(eval_states_next, training=False).numpy()
                eval_qvals_next_max = np.amax(eval_qvals_next, axis=1) * (1 - np.array(eval_done_list))
                eval_qvals_esti = get_n_step_estimated_qvals(eval_reward_list,
                                                             eval_qvals_next_max,
                                                             gamma, n_step)
                # to tensor
                eval_states_curr = tf.convert_to_tensor(eval_states_curr, tf.float32)
                eval_action_list_tf = tf.convert_to_tensor(eval_action_list)
                eval_qvals_esti = tf.convert_to_tensor(eval_qvals_esti, tf.float32)

                # eval to get loss
                eval_loss = eval_step_v0(model, eval_states_curr, eval_action_list_tf,
                                         eval_qvals_esti).numpy()
                with summary_writer.as_default():
                    tf.summary.scalar('loss_evl', eval_loss, step=episode_index)

                # draw map and state trace
                env.show(eval_ship_state_trace, np.sum(eval_reward_list), eval_loss,
                         eval_action_list, eval_is_random_act_list,
                         save_path="pictures", prefix=exp_prefix, count=episode_index)

        # run update
        avg_ep_reward = float(np.mean(ep_rewards))
        mean_ep_rewards.append(avg_ep_reward)
        curr_update_loss_vals = []
        if no_replay:
            num_updates = 1
        else:
            num_updates = max(1, min(num_new_samples, replay_buffer_len) // batch_size)
        for _ in range(num_updates):
            # get qvals of next states
            if no_replay:
                batch_size = max(1, int(num_new_samples * 0.8))  # overwrite batch_size
            states_curr, states_next, actions, rewards, dones = buffer.sample(batch_size)
            states_next = tf.convert_to_tensor(states_next, tf.float32)
            qvals_next = model(states_next, training=False).numpy()
            qvals_next = np.amax(qvals_next, axis=1) * (1 - dones)
            qvals_esti = get_n_step_estimated_qvals(rewards, qvals_next, gamma, n_step)

            # to tensor
            states_curr = tf.convert_to_tensor(states_curr, tf.float32)
            actions = tf.convert_to_tensor(actions)
            qvals_esti = tf.convert_to_tensor(qvals_esti, tf.float32)

            # do an update
            loss_trn = train_step_v0(model, optimizer, states_curr, actions,
                                     qvals_esti).numpy()
            with summary_writer.as_default():
                tf.summary.scalar('loss_trn', loss_trn, step=episode_index)
            curr_update_loss_vals.append(loss_trn)
            print("episode {:4d}, bs: {:4d}, loss_trn: {:6.2f}".format(
                episode_index, batch_size, loss_trn))
        mean_loss_vals.append(float(np.mean(curr_update_loss_vals)))

        # draw loss
        if ep > 0 and ep % 10 == 0:
            draw_vals(mean_ep_rewards, mean_loss_vals, per_num_envs, exp_prefix=exp_prefix)
            # save to file for further use
            json.dump([mean_loss_vals, mean_ep_rewards],
                      open("logs/{}_logs_info.json".format(exp_prefix), "w"))

        # Save the weights using the `checkpoint_path` format
        if (episode_index - last_save_ep_idx) > save_interval:
            save_path = os.path.join(weight_dir, "weights_{:05d}.ckpt".format(episode_index))
            model.save_weights(save_path)
            last_save_ep_idx = episode_index
            print("episode-{}, save weights to: {}".format(episode_index, save_path))
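# For reference, a minimal sketch of the n-step target that
# get_n_step_estimated_qvals is assumed to compute in train_main above (the helper
# name and exact semantics here are assumptions, not taken from the original code):
# each target sums up to n discounted rewards and bootstraps from the Q-value at
# the end of that horizon, falling back to shorter horizons near the trajectory end.
import numpy as np

def n_step_targets(rewards, next_qvals, gamma, n_step):
    rewards = np.asarray(rewards, dtype=np.float32)
    next_qvals = np.asarray(next_qvals, dtype=np.float32)
    T = len(rewards)
    targets = np.zeros(T, dtype=np.float32)
    for t in range(T):
        horizon = min(n_step, T - t)
        # discounted sum of the next `horizon` rewards
        ret = sum(gamma ** k * rewards[t + k] for k in range(horizon))
        # bootstrap from the Q-value at the last step of the horizon
        ret += gamma ** horizon * next_qvals[t + horizon - 1]
        targets[t] = ret
    return targets

# With n_step=1 this reduces to rewards + gamma * next_qvals, matching the
# commented-out 1-step td_errors expression in train_main.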