def playActor(self):
    self.load("NetworkParam/FinalParam")
    hdg0_rand_vec = [0, 7, 12]

    ''' WIND CONDITIONS '''
    mean = 45 * TORAD
    std = 0.1 * TORAD
    wind_samples = 10
    w = wind(mean=mean, std=std, samples=wind_samples)

    try:
        for i in range(len(hdg0_rand_vec)):
            # Initial state
            WH = w.generateWind()
            hdg0_rand = hdg0_rand_vec[i]
            hdg0 = hdg0_rand * TORAD * np.ones(10)

            s = self.env.reset(hdg0, WH)
            episode_reward = 0
            episode_step = 0
            v_episode = []
            i_episode = []

            while episode_step < 40:  # not done:
                if episode_step == 0:
                    i_episode.append(hdg0_rand + WH[0] / TORAD - 40)
                else:
                    i_episode.append(s[0][-1] / TORAD)

                s = np.reshape([s[0, :], s[1, :]], [self.state_size, 1])
                a, = self.sess.run(self.network.actions,
                                   feed_dict={self.network.state_ph: s[None]})
                a = np.clip(a, self.low_bound, self.high_bound)

                s_, r = self.env.act(a, WH)
                episode_reward += r
                v_episode.append(r)
                episode_step += 1
                s = s_

            DISPLAYER.displayVI(v_episode, i_episode, i)
            print("Episode reward :", episode_reward, " for incidence: ", hdg0_rand)

    except KeyboardInterrupt:
        pass
    except Exception as e:
        print("Exception :", e)
    finally:
        print("End of the demo")
def __init__(self, sess):
    print("Initializing the agent...")

    self.sess = sess
    self.env = Environment()
    self.state_size = self.env.get_state_size()[0]
    self.action_size = self.env.get_action_size()
    self.low_bound, self.high_bound = self.env.get_bounds()

    self.buffer = ExperienceBuffer()

    print("Creation of the actor-critic network")
    self.network = Network(self.state_size, self.action_size,
                           self.low_bound, self.high_bound)

    self.sess.run(tf.global_variables_initializer())
    DISPLAYER.reset()
def run(self):
    self.total_steps = 1
    self.sess.run(self.network.target_init)
    self.z = self.sess.run(self.network.z)
    self.delta_z = self.network.delta_z

    ep = 1
    while ep < settings.TRAINING_EPS + 1 and not GUI.STOP:

        s = self.env.reset()
        episode_reward = 0
        episode_step = 0
        done = False
        memory = deque()

        # Initialize exploration noise process
        noise_scale = settings.NOISE_SCALE * settings.NOISE_DECAY**ep

        # Episode display settings
        self.env.set_render(GUI.render.get(ep))
        self.env.set_gif(GUI.gif.get(ep))
        plot_distrib = GUI.plot_distrib.get(ep)

        max_eps = settings.MAX_EPISODE_STEPS + (ep // 50)

        while episode_step < max_eps and not done:

            noise = np.random.normal(size=self.action_size)
            scaled_noise = noise_scale * noise
            a = np.clip(self.predict_action(s, plot_distrib) + scaled_noise,
                        *self.bounds)

            s_, r, done, info = self.env.act(a)
            episode_reward += r

            memory.append((s, a, r, s_, 0 if done else 1))

            if len(memory) >= settings.N_STEP_RETURN:
                s_mem, a_mem, discount_r, ss_mem, done_mem = memory.popleft()
                for i, (si, ai, ri, s_i, di) in enumerate(memory):
                    discount_r += ri * settings.DISCOUNT**(i + 1)
                BUFFER.add(s_mem, a_mem, discount_r, s_, 0 if done else 1)

            if len(BUFFER) > 0 and \
                    self.total_steps % settings.TRAINING_FREQ == 0:
                self.network.train(BUFFER.sample(),
                                   self.critic_lr, self.actor_lr)

            s = s_
            episode_step += 1
            self.total_steps += 1
            self.critic_lr -= self.delta_critic_lr
            self.actor_lr -= self.delta_actor_lr

        # Plot reward
        plot = GUI.plot.get(ep)
        DISPLAYER.add_reward(episode_reward, plot)

        # Print episode reward
        if GUI.ep_reward.get(ep):
            print('Episode %2i, Reward: %7.3f, Steps: %i, '
                  'Final noise scale: %7.3f, Critic LR: %f, Actor LR: %f' %
                  (ep, episode_reward, episode_step, noise_scale,
                   self.critic_lr, self.actor_lr))

        # Save the model
        if GUI.save.get(ep):
            SAVER.save(ep)

        ep += 1
def run(self): print("Beginning of the run...") self.pre_train() self.total_steps = 0 self.nb_ep = 1 while self.nb_ep < parameters.TRAINING_STEPS: self.learning_rate = self.initial_learning_rate * \ (parameters.TRAINING_STEPS - self.nb_ep) / \ parameters.TRAINING_STEPS s = self.env.reset() episode_reward = 0 done = False memory = deque() discount_R = 0 episode_step = 0 max_step = parameters.MAX_EPISODE_STEPS + \ self.nb_ep // parameters.EP_ELONGATION # Render parameters self.env.set_render(self.nb_ep % parameters.RENDER_FREQ == 0) while episode_step < max_step and not done: if random.random() < self.epsilon: a = random.randint(0, self.action_size - 1) else: a = self.sess.run(self.mainQNetwork.predict, feed_dict={self.mainQNetwork.inputs: [s]}) a = a[0] s_, r, done, info = self.env.act(a) episode_reward += r memory.append((s, a, r, s_, done)) if len(memory) > parameters.N_STEP_RETURN: s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft() discount_R = r_mem for i, (si, ai, ri, s_i, di) in enumerate(memory): discount_R += ri * parameters.DISCOUNT ** (i + 1) self.buffer.add(s_mem, a_mem, discount_R, s_, done) if episode_step % parameters.TRAINING_FREQ == 0: train_batch = self.buffer.sample(parameters.BATCH_SIZE, self.beta) # Incr beta if self.beta <= parameters.BETA_STOP: self.beta += parameters.BETA_INCR feed_dict = {self.mainQNetwork.inputs: train_batch[0]} oldQvalues = self.sess.run(self.mainQNetwork.Qvalues, feed_dict=feed_dict) tmp = [0] * len(oldQvalues) for i, oldQvalue in enumerate(oldQvalues): tmp[i] = oldQvalue[train_batch[1][i]] oldQvalues = tmp feed_dict = {self.mainQNetwork.inputs: train_batch[3]} mainQaction = self.sess.run(self.mainQNetwork.predict, feed_dict=feed_dict) feed_dict = {self.targetQNetwork.inputs: train_batch[3]} targetQvalues = self.sess.run(self.targetQNetwork.Qvalues, feed_dict=feed_dict) # Done multiplier : # equals 0 if the episode was done # equals 1 else done_multiplier = (1 - train_batch[4]) doubleQ = targetQvalues[range(parameters.BATCH_SIZE), mainQaction] targetQvalues = train_batch[2] + \ parameters.DISCOUNT * doubleQ * done_multiplier errors = np.square(targetQvalues - oldQvalues) + 1e-6 self.buffer.update_priorities(train_batch[6], errors) feed_dict = {self.mainQNetwork.inputs: train_batch[0], self.mainQNetwork.Qtarget: targetQvalues, self.mainQNetwork.actions: train_batch[1], self.mainQNetwork.learning_rate: self.learning_rate} _ = self.sess.run(self.mainQNetwork.train, feed_dict=feed_dict) update_target(self.update_target_ops, self.sess) s = s_ episode_step += 1 self.total_steps += 1 # Decay epsilon if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY DISPLAYER.add_reward(episode_reward) # if episode_reward > self.best_run and \ # self.nb_ep > 50: # self.best_run = episode_reward # print("Save best", episode_reward) # SAVER.save('best') # self.play(1) self.total_steps += 1 if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon: %.3f' ', Max steps: %i, Learning rate: %g' % ( self.nb_ep, episode_reward, episode_step, self.epsilon, max_step, self.learning_rate)) # Save the model if self.nb_ep % parameters.SAVE_FREQ == 0: SAVER.save(self.nb_ep) self.nb_ep += 1
import tensorflow as tf

from Agent import Agent
from Displayer import DISPLAYER
from Saver import SAVER
import parameters


if __name__ == '__main__':

    tf.reset_default_graph()

    with tf.Session() as sess:

        agent = Agent(sess)
        SAVER.set_sess(sess)
        SAVER.load(agent)

        print("Beginning of the run")
        try:
            agent.run()
        except KeyboardInterrupt:
            pass
        print("End of the run")

        SAVER.save(agent.total_steps)
        DISPLAYER.disp()

        # agent.play(10)
        # agent.play(3, "results/gif/".format(parameters.ENV))

        agent.close()
def run(self):

    self.total_steps = 0

    for ep in range(1, parameters.TRAINING_STEPS + 1):

        episode_reward = 0
        episode_step = 0
        done = False

        # Initial state
        s = self.env.reset()
        self.env.set_render(ep % 1000 == 0)
        gif = (ep % 1500 == 0)
        step_allonge = ep // 1000

        while episode_step < parameters.MAX_EPISODE_STEPS + step_allonge \
                and not done:

            if random.random() < self.epsilon:
                a = self.env.random()
            else:
                # choose action based on deterministic policy
                a, = self.sess.run(self.network.actions,
                                   feed_dict={self.network.state_ph: [s]})

            s_, r, done, info = self.env.act(a, gif)
            episode_reward += r

            self.buffer.add((s, a, r, s_, 0.0 if done else 1.0))

            # update network weights to fit a minibatch of experience
            if self.total_steps % parameters.TRAINING_FREQ == 0 and \
                    len(self.buffer) >= parameters.BATCH_SIZE:

                minibatch = self.buffer.sample()

                _, _ = self.sess.run(
                    [self.network.critic_train_op,
                     self.network.actor_train_op],
                    feed_dict={
                        self.network.state_ph:
                            np.asarray([elem[0] for elem in minibatch]),
                        self.network.action_ph:
                            np.asarray([elem[1] for elem in minibatch]),
                        self.network.reward_ph:
                            np.asarray([elem[2] for elem in minibatch]),
                        self.network.next_state_ph:
                            np.asarray([elem[3] for elem in minibatch]),
                        self.network.is_not_terminal_ph:
                            np.asarray([elem[4] for elem in minibatch])})

                # update target networks
                _ = self.sess.run(self.network.update_slow_targets_op)

            s = s_
            episode_step += 1
            self.total_steps += 1

        # Decay epsilon
        if self.epsilon > parameters.EPSILON_STOP:
            self.epsilon -= self.epsilon_decay

        if gif:
            self.env.save_gif('results/gif/', self.n_gif)
            self.n_gif = (self.n_gif + 1) % 5

        if episode_reward > self.best_run:
            self.best_run = episode_reward
            print("Save best", episode_reward)
            SAVER.save('best')

        DISPLAYER.add_reward(episode_reward)
        if ep % 50 == 0:
            print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon: %7.3f'
                  ' (max step: %i)' %
                  (ep, episode_reward, episode_step, self.epsilon,
                   parameters.MAX_EPISODE_STEPS + step_allonge))
        if ep % 500 == 0:
            DISPLAYER.disp()
SAVER.set_sess(sess)
SAVER.load()

# Run threads that each contain one worker
worker_threads = []
for i, worker in enumerate(workers):
    print("Threading worker", i + 1)
    sleep(0.05)
    # Bind the current worker explicitly so each thread keeps its own worker
    work = lambda w=worker: w.work(sess, coord)
    t = threading.Thread(target=work)
    t.start()
    worker_threads.append(t)
    sleep(0.1)

try:
    # Wait until all the workers are done
    coord.join(worker_threads)
except Exception as e:
    coord.request_stop(e)
except KeyboardInterrupt:
    coord.request_stop()
finally:
    print("End of the training")

DISPLAYER.disp_all()
DISPLAYER.disp_one()
DISPLAYER.disp_seq()

master_agent.play(sess, 10)
master_agent.play(sess, 1, "results/gif/{}_1.gif".format(settings.ENV))
import tensorflow as tf

from Agent import Agent
from Displayer import DISPLAYER
import parameters


if __name__ == '__main__':

    tf.reset_default_graph()

    with tf.Session() as sess:

        agent = Agent(sess)

        print("Beginning of the run")
        try:
            print("I do not do the run")
            # agent.run()
        except KeyboardInterrupt:
            agent.save("NetworkParam/FinalParam")
        print("End of the run")

        DISPLAYER.dispR()
        agent.playActor()
        agent.playCritic()
def playCritic(self):
    self.load("NetworkParam/FinalParam")
    hdg0_rand_vec = [0, 7, 12]

    ''' WIND CONDITIONS '''
    mean = 45 * TORAD
    std = 0.1 * TORAD
    wind_samples = 10
    w = wind(mean=mean, std=std, samples=wind_samples)

    # Discrete set of candidate actions evaluated by the critic
    action_set = [-1.5, -1.25, -1, -0.75, -0.5, -0.25, 0,
                  0.25, 0.5, 0.75, 1, 1.25, 1.5]

    try:
        for i in range(len(hdg0_rand_vec)):
            # Initial state
            WH = w.generateWind()
            hdg0_rand = hdg0_rand_vec[i]
            hdg0 = hdg0_rand * TORAD * np.ones(10)

            s = self.env.reset(hdg0, WH)
            episode_reward = 0
            episode_step = 0
            v_episode = []
            i_episode = []

            while episode_step < 30:  # not done:
                if episode_step == 0:
                    i_episode.append(hdg0_rand + WH[0] / TORAD - 40)
                else:
                    i_episode.append(s[0][-1] / TORAD)

                # Critic policy: pick the candidate action with the
                # highest estimated value
                critic = [self.evaluate(s, a) for a in action_set]
                a = action_set[np.argmax(critic)]

                s_, r = self.env.act(a, WH)
                episode_reward += r
                v_episode.append(r)
                episode_step += 1
                s = s_

            DISPLAYER.displayVI(v_episode, i_episode, i + 3)
            print("Episode reward :", episode_reward, " for incidence: ", hdg0_rand)

    except KeyboardInterrupt:
        pass
    except Exception as e:
        print("Exception :", e)
    finally:
        print("End of the demo")
def work(self, sess, coord): print("Running", self.name, end='\n\n') self.starting_time = time() self.nb_ep = 1 nearlyDone = 0 with sess.as_default(), sess.graph.as_default(): with coord.stop_on_exception(): while not coord.should_stop(): self.states_buffer = [] self.actions_buffer = [] self.rewards_buffer = [] self.values_buffer = [] self.mean_values_buffer = [] self.total_steps = 0 episode_reward = 0 episode_step = 0 # Reset the local network to the global sess.run(self.update_local_vars) mean = 45 * TORAD std = 0 * TORAD wind_samples = 10 w = wind(mean=mean, std=std, samples=wind_samples) WH = w.generateWind() hdg0_rand = random.uniform(5, 12) hdg0 = hdg0_rand * TORAD * np.ones(10) s = self.env.reset(hdg0, WH) done = False #if self.worker_index == 1 and render and settings.DISPLAY: # self.env.set_render(True) #self.lstm_state = self.network.lstm_state_init #self.initial_lstm_state = self.lstm_state while not coord.should_stop() and not done and \ episode_step < settings.MAX_EPISODE_STEP: WH = np.random.uniform(mean - std, mean + std, size=wind_samples) s = np.reshape([s[0, :], s[1, :]], [2 * self.state_size, 1]) # Prediction of the policy and the value feed_dict = {self.network.inputs: [s]} policy, value = sess.run( [self.network.policy, self.network.value], feed_dict=feed_dict) policy, value = policy[0], value[0][0] if random.random() < self.epsilon: action = random.choice([1.5, 0, -1.5]) else: # Choose an action according to the policy action = np.random.choice([1.5, 0, -1.5], p=policy) s_, v = self.env.act(action, WH) #reward assignation algorithm if episode_step == 1: r = 0 elif s[int(self.state_size / 2 - 2)] > ( 13 * TORAD) and s[int(self.state_size / 2 - 2)] < ( 15 * TORAD ) and v > 0.63 and v < 0.67 and action < 0: r = 0.5 else: if v <= 0.69: r = 0 nearlyDone = 0 elif v > 0.69 and v <= 0.75: r = 0.00001 nearlyDone = 0 elif v > 0.75 and v <= 0.8: r = 0.01 nearlyDone = 0 elif v > 0.80: r = 0.1 if nearlyDone >= 3: r = 1 done = True elif nearlyDone == 2: r = 0.8 elif nearlyDone == 1: r = 0.25 nearlyDone = nearlyDone + 1 else: r = 0 nearlyDone = False #s_ = np.reshape(s_, [2*self.state_size,1]) # Store the experience self.states_buffer.append(s) self.actions_buffer.append(action) self.rewards_buffer.append(r) self.values_buffer.append(value) self.mean_values_buffer.append(value) episode_reward += r s = s_ episode_step += 1 self.total_steps += 1 # If we have more than MAX_LEN_BUFFER experiences, we # apply the gradients and update the global network, # then we empty the episode buffers if len(self.states_buffer) == settings.MAX_LEN_BUFFER \ and not done: feed_dict = { self.network.inputs: [ np.reshape([s[0, :], s[1, :]], [2 * self.state_size, 1]) ] } bootstrap_value = sess.run(self.network.value, feed_dict=feed_dict) self.train(sess, bootstrap_value ) #with this we change global network sess.run(self.update_local_vars) #self.initial_lstm_state = self.lstm_state if len(self.states_buffer) != 0: if done: bootstrap_value = 0 else: feed_dict = { self.network.inputs: [ np.reshape([s[0, :], s[1, :]], [2 * self.state_size, 1]) ] } bootstrap_value = sess.run(self.network.value, feed_dict=feed_dict) self.train(sess, bootstrap_value) if self.epsilon > settings.EPSILON_STOP: self.epsilon -= settings.EPSILON_DECAY self.nb_ep += 1 if not coord.should_stop(): DISPLAYER.add_reward(episode_reward, self.worker_index) if (self.worker_index == 1 and self.nb_ep % settings.DISP_EP_REWARD_FREQ == 0): print( 'Episode %2i, Initial hdg: %2i, Reward: %7.3f, Steps: %i, ' 'Epsilon: %7.3f' % (self.nb_ep, 
hdg0_rand, episode_reward, episode_step, self.epsilon)) print("Policy: ", policy) if (self.worker_index == 1 and self.nb_ep % settings.SAVE_FREQ == 0): self.save(self.total_steps) if time() - self.starting_time > settings.LIMIT_RUN_TIME: coord.request_stop() self.summary_writer.close()
def run(self):

    self.total_steps = 0

    for ep in range(1, parameters.TRAINING_STEPS + 1):

        episode_reward = 0
        episode_step = 0
        done = False

        # Initialize exploration noise process
        noise_process = np.zeros(self.action_size)
        noise_scale = (parameters.NOISE_SCALE_INIT *
                       parameters.NOISE_DECAY**ep) * \
            (self.high_bound - self.low_bound)

        # Initial state
        s = self.env.reset()
        render = (ep % parameters.RENDER_FREQ == 0 and parameters.DISPLAY)
        self.env.set_render(render)

        while episode_step < parameters.MAX_EPISODE_STEPS and not done:

            # choose action based on deterministic policy
            a, = self.sess.run(self.network.actions,
                               feed_dict={self.network.state_ph: s[None]})

            # add temporally-correlated exploration noise to action
            # (using an Ornstein-Uhlenbeck process)
            noise_process = parameters.EXPLO_THETA * \
                (parameters.EXPLO_MU - noise_process) + \
                parameters.EXPLO_SIGMA * np.random.randn(self.action_size)
            a += noise_scale * noise_process

            s_, r, done, info = self.env.act(a)
            episode_reward += r

            self.buffer.add((s, a, r, s_, 0.0 if done else 1.0))

            # update network weights to fit a minibatch of experience
            if self.total_steps % parameters.TRAINING_FREQ == 0 and \
                    len(self.buffer) >= parameters.BATCH_SIZE:

                minibatch = self.buffer.sample()

                _, _ = self.sess.run(
                    [self.network.critic_train_op,
                     self.network.actor_train_op],
                    feed_dict={
                        self.network.state_ph:
                            np.asarray([elem[0] for elem in minibatch]),
                        self.network.action_ph:
                            np.asarray([elem[1] for elem in minibatch]),
                        self.network.reward_ph:
                            np.asarray([elem[2] for elem in minibatch]),
                        self.network.next_state_ph:
                            np.asarray([elem[3] for elem in minibatch]),
                        self.network.is_not_terminal_ph:
                            np.asarray([elem[4] for elem in minibatch])})

                # update target networks
                _ = self.sess.run(self.network.update_slow_targets_op)

            s = s_
            episode_step += 1
            self.total_steps += 1

        if ep % parameters.DISP_EP_REWARD_FREQ == 0:
            print('Episode %2i, Reward: %7.3f, Steps: %i, '
                  'Final noise scale: %7.3f' %
                  (ep, episode_reward, episode_step, noise_scale))
        DISPLAYER.add_reward(episode_reward)
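# The exploration noise above is described as an Ornstein-Uhlenbeck process.
# In the canonical formulation the increment theta*(mu - x) + sigma*N(0, 1)
# is added to the previous noise value rather than replacing it. The sketch
# below shows that canonical update as a small stand-alone class; the class
# name and default parameters are illustrative and not part of this
# repository.

import numpy as np


class OrnsteinUhlenbeckNoise:
    """Temporally correlated noise: x_{t+1} = x_t + theta*(mu - x_t) + sigma*N."""

    def __init__(self, action_size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(action_size)
        self.theta = theta
        self.sigma = sigma
        self.x = np.copy(self.mu)

    def reset(self):
        # Call at the start of each episode
        self.x = np.copy(self.mu)

    def sample(self):
        self.x = self.x + self.theta * (self.mu - self.x) + \
            self.sigma * np.random.randn(*self.x.shape)
        return self.x


# Usage sketch:
#   noise = OrnsteinUhlenbeckNoise(action_size)
#   a += noise_scale * noise.sample()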
def run(self): print("Beginning of the run...") self.pre_train() self.total_steps = 0 self.nb_ep = 1 while self.nb_ep < parameters.TRAINING_STEPS: s = self.env.reset() episode_reward = 0 done = False memory = deque() discount_R = 0 episode_step = 0 # Render parameters self.env.set_render(self.nb_ep % parameters.RENDER_FREQ == 0) while episode_step < parameters.MAX_EPISODE_STEPS and not done: if random.random() < self.epsilon: a = random.randint(0, self.action_size - 1) else: a = self.sess.run( self.mainQNetwork.predict, feed_dict={self.mainQNetwork.inputs: [s]}) a = a[0] s_, r, done, info = self.env.act(a) episode_reward += r memory.append((s, a, r, s_, done)) if len(memory) > parameters.N_STEP_RETURN: s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft() discount_R = r_mem for i, (si, ai, ri, s_i, di) in enumerate(memory): discount_R += ri * parameters.DISCOUNT**(i + 1) self.buffer.add(s_mem, a_mem, discount_R, s_, done) if episode_step % parameters.TRAINING_FREQ == 0: train_batch = self.buffer.sample(parameters.BATCH_SIZE, self.beta) # Incr beta if self.beta <= parameters.BETA_STOP: self.beta += parameters.BETA_INCR feed_dict = {self.mainQNetwork.inputs: train_batch[3]} mainQaction = self.sess.run(self.mainQNetwork.predict, feed_dict=feed_dict) feed_dict = {self.targetQNetwork.inputs: train_batch[3]} targetQvalues = self.sess.run(self.targetQNetwork.Qvalues, feed_dict=feed_dict) # Done multiplier : # equals 0 if the episode was done # equals 1 else done_multiplier = (1 - train_batch[4]) doubleQ = targetQvalues[range(parameters.BATCH_SIZE), mainQaction] targetQvalues = train_batch[2] + \ parameters.DISCOUNT * doubleQ * done_multiplier feed_dict = { self.mainQNetwork.inputs: train_batch[0], self.mainQNetwork.Qtarget: targetQvalues, self.mainQNetwork.actions: train_batch[1] } td_error, _ = self.sess.run( [self.mainQNetwork.td_error, self.mainQNetwork.train], feed_dict=feed_dict) self.buffer.update_priorities(train_batch[6], td_error + 1e-6) update_target(self.update_target_ops, self.sess) s = s_ episode_step += 1 self.total_steps += 1 # Decay epsilon if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY DISPLAYER.add_reward(episode_reward) self.total_steps += 1 if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon: %f' % (self.nb_ep, episode_reward, episode_step, self.epsilon)) self.nb_ep += 1
def work(self, sess, coord): print("Running", self.name, end='\n\n') self.starting_time = time() self.nb_ep = 1 with sess.as_default(), sess.graph.as_default(): with coord.stop_on_exception(): while not coord.should_stop(): self.states_buffer = [] self.actions_buffer = [] self.rewards_buffer = [] self.values_buffer = [] self.mean_values_buffer = [] self.lstm_buffer = [] self.total_steps = 0 episode_reward = 0 episode_step = 0 # Reset the local network to the global sess.run(self.update_local_vars) s = self.env.reset() done = False render = (self.nb_ep % parameters.RENDER_FREQ == 0) if render and parameters.DISPLAY: self.env.set_render(True) self.lstm_state = self.network.lstm_state_init self.initial_lstm_state = self.lstm_state while not coord.should_stop() and not done and \ episode_step < parameters.MAX_EPISODE_STEP: self.lstm_buffer.append(self.lstm_state) # Prediction of the policy and the value feed_dict = { self.network.inputs: [s], self.network.state_in: self.lstm_state } policy, value, self.lstm_state = sess.run( [ self.network.policy, self.network.value, self.network.state_out ], feed_dict=feed_dict) policy, value = policy[0], value[0][0] if random.random() < self.epsilon: action = random.randint(0, self.action_size - 1) else: # Choose an action according to the policy action = np.random.choice(self.action_size, p=policy) s_, r, done, _ = self.env.act(action) # Store the experience self.states_buffer.append(s) self.actions_buffer.append(action) self.rewards_buffer.append(r) self.values_buffer.append(value) self.mean_values_buffer.append(value) episode_reward += r s = s_ episode_step += 1 self.total_steps += 1 # If we have more than MAX_LEN_BUFFER experiences, we # apply the gradients and update the global network, # then we empty the episode buffers if len(self.states_buffer) == parameters.MAX_LEN_BUFFER \ and not done: feed_dict = { self.network.inputs: [s], self.network.state_in: self.lstm_state } bootstrap_value = sess.run(self.network.value, feed_dict=feed_dict) self.train(sess, bootstrap_value) sess.run(self.update_local_vars) self.initial_lstm_state = self.lstm_state if len(self.states_buffer) != 0: if done: bootstrap_value = 0 else: feed_dict = { self.network.inputs: [s], self.network.state_in: self.lstm_state } bootstrap_value = sess.run(self.network.value, feed_dict=feed_dict) self.train(sess, bootstrap_value) if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY self.nb_ep += 1 if not coord.should_stop(): DISPLAYER.add_reward(episode_reward, self.worker_index) if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: print('Agent: %i, Episode %2i, Reward: %i, Steps: %i, ' 'Epsilon: %7.3f' % (self.worker_index, self.nb_ep, episode_reward, episode_step, self.epsilon)) if (self.worker_index == 1 and self.nb_ep % parameters.SAVE_FREQ == 0): self.save(self.total_steps) if time() - self.starting_time > parameters.LIMIT_RUN_TIME: coord.request_stop() self.env.set_render(False) self.summary_writer.close() self.env.close()
def run(self):
    # self.load("NetworkParam_best_ThirdSemester/FinalParam")
    # (uncomment to start the training from the best saved parameters)

    self.total_steps = 0

    ''' WIND CONDITIONS '''
    mean = 45 * TORAD
    std = 0.1 * TORAD
    wind_samples = 10
    w = wind(mean=mean, std=std, samples=wind_samples)
    WH = w.generateWind()

    for ep in range(1, parameters.TRAINING_STEPS + 1):

        episode_reward = 0
        episode_step = 0
        nearlyDone = 0
        done = False

        # Initialize exploration noise process
        noise_process = np.zeros(self.action_size)
        noise_scale = (parameters.NOISE_SCALE_INIT *
                       parameters.NOISE_DECAY**ep) * \
            (self.high_bound - self.low_bound)

        # Initial state
        w = wind(mean=mean, std=std, samples=wind_samples)
        WH = w.generateWind()
        hdg0_rand = random.uniform(6, 13)
        hdg0 = hdg0_rand * TORAD * np.ones(10)
        s = self.env.reset(hdg0, WH)

        while episode_step < parameters.MAX_EPISODE_STEPS:  # and not done:

            WH = np.random.uniform(mean - std, mean + std, size=wind_samples)

            # choose action based on deterministic policy
            s = np.reshape([s[0, :], s[1, :]], [self.state_size, 1])
            a, = self.sess.run(self.network.actions,
                               feed_dict={self.network.state_ph: s[None]})

            # add temporally-correlated exploration noise to action
            # (using an Ornstein-Uhlenbeck process)
            noise_process = parameters.EXPLO_THETA * \
                (parameters.EXPLO_MU - noise_process) + \
                parameters.EXPLO_SIGMA * np.random.randn(self.action_size)
            a += noise_scale * noise_process

            # to respect the bounds:
            a = np.clip(a, self.low_bound, self.high_bound)

            s_, v = self.env.act(a, WH)

            # reward assignment algorithm
            if episode_step == 1:
                r = 0
            # elif s[int(self.state_size/2-2)] > (13*TORAD) and \
            #         s[int(self.state_size/2-2)] < (15*TORAD) and \
            #         v > 0.63 and v < 0.67 and a < 0:
            #     r = 0.1
            else:
                if v <= 0.69:
                    r = 0
                    nearlyDone = 0
                elif v > 0.69 and v <= 0.75:
                    r = 0.00001
                    nearlyDone = 0
                elif v > 0.75 and v <= 0.8:
                    r = 0.01
                    nearlyDone = 0
                elif v > 0.80:
                    r = 0.1
                    if nearlyDone >= 3:
                        r = 1
                        done = True
                    elif nearlyDone == 2:
                        r = 0.8
                    elif nearlyDone == 1:
                        r = 0.25
                    nearlyDone = nearlyDone + 1
                else:
                    r = 0
                    nearlyDone = 0

            episode_reward += r

            self.buffer.add(
                (s,
                 np.reshape(a, [1, 1]),
                 r,
                 np.reshape(s_, [self.state_size, 1]),
                 0.0 if episode_step < parameters.MAX_EPISODE_STEPS - 1
                 else 1.0))  # 0.0 if done else 1.0

            # update network weights to fit a minibatch of experience
            if self.total_steps % parameters.TRAINING_FREQ == 0 and \
                    len(self.buffer) >= parameters.BATCH_SIZE:

                minibatch = self.buffer.sample()

                _, _, critic_loss = self.sess.run(
                    [self.network.critic_train_op,
                     self.network.actor_train_op,
                     self.network.critic_loss],
                    feed_dict={
                        self.network.state_ph:
                            np.asarray([elem[0] for elem in minibatch]),
                        self.network.action_ph:
                            np.asarray([elem[1] for elem in minibatch]),
                        self.network.reward_ph:
                            np.asarray([elem[2] for elem in minibatch]),
                        self.network.next_state_ph:
                            np.asarray([elem[3] for elem in minibatch]),
                        self.network.is_not_terminal_ph:
                            np.asarray([elem[4] for elem in minibatch])})

                # update target networks
                _ = self.sess.run(self.network.update_slow_targets_op)

            s = s_
            episode_step += 1
            self.total_steps += 1

        if ep % parameters.DISP_EP_REWARD_FREQ == 0:
            print('Episode %2i, initial heading: %7.3f, Reward: %7.3f, '
                  'Final noise scale: %7.3f, critic loss: %7.3f' %
                  (ep, hdg0[0] * (1 / TORAD), episode_reward,
                   noise_scale, critic_loss))
        DISPLAYER.add_reward(episode_reward)

        # We save CNN weights every 500 epochs
        if ep % 500 == 0 and ep != 0:
            self.save("NetworkParam/" + str(ep) + "_epochs")

    self.save("NetworkParam/" + "FinalParam")
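# The reward assignment above maps the measured boat speed v to a shaped
# reward, with a counter (nearlyDone) that escalates the reward when the
# speed stays above 0.80 for several consecutive steps. The same schedule is
# sketched below as a pure helper function to make the thresholds easier to
# read; the function name and signature are illustrative, not part of the
# repository.

def shaped_reward(v, nearly_done):
    """Return (reward, new_nearly_done, done) for a speed measurement v."""
    done = False
    if v <= 0.69:
        r, nearly_done = 0.0, 0
    elif v <= 0.75:
        r, nearly_done = 0.00001, 0
    elif v <= 0.8:
        r, nearly_done = 0.01, 0
    else:
        # Speed above 0.80: escalate with the number of consecutive hits
        r = {0: 0.1, 1: 0.25, 2: 0.8}.get(nearly_done, 1.0)
        if nearly_done >= 3:
            done = True
        nearly_done += 1
    return r, nearly_done, done


# Usage sketch inside the episode loop:
#   r, nearlyDone, done = shaped_reward(v, nearlyDone)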
def process(self, sess, total_steps, summary_writer, summary_op, score_input):
    start_time = time.time()

    buffer = []
    done = False
    episode_step = 0

    # copy weights from global to local
    sess.run(self.update_network)

    start_lstm_state = self.local_network.lstm_state_out

    for i in range(UPDATE_FREQ):
        pi, value = self.local_network.run_policy_and_value(sess, self.state)
        a = np.random.choice(ACTION_SIZE, p=pi)

        s_, r, terminal, _ = self.env.act(a)
        self.episode_reward += r

        # clip reward
        r = np.clip(r, -1, 1)

        buffer.append((self.state, a, r, value))

        episode_step += 1
        self.worker_total_steps += 1
        self.state = s_

        if terminal:
            done = True
            self.worker_total_eps += 1

            DISPLAYER.add_reward(self.episode_reward, self.thread_index)
            if (self.thread_index == 1 and
                    self.worker_total_eps % DISP_REWARD_FREQ == 0):
                cur_learning_rate = self._anneal_learning_rate(total_steps)
                print('Episode %i, Reward %i, Steps %i, LR %g' %
                      (self.worker_total_eps, self.episode_reward,
                       episode_step, cur_learning_rate))

            self._record_score(sess, summary_writer, summary_op,
                               score_input, self.episode_reward, total_steps)

            self.episode_reward = 0
            self.env.reset()
            self.local_network.reset_state()
            render = (DISPLAY and self.thread_index == 1 and
                      (self.worker_total_eps - 1) % RENDER_FREQ == 0)
            self.env.set_render(render)
            break

    batch_s = deque()
    batch_a = deque()
    batch_td = deque()
    batch_R = deque()

    # Bootstrapping
    R = 0.0
    if not done:
        R = self.local_network.run_value(sess, self.state)

    # compute and accumulate gradients
    for i in range(len(buffer) - 1, -1, -1):
        si, ai, ri, Vi = buffer[i]

        R = ri + GAMMA * R
        td = R - Vi
        a = np.zeros([ACTION_SIZE])
        a[ai] = 1

        batch_s.appendleft(si)
        batch_a.appendleft(a)
        batch_td.appendleft(td)
        batch_R.appendleft(R)

    cur_learning_rate = self._anneal_learning_rate(total_steps)

    feed_dict = {self.local_network.state: batch_s,
                 self.local_network.action: batch_a,
                 self.local_network.td_error: batch_td,
                 self.local_network.reward: batch_R,
                 self.local_network.initial_lstm_state: start_lstm_state,
                 self.local_network.step_size: [len(batch_a)],
                 self.learning_rate_input: cur_learning_rate}

    sess.run(self.apply_gradients, feed_dict=feed_dict)

    if done and (self.thread_index == 1) and \
            (self.worker_total_eps % PERF_FREQ == 0 or
             self.worker_total_eps == 15):
        global_time = time.time() - self.start_time
        steps_per_sec = total_steps / global_time
        print("### Performance : {} STEPS in {:.0f} sec. "
              "{:.0f} STEPS/sec. {:.2f}M STEPS/hour ###".format(
                  total_steps, global_time, steps_per_sec,
                  steps_per_sec * 3600 / 1000000.))

    elapsed_time = time.time() - start_time
    return elapsed_time, done, episode_step
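# The backward loop above implements the standard A3C return computation:
# starting from the bootstrap value R, each step accumulates R = r_i + gamma*R
# and the advantage estimate td_i = R - V_i. The same computation as a small
# stand-alone function (the names are illustrative, not from this repository):

def discounted_returns_and_advantages(rewards, values, bootstrap_value, gamma):
    """Return (returns, advantages) computed backwards from bootstrap_value."""
    returns, advantages = [], []
    R = bootstrap_value
    for r, v in zip(reversed(rewards), reversed(values)):
        R = r + gamma * R
        returns.append(R)
        advantages.append(R - v)
    # Restore chronological order
    returns.reverse()
    advantages.reverse()
    return returns, advantages


# Example: rewards=[0, 1], values=[0.5, 0.4], bootstrap_value=0, gamma=0.99
# -> returns=[0.99, 1.0], advantages=[0.49, 0.6]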
def run(self):

    self.nb_ep = 1
    self.total_steps = 0

    for self.nb_ep in range(1, parameters.TRAINING_STEPS + 1):

        episode_reward = 0
        episode_step = 0
        done = False
        memory = deque()

        # Initial state
        s = self.env.reset()
        max_steps = parameters.MAX_EPISODE_STEPS + \
            self.nb_ep // parameters.EP_ELONGATION

        while episode_step < max_steps and not done:

            if random.random() < self.epsilon:
                a = self.env.random()
            else:
                # choose action based on deterministic policy
                a, = self.sess.run(self.network.actions,
                                   feed_dict={self.network.state_ph: [s]})

            # Decay epsilon
            if self.epsilon > parameters.EPSILON_STOP:
                self.epsilon -= parameters.EPSILON_DECAY

            s_, r, done, info = self.env.act(a)

            memory.append((s, a, r, s_, 0.0 if done else 1.0))

            if len(memory) > parameters.N_STEP_RETURN:
                s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft()
                # N-step return starts from the reward of the popped transition
                discount_R = r_mem
                for i, (si, ai, ri, s_i, di) in enumerate(memory):
                    discount_R += ri * parameters.DISCOUNT**(i + 1)
                self.buffer.add(s_mem, a_mem, discount_R, s_, done)

            # update network weights to fit a minibatch of experience
            if self.total_steps % parameters.TRAINING_FREQ == 0 and \
                    len(self.buffer) >= parameters.BATCH_SIZE:

                minibatch = self.buffer.sample(parameters.BATCH_SIZE,
                                               self.beta)
                if self.beta <= parameters.BETA_STOP:
                    self.beta += parameters.BETA_INCR

                td_errors, _, _ = self.sess.run(
                    [self.network.td_errors,
                     self.network.critic_train_op,
                     self.network.actor_train_op],
                    feed_dict={
                        self.network.state_ph: minibatch[0],
                        self.network.action_ph: minibatch[1],
                        self.network.reward_ph: minibatch[2],
                        self.network.next_state_ph: minibatch[3],
                        self.network.is_not_terminal_ph: minibatch[4]})

                self.buffer.update_priorities(minibatch[6],
                                              td_errors + 1e-6)

                # update target networks
                _ = self.sess.run(self.network.update_slow_targets_op)

            episode_reward += r
            s = s_
            episode_step += 1
            self.total_steps += 1

        self.nb_ep += 1

        if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0:
            print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon : %7.3f, '
                  'Max steps : %i' %
                  (self.nb_ep, episode_reward, episode_step,
                   self.epsilon, max_steps))

        DISPLAYER.add_reward(episode_reward)

        if episode_reward > self.best_run and self.nb_ep > 100:
            self.best_run = episode_reward
            print("Best agent ! ", episode_reward)
            SAVER.save('best')

        if self.nb_ep % parameters.SAVE_FREQ == 0:
            SAVER.save(self.nb_ep)
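# In the loop above, the deque `memory` holds the last N transitions and, once
# full, emits an N-step transition whose return discounts each later reward by
# DISCOUNT**(i+1). A stand-alone sketch of that emission step is given below;
# the helper name and signature are illustrative, not part of this repository.

from collections import deque


def pop_n_step_transition(memory, next_state, done, discount):
    """Pop the oldest transition and attach its N-step discounted return.

    memory:     deque of (s, a, r, s_, not_terminal) tuples, oldest first
    next_state: the state observed N steps after the popped (s, a)
    done:       whether the episode terminated within those N steps
    """
    s0, a0, r0, _, _ = memory.popleft()
    n_step_return = r0
    for i, (_, _, ri, _, _) in enumerate(memory):
        n_step_return += ri * discount ** (i + 1)
    return s0, a0, n_step_return, next_state, done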
def play(self, sess, number_run, path=''):
    print("Playing", self.name, "for", number_run, "runs")

    with sess.as_default(), sess.graph.as_default():

        hdg0_rand_vec = [0, 7, 13]

        ''' WIND CONDITIONS '''
        mean = 45 * TORAD
        std = 0 * TORAD
        wind_samples = 10
        w = wind(mean=mean, std=std, samples=wind_samples)

        try:
            for i in range(number_run):

                # Reset the local network to the global
                if self.name != 'global':
                    sess.run(self.update_local_vars)

                WH = w.generateWind()
                # cycle through the predefined initial incidences
                hdg0_rand = hdg0_rand_vec[i % len(hdg0_rand_vec)]
                hdg0 = hdg0_rand * TORAD * np.ones(10)

                s = self.env.reset(hdg0, WH)
                episode_reward = 0
                episode_step = 0
                v_episode = []
                i_episode = []
                done = False
                # self.lstm_state = self.network.lstm_state_init

                while not done and episode_step < 70:

                    i_episode.append(round(s[0][-1] / TORAD))
                    s = np.reshape([s[0, :], s[1, :]],
                                   [2 * self.state_size, 1])

                    # Prediction of the policy
                    feed_dict = {self.network.inputs: [s]}
                    policy, value = sess.run(
                        [self.network.policy, self.network.value],
                        feed_dict=feed_dict)
                    policy = policy[0]

                    # Choose an action according to the policy
                    action = np.random.choice([1.5, 0, -1.5], p=policy)
                    s_, r = self.env.act(action, WH)

                    if episode_step > 12:
                        if np.mean(v_episode[-4:]) > 0.8:
                            # done = True
                            print("Done!")
                        else:
                            done = False

                    episode_reward += r
                    v_episode.append(r)
                    episode_step += 1
                    s = s_

                DISPLAYER.displayVI(v_episode, i_episode, i)
                print("Episode reward :", episode_reward)

        except KeyboardInterrupt:
            pass
        finally:
            print("End of the demo")