def __init__(self, sess, gui, displayer, saver):
    """
    Build a new instance of Environment and QNetwork.

    Args:
        sess     : the tensorflow session in which to build the network
        gui      : a GUI instance to manage the control of the agent
        displayer: a Displayer instance to keep track of the episode rewards
        saver    : a Saver instance to save the network periodically
    """
    print("Initializing the agent...")

    self.sess = sess
    self.gui = gui
    self.displayer = displayer
    self.saver = saver

    self.env = Environment()
    self.QNetwork = QNetwork(sess)
    self.buffer = PrioritizedReplayBuffer(Settings.BUFFER_SIZE, Settings.ALPHA)

    self.epsilon = Settings.EPSILON_START
    self.beta = Settings.BETA_START

    self.delta_z = (Settings.MAX_Q - Settings.MIN_Q) / (Settings.NB_ATOMS - 1)
    self.z = np.linspace(Settings.MIN_Q, Settings.MAX_Q, Settings.NB_ATOMS)

    self.best_run = -1e10
    self.n_gif = 0

    print("Agent initialized !\n")
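
# Hedged illustration (assumed values, not from the source): with MIN_Q = -10,
# MAX_Q = 10 and NB_ATOMS = 51, the distributional support built above has
# atoms spaced delta_z = 0.4 apart, the usual C51-style discretization.
delta_z = (10 - (-10)) / (51 - 1)   # 0.4
z = np.linspace(-10, 10, 51)        # 51 atoms: -10.0, -9.6, ..., 10.0
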
def __init__(self, state_size, action_size, path="Learning/Weights/weights.h5",
             new_weights=True, memory_size=100000, replay_start_size=6000,
             epsilon=1, epsilon_min=.05, max_step_for_epsilon_decay=125000*3,
             prioritized_replay=False, alpha=0.6, beta=0.4, beta_inc=0.0000005):
    self.state_size = state_size
    self.action_size = action_size
    self.path = path
    self.use_prio_buffer = prioritized_replay
    if not prioritized_replay:
        self.memory = deque(maxlen=memory_size)
    else:
        self.prio_memory = PrioritizedReplayBuffer(memory_size, alpha)
        self.beta = beta
        self.beta_inc = beta_inc
        # self.beta_schedule = LinearSchedule(max_step_for_epsilon_decay,
        #                                     1, 0.4)
    self.gamma = 0.95  # discount rate
    self.epsilon = epsilon  # exploration rate
    self.epsilon_min = epsilon_min
    self.epsilon_decay = 0.995
    self.max_step_for_lin_epsilon_decay = max_step_for_epsilon_decay
    self.epsilon_decay_linear = self.epsilon / self.max_step_for_lin_epsilon_decay
    self.learning_rate = 0.00025
    self.replay_start_size = replay_start_size
    self.model = self._build_model()
    self.target_model = clone_model(self.model)  # self._build_model()
    self.target_model.compile(optimizer='sgd', loss='mse')
    self.step = 0
    if not new_weights:
        self.model.load_weights(path)
    self.update_target()
    self.callback = Evaluation.create_tensorboard()
def initialize(self):
    # Create the replay buffer
    if self.prioritized_replay:
        self.replay_buffer = PrioritizedReplayBuffer(
            self.buffer_size, alpha=self.prioritized_replay_alpha)
        if self.prioritized_replay_beta_iters is None:
            self.prioritized_replay_beta_iters = self.max_timesteps
        self.beta_schedule = LinearSchedule(
            self.prioritized_replay_beta_iters,
            initial_p=self.prioritized_replay_beta0,
            final_p=1.0)
    else:
        self.replay_buffer = ReplayBuffer(self.buffer_size)
        self.beta_schedule = None

    # Create the schedule for exploration starting from 1.
    # self.exploration = LinearSchedule(
    #     schedule_timesteps=int(self.exploration_fraction * self.max_timesteps),
    #     initial_p=1.0,
    #     final_p=self.exploration_final_eps)
    self.exploration = ConstantSchedule(self.exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    self.update_target()
    return 'initialize() complete'
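
# Hedged usage sketch (assumed, not from the source): how the buffer and beta
# schedule built in initialize() are typically consumed each training step.
# `train_fn` is a hypothetical stand-in for the agent's TD-update function.
def per_train_step(replay_buffer, beta_schedule, train_fn, t, batch_size,
                   prioritized_replay_eps=1e-6):
    # beta anneals toward 1.0, making the importance-sampling correction exact.
    obses_t, actions, rewards, obses_tp1, dones, weights, idxes = \
        replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
    td_errors = train_fn(obses_t, actions, rewards, obses_tp1, dones, weights)
    # New priorities are |TD error| + eps so no transition's priority hits zero.
    replay_buffer.update_priorities(idxes, np.abs(td_errors) + prioritized_replay_eps)
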
def __init__(self, mem_queue, max_timesteps=1000000, buffer_size=50000,
             batch_size=32, prioritized_replay=False,
             prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4,
             prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6):
    threading.Thread.__init__(self)
    self.mem_queue = mem_queue
    self.prioritized_replay = prioritized_replay
    self.batch_size = batch_size
    self.batch_idxes = None
    self.prioritized_replay_eps = prioritized_replay_eps

    # Create the replay buffer
    if prioritized_replay:
        self.replay_buffer = PrioritizedReplayBuffer(
            buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        self.beta_schedule = LinearSchedule(
            prioritized_replay_beta_iters,
            initial_p=prioritized_replay_beta0,
            final_p=1.0)
    else:
        self.replay_buffer = ReplayBuffer(buffer_size)
        self.beta_schedule = None
def create_replay_buffer(buffer_type, size):
    if buffer_type == 'PER':
        replay_buffer = PrioritizedReplayBuffer(size, 0.5)
    elif buffer_type == 'ER':
        replay_buffer = ReplayBuffer(size)
    else:
        replay_buffer = None
    return replay_buffer
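
# Hedged usage sketch (assumed, not from the source): the factory hard-codes
# alpha=0.5 for the 'PER' case; obs/action/reward/next_obs/done are
# placeholder variables.
buffer = create_replay_buffer('PER', 100000)
buffer.add(obs, action, reward, next_obs, float(done))
batch = buffer.sample(32, beta=0.4)  # PER sampling also returns weights and indices
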
class BaselinesPERBuffer(SimpleReplayBuffer):
    def __init__(self, max_replay_buffer_size, alpha):
        self.underlying = PrioritizedReplayBuffer(max_replay_buffer_size, alpha)

    def add_sample(self, observation, action, reward, terminal,
                   next_observation, **kwargs):
        self.underlying.add(observation, action, reward, next_observation,
                            terminal)

    def random_batch(self, batch_size, beta):
        return self.underlying.sample(batch_size, beta)

    def num_steps_can_sample(self):
        return len(self.underlying)

    def update_priorities(self, *args, **kwargs):
        self.underlying.update_priorities(*args, **kwargs)
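
# Hedged usage sketch (assumed, not from the source): the adapter exposes the
# baselines PER buffer through the SimpleReplayBuffer-style interface; note
# that add_sample() reorders (..., terminal, next_observation) into the
# (s, a, r, s', done) layout that baselines' add() expects.
per = BaselinesPERBuffer(max_replay_buffer_size=100000, alpha=0.6)
per.add_sample(obs, action, reward, terminal, next_obs)  # placeholder variables
if per.num_steps_can_sample() >= 32:
    s, a, r, s2, d, weights, idxes = per.random_batch(32, beta=0.4)
    per.update_priorities(idxes, np.abs(td_errors) + 1e-6)  # td_errors assumed
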
def __init__(self, sess):
    print("Initializing the agent...")

    self.sess = sess
    self.env = Environment()
    self.state_size = self.env.get_state_size()[0]
    self.action_size = self.env.get_action_size()
    self.low_bound, self.high_bound = self.env.get_bounds()

    self.buffer = PrioritizedReplayBuffer(parameters.BUFFER_SIZE,
                                          parameters.ALPHA)

    print("Creation of the actor-critic network...")
    self.network = Network(self.state_size, self.action_size,
                           self.low_bound, self.high_bound)
    print("Network created !\n")

    self.epsilon = parameters.EPSILON_START
    self.beta = parameters.BETA_START

    self.best_run = -1e10

    self.sess.run(tf.global_variables_initializer())
def make_replay_buffer(self):
    if self.config["prioritized_replay"]:
        self.replay_buffer = PrioritizedReplayBuffer(
            self.config["buffer_size"],
            alpha=self.config["prioritized_replay_alpha"])
        if self.config["prioritized_replay_beta_iters"] is None:
            self.config["prioritized_replay_beta_iters"] = \
                self.config["max_timesteps"]
        self.beta_schedule = LinearSchedule(
            self.config["prioritized_replay_beta_iters"],
            initial_p=self.config["prioritized_replay_beta0"],
            final_p=1.0)
    else:
        self.replay_buffer = ReplayBuffer(self.config["buffer_size"])
        self.beta_schedule = None
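
# Hedged example config (assumed keys, mirroring exactly what make_replay_buffer
# reads): leaving beta_iters at None falls back to max_timesteps, so beta
# anneals from beta0 to 1.0 over the whole run.
config = {
    "prioritized_replay": True,
    "buffer_size": 50000,
    "prioritized_replay_alpha": 0.6,
    "prioritized_replay_beta0": 0.4,
    "prioritized_replay_beta_iters": None,
    "max_timesteps": 100000,
}
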
def __init__(self, identifier, actions, observation_shape, num_steps,
             x=0.0, y=0.0):
    self.id = identifier
    self.actions = actions
    self.x = x
    self.y = y
    self.yellow_steps = 0
    self.postponed_action = None
    self.obs = None
    self.current_action = None
    self.weights = np.ones(32)
    self.td_errors = np.ones(32)

    self.pre_train = 2500
    self.prioritized = False
    self.prioritized_eps = 1e-4
    self.batch_size = 32
    self.buffer_size = 30000
    self.learning_freq = 500
    self.target_update = 5000

    # Create all the functions necessary to train the model
    self.act, self.train, self.update_target, self.debug = deepq.build_train(
        make_obs_ph=lambda name: TrafficTfInput(observation_shape, name=name),
        q_func=dueling_model,
        num_actions=len(actions),
        optimizer=tf.train.AdamOptimizer(learning_rate=1e-4, epsilon=1e-4),
        gamma=0.99,
        double_q=True,
        scope="deepq" + identifier
    )

    # Create the replay buffer
    if self.prioritized:
        self.replay_buffer = PrioritizedReplayBuffer(size=self.buffer_size,
                                                     alpha=0.6)
        self.beta_schedule = LinearSchedule(num_steps // 4,
                                            initial_p=0.4, final_p=1.0)
    else:
        self.replay_buffer = ReplayBuffer(self.buffer_size)

    # Create the schedule for exploration starting from 1 (every action is
    # random) down to 0.01 (99% of actions are selected according to values
    # predicted by the model).
    self.exploration = LinearSchedule(schedule_timesteps=int(num_steps * 0.1),
                                      initial_p=1.0, final_p=0.01)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    self.update_target()
    num_actions=env.action_space.n,
    optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
    gamma=0.99,
    grad_norm_clipping=10,
    double_q=args.double_q,
    param_noise=args.param_noise
)

approximate_num_iters = args.num_steps / 4
exploration = PiecewiseSchedule([
    (0, 1.0),
    (approximate_num_iters / 50, 0.1),
    (approximate_num_iters / 5, 0.01)
], outside_value=0.01)

if args.prioritized:
    replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size,
                                            args.prioritized_alpha)
    beta_schedule = LinearSchedule(approximate_num_iters,
                                   initial_p=args.prioritized_beta0,
                                   final_p=1.0)
else:
    replay_buffer = ReplayBuffer(args.replay_buffer_size)

U.initialize()
update_target()
num_iters = 0

# Load the model
state = maybe_load_model(savedir, container)
if state is not None:
    num_iters, replay_buffer = state["num_iters"], state["replay_buffer"]
    monitored_env.set_state(state["monitor_state"])

start_time, start_steps = None, None
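
# Hedged illustration (assumed, not from the source): baselines'
# PiecewiseSchedule linearly interpolates between its (t, value) knots and
# clamps to outside_value beyond them, so exploration here decays
# 1.0 -> 0.1 -> 0.01 and then stays at 0.01.
sched = PiecewiseSchedule([(0, 1.0), (5000, 0.1), (50000, 0.01)],
                          outside_value=0.01)
sched.value(2500)   # 0.55, halfway down the first segment
sched.value(80000)  # 0.01, clamped to outside_value
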
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          thompson=True,
          prior="no prior",
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress. set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored at the end of
        the training. If you do not wish to restore the best version at the end of the training
        set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value to 1.0.
        If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" blr_params = BLRParams() # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) # q_func = build_q_func(network, **network_kwargs) q_func = build_q_func_and_features(network, hiddens=[blr_params.feat_dim], **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) #deep mind optimizer # dm_opt = tf.train.RMSPropOptimizer(learning_rate=0.00025,decay=0.95,momentum=0.0,epsilon=0.00001,centered=True) act, train, update_target, debug, blr_additions = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer( learning_rate=lr ), #tf.train.RMSPropOptimizer(learning_rate=lr,momentum=0.95),# gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, thompson=thompson, double_q=thompson) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: # replay_buffer = ReplayBuffer(buffer_size) replay_buffer = ReplayBufferPerActionNew(buffer_size, env.action_space.n) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) num_actions = env.action_space.n if thompson: # Create parameters for Bayesian Regression feat_dim = blr_additions['feat_dim'] num_models = 5 print("num models is: {}".format(num_models)) w_sample = np.random.normal(loc=0, scale=blr_params.sigma, size=(num_actions, num_models, feat_dim)) w_mu = np.zeros((num_actions, feat_dim)) w_cov = np.zeros((num_actions, feat_dim, feat_dim)) for i in range(num_actions): w_cov[i] = blr_params.sigma * np.eye(feat_dim) phiphiT = np.zeros((num_actions, feat_dim, feat_dim), dtype=np.float32) phiphiT_inv = np.zeros((num_actions, feat_dim, feat_dim), dtype=np.float32) for i in range(num_actions): phiphiT[i] = (1 / blr_params.sigma) * np.eye(feat_dim) phiphiT_inv[i] = blr_params.sigma * np.eye(feat_dim) old_phiphiT_inv = [phiphiT_inv for i in range(5)] phiY = np.zeros((num_actions, feat_dim), dtype=np.float32) YY = np.zeros(num_actions) model_idx = np.random.randint(0, num_models, size=num_actions) blr_ops = blr_additions['blr_ops'] blr_ops_old = blr_additions['blr_ops_old'] last_layer_weights = np.zeros((feat_dim, num_actions)) phiphiT0 = np.copy(phiphiT) invgamma_a = [blr_params.a0 for _ in range(num_actions)] invgamma_b = [blr_params.a0 for _ in range(num_actions)] # Initialize the parameters and copy them to the target network. 
    U.initialize()
    # update_target()
    if thompson:
        blr_additions['update_old']()
        if isinstance(blr_additions['update_old_target'], list):
            for update_net in reversed(blr_additions['update_old_target']):
                update_net()
        else:
            blr_additions['update_old_target']()
        if blr_additions['old_networks'] is not None:
            for key in blr_additions['old_networks'].keys():
                blr_additions['old_networks'][key]["update"]()

    episode_rewards = [0.0]
    # episode_Q_estimates = [0.0]
    unclipped_episode_rewards = [0.0]
    # eval_rewards = [0.0]
    old_networks_num = 5
    # episode_pseudo_count = [[0.0] for i in range(old_networks_num)]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False
        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        actions_hist = [0 for _ in range(num_actions)]
        actions_hist_total = [0 for _ in range(num_actions)]
        last_layer_weights_decaying_average = None
        blr_counter = 0

        action_buffers_size = 512
        action_buffers = [ReplayBuffer(action_buffers_size)
                          for _ in range(num_actions)]
        eval_flag = False
        eval_counter = 0
        for t in tqdm(range(total_timesteps)):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(
                    1. - exploration.value(t) +
                    exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            if thompson:
                # for each action sample one of the num_models samples of w
                model_idx = np.random.randint(0, num_models, size=num_actions)
                cur_w = np.zeros((num_actions, feat_dim))
                for i in range(num_actions):
                    cur_w[i] = w_sample[i, model_idx[i]]
                action, estimate = act(np.array(obs)[None], cur_w[None])
                actions_hist[int(action)] += 1
                actions_hist_total[int(action)] += 1
            else:
                action, estimate = act(np.array(obs)[None],
                                       update_eps=update_eps, **kwargs)
            env_action = action
            reset = False
            new_obs, unclipped_rew, done_list, _ = env.step(env_action)
            if isinstance(done_list, list):
                done, real_done = done_list
            else:
                done, real_done = done_list, done_list
            rew = np.sign(unclipped_rew)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            action_buffers[action].add(obs, action, rew, new_obs, float(done))

            if action_buffers[action]._next_idx == 0:
                obses_a, actions_a, rewards_a, obses_tp1_a, dones_a = \
                    replay_buffer.get_samples([i for i in range(action_buffers_size)])
                phiphiT_a, phiY_a, YY_a = blr_ops_old(obses_a, actions_a, rewards_a,
                                                      obses_tp1_a, dones_a)
                phiphiT[action] += phiphiT_a
                phiY[action] += phiY_a
                YY[action] += YY_a

                precision = phiphiT[action] + phiphiT0[action]
                cov = np.linalg.pinv(precision)
                mu = np.array(np.dot(cov, (phiY[action] + np.dot(
                    phiphiT0[action], last_layer_weights[:, action]))))

                invgamma_a[action] += 0.5 * action_buffers_size
                b_upd = 0.5 * YY[action]
                b_upd += 0.5 * np.dot(last_layer_weights[:, action].T,
                                      np.dot(phiphiT0[action], last_layer_weights[:, action]))
                b_upd -= 0.5 * np.dot(mu.T, np.dot(precision, mu))
                invgamma_b[action] += b_upd

                # old_phiphiT_inv_a = [np.tile(oppTi[action], (action_buffers_size, 1, 1))
                #                      for oppTi in old_phiphiT_inv]
                # old_pseudo_count = blr_additions['old_pseudo_counts'](obses_a, *old_phiphiT_inv_a)
                # old_pseudo_count = np.sum(old_pseudo_count, axis=-1)
                # for i in range(old_networks_num):
                #     idx = ((blr_counter - 1) - i) % old_networks_num  # arrange networks from newest to oldest
                #     episode_pseudo_count[i][-1] += old_pseudo_count[idx]

            # if real_done:
            #     for a in range(num_actions):
            #         if action_buffers[a]._next_idx != 0:
            #             obses_a, actions_a, rewards_a, obses_tp1_a, dones_a = \
            #                 replay_buffer.get_samples([i for i in range(action_buffers[a]._next_idx)])
            #             nk = obses_a.shape[0]
            #
            #             # old_phiphiT_inv_a = [np.tile(oppTi[action], (nk, 1, 1)) for oppTi in old_phiphiT_inv]
            #             # old_pseudo_count = blr_additions['old_pseudo_counts'](obses_a, *old_phiphiT_inv_a)
            #             # old_pseudo_count = np.sum(old_pseudo_count, axis=-1)
            #             # for i in range(old_networks_num):
            #             #     idx = ((blr_counter - 1) - i) % old_networks_num  # arrange networks from newest to oldest
            #             #     episode_pseudo_count[i][-1] += old_pseudo_count[idx]
            #
            #             phiphiT_a, phiY_a, YY_a = blr_ops_old(obses_a, actions_a, rewards_a, obses_tp1_a, dones_a)
            #             phiphiT[a] += phiphiT_a
            #             phiY[a] += phiY_a
            #             YY[a] += YY_a
            #
            #             action_buffers[a]._next_idx = 0

            obs = new_obs
            episode_rewards[-1] += rew
            # episode_Q_estimates[-1] += estimate
            unclipped_episode_rewards[-1] += unclipped_rew

            if t % 250000 == 0 and t > 0:
                eval_flag = True
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                # episode_Q_estimates.append(0.0)
                reset = True
                if real_done:
                    unclipped_episode_rewards.append(0.0)
                    # for i in range(old_networks_num):
                    #     episode_pseudo_count[i].append(0.0)
                    # every time full episode ends run eval episode
                    if eval_flag:
                        te = 0
                        print("running evaluation")
                        eval_rewards = [0.0]
                        while te < 125000:  # for te in range(125000):
                            real_done = False
                            print(te)
                            while not real_done:
                                action, _ = blr_additions['eval_act'](np.array(obs)[None])
                                new_obs, unclipped_rew, done_list, _ = env.step(action)
                                if isinstance(done_list, list):
                                    done, real_done = done_list
                                else:
                                    done, real_done = done_list, done_list
                                eval_rewards[-1] += unclipped_rew
                                obs = new_obs
                                te += 1
                                if done:
                                    obs = env.reset()
                            if real_done:
                                eval_rewards.append(0.0)
                                obs = env.reset()
                        eval_rewards.pop()
                        mean_reward_eval = round(np.mean(eval_rewards), 2)
                        logger.record_tabular("mean eval episode reward", mean_reward_eval)
                        logger.dump_tabular()
                        eval_flag = False

            # eval_counter += 1
            # if eval_counter % 10 == 0:
            #     if t > learning_starts:
            #         real_done = False
            #         while not real_done:
            #             action, _ = blr_additions['eval_act'](np.array(obs)[None])
            #             new_obs, unclipped_rew, done_list, _ = env.step(action)
            #             done, real_done = done_list
            #             eval_rewards[-1] += unclipped_rew
            #             obs = new_obs
            #         eval_rewards.append(0.0)
            #         obs = env.reset()

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = \
                        replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if thompson:
                if t > learning_starts and \
                        t % (blr_params.update_w * target_network_update_freq) == 0:
                    phiphiT_inv = np.zeros_like(phiphiT)
                    for i in range(num_actions):
                        try:
                            phiphiT_inv[i] = np.linalg.inv(phiphiT[i])
                        except:
                            phiphiT_inv[i] = np.linalg.pinv(phiphiT[i])
                    old_phiphiT_inv[blr_counter % 5] = phiphiT_inv

                    llw = sess.run(blr_additions['last_layer_weights'])
                    phiphiT, phiY, phiphiT0, last_layer_weights, YY, invgamma_a, invgamma_b = \
                        BayesRegression(phiphiT, phiY, replay_buffer,
                                        blr_additions['feature_extractor'],
                                        blr_additions['target_feature_extractor'],
                                        num_actions, blr_params, w_mu, w_cov, llw,
                                        prior=prior,
                                        blr_ops=blr_additions['blr_ops'],
                                        sdp_ops=blr_additions['sdp_ops'],
                                        old_networks=blr_additions['old_networks'],
                                        blr_counter=blr_counter,
                                        old_feat=blr_additions['old_feature_extractor'],
                                        a=invgamma_a)
                    blr_counter += 1
                    if seed is not None:
                        print('seed is {}'.format(seed))

                    blr_additions['update_old']()
                    if isinstance(blr_additions['update_old_target'], list):
                        for update_net in reversed(blr_additions['update_old_target']):
                            update_net()
                    else:
                        blr_additions['update_old_target']()
                    if blr_additions['old_networks'] is not None:
                        blr_additions['old_networks'][blr_counter % 5]["update"]()

            if thompson:
                if t > 0 and t % blr_params.sample_w == 0:
                    # sampling num_models samples of w
                    if debug:
                        print(actions_hist)
                    else:
                        if t % 10000 == 0:
                            print(actions_hist)
                    actions_hist = [0 for _ in range(num_actions)]

                    # if t > 1000000:
                    adaptive_sigma = True
                    # else:
                    #     adaptive_sigma = False

                    cov_norms = []
                    cov_norms_no_sigma = []
                    sampled_sigmas = []
                    for i in range(num_actions):
                        if prior == 'no prior' or last_layer_weights is None:
                            cov = np.linalg.inv(phiphiT[i])
                            mu = np.array(np.dot(cov, phiY[i]))
                        elif prior == 'last layer':
                            cov = np.linalg.inv(phiphiT[i])
                            mu = np.array(np.dot(cov, (phiY[i] +
                                (1 / blr_params.sigma) * last_layer_weights[:, i])))
                        elif prior == 'single sdp':
                            try:
                                cov = np.linalg.inv(phiphiT[i] + phiphiT0)
                            except:
                                print("singular matrix using pseudo inverse")
                                cov = np.linalg.pinv(phiphiT[i] + phiphiT0)
                            mu = np.array(np.dot(cov, (phiY[i] +
                                np.dot(phiphiT0, last_layer_weights[:, i]))))
                        elif prior == 'sdp' or prior == 'linear':
                            try:
                                cov = np.linalg.inv(phiphiT[i] + phiphiT0[i])
                            except:
                                # print("singular matrix")
                                cov = np.linalg.pinv(phiphiT[i] + phiphiT0[i])
                            mu = np.array(np.dot(cov, (phiY[i] +
                                np.dot(phiphiT0[i], last_layer_weights[:, i]))))
                        else:
                            print("No valid prior")
                            exit(0)
                        for j in range(num_models):
                            if adaptive_sigma:
                                sigma = invgamma_b[i] * invgamma.rvs(invgamma_a[i])
                            else:
                                sigma = blr_params.sigma
                            try:
                                w_sample[i, j] = np.random.multivariate_normal(mu, sigma * cov)
                            except:
                                w_sample[i, j] = mu
                        cov_norms.append(np.linalg.norm(sigma * cov))
                        cov_norms_no_sigma.append(np.linalg.norm(cov))
                        sampled_sigmas.append(sigma)

                    if t % 7 == 0:
                        for i, cov_norm in enumerate(cov_norms):
                            print("cov*sigma norm for action {}: {}, visits: {}".format(
                                i, cov_norm, len(replay_buffer.buffers[i])))

            # if t > learning_starts and t % target_network_update_freq == 0:
            #     # Update target network periodically.
            #     print(update_target)
            #     update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            mean_10ep_reward = round(np.mean(episode_rewards[-11:-1]), 1)
            mean_100ep_reward_unclipped = round(np.mean(unclipped_episode_rewards[-101:-1]), 1)
            mean_10ep_reward_unclipped = round(np.mean(unclipped_episode_rewards[-11:-1]), 1)
            # mean_100ep_reward_eval = round(np.mean(eval_rewards[-101:-1]), 1)
            # mean_10ep_reward_eval = round(np.mean(eval_rewards[-11:-1]), 1)
            # mean_100ep_est = round(np.mean(episode_Q_estimates[-101:-1]), 1)
            # mean_10ep_est = round(np.mean(episode_Q_estimates[-11:-1]), 1)
            num_episodes = len(episode_rewards)

            # mean_10ep_pseudo_count = [0.0 for _ in range(old_networks_num)]
            # mean_100ep_pseudo_count = [0.0 for _ in range(old_networks_num)]
            # for i in range(old_networks_num):
            #     mean_10ep_pseudo_count[i] = round(np.log(np.mean(episode_pseudo_count[i][-11:-1])), 1)
            #     mean_100ep_pseudo_count[i] = round(np.log(np.mean(episode_pseudo_count[i][-101:-1])), 1)

            # if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
            if t % 10000 == 0 and t > 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("mean 10 episode reward", mean_10ep_reward)
                logger.record_tabular("mean 100 unclipped episode reward", mean_100ep_reward_unclipped)
                logger.record_tabular("mean 10 unclipped episode reward", mean_10ep_reward_unclipped)
                # logger.record_tabular("mean 100 eval episode reward", mean_100ep_reward_eval)
                # logger.record_tabular("mean 10 eval episode reward", mean_10ep_reward_eval)
                # for i in range(old_networks_num):
                #     logger.record_tabular("mean 10 episode pseudo count for -{} net".format(i + 1), mean_10ep_pseudo_count[i])
                #     logger.record_tabular("mean 100 episode pseudo count for -{} net".format(i + 1), mean_100ep_pseudo_count[i])
                # logger.record_tabular("mean 100 episode Q estimates", mean_100ep_est)
                # logger.record_tabular("mean 10 episode Q estimates", mean_10ep_est)
                logger.dump_tabular()
            if t % 7 == 0:
                print("len(unclipped_episode_rewards)")
                print(len(unclipped_episode_rewards))
                print("len(episode_rewards)")
                print(len(episode_rewards))

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
def learn(env,
          q_func,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress. set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored at the end of
        the training. If you do not wish to restore the best version at the end of the training
        set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value to 1.0.
        If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. 
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_state(model_file)

    return act
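
# Hedged usage sketch (assumed, not from the source): calling learn() with
# prioritized replay enabled; `my_q_func` is a hypothetical model matching the
# (observation_in, num_actions, scope, reuse) signature documented above.
import gym
env = gym.make("CartPole-v0")
act = learn(env, q_func=my_q_func, max_timesteps=100000,
            prioritized_replay=True, prioritized_replay_alpha=0.6,
            prioritized_replay_beta0=0.4)
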
class Agent:
    def __init__(self, sess):
        print("Initializing the agent...")

        self.sess = sess
        self.env = Environment()
        self.state_size = self.env.get_state_size()
        self.action_size = self.env.get_action_size()

        print("Creation of the main QNetwork...")
        self.mainQNetwork = QNetwork(self.state_size, self.action_size, 'main')
        print("Main QNetwork created !\n")

        print("Creation of the target QNetwork...")
        self.targetQNetwork = QNetwork(self.state_size, self.action_size, 'target')
        print("Target QNetwork created !\n")

        self.buffer = PrioritizedReplayBuffer(parameters.BUFFER_SIZE,
                                              parameters.ALPHA)

        self.epsilon = parameters.EPSILON_START
        self.beta = parameters.BETA_START

        self.initial_learning_rate = parameters.LEARNING_RATE

        trainables = tf.trainable_variables()
        self.update_target_ops = updateTargetGraph(trainables)

        self.nb_ep = 1
        self.best_run = -1e10

    def pre_train(self):
        print("Beginning of the pre-training...")

        for i in range(parameters.PRE_TRAIN_STEPS):
            s = self.env.reset()
            done = False
            episode_step = 0
            episode_reward = 0

            while episode_step < parameters.MAX_EPISODE_STEPS and not done:
                a = random.randint(0, self.action_size - 1)
                s_, r, done, info = self.env.act(a)
                self.buffer.add(s, a, r, s_, done)

                s = s_
                episode_reward += r
                episode_step += 1

            if i % 100 == 0:
                print("\tPre-train step n", i)

            self.best_run = max(self.best_run, episode_reward)

        print("End of the pre-training !")

    def run(self):
        print("Beginning of the run...")

        self.pre_train()

        self.total_steps = 0
        self.nb_ep = 1

        while self.nb_ep < parameters.TRAINING_STEPS:
            self.learning_rate = self.initial_learning_rate * \
                (parameters.TRAINING_STEPS - self.nb_ep) / \
                parameters.TRAINING_STEPS

            s = self.env.reset()
            episode_reward = 0
            done = False

            memory = deque()
            discount_R = 0

            episode_step = 0
            max_step = parameters.MAX_EPISODE_STEPS + \
                self.nb_ep // parameters.EP_ELONGATION

            # Render parameters
            self.env.set_render(self.nb_ep % parameters.RENDER_FREQ == 0)

            while episode_step < max_step and not done:
                if random.random() < self.epsilon:
                    a = random.randint(0, self.action_size - 1)
                else:
                    a = self.sess.run(self.mainQNetwork.predict,
                                      feed_dict={self.mainQNetwork.inputs: [s]})
                    a = a[0]

                s_, r, done, info = self.env.act(a)
                episode_reward += r

                memory.append((s, a, r, s_, done))

                if len(memory) > parameters.N_STEP_RETURN:
                    s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft()
                    discount_R = r_mem
                    for i, (si, ai, ri, s_i, di) in enumerate(memory):
                        discount_R += ri * parameters.DISCOUNT ** (i + 1)
                    self.buffer.add(s_mem, a_mem, discount_R, s_, done)

                if episode_step % parameters.TRAINING_FREQ == 0:
                    train_batch = self.buffer.sample(parameters.BATCH_SIZE,
                                                     self.beta)
                    # Increase beta
                    if self.beta <= parameters.BETA_STOP:
                        self.beta += parameters.BETA_INCR

                    feed_dict = {self.mainQNetwork.inputs: train_batch[0]}
                    oldQvalues = self.sess.run(self.mainQNetwork.Qvalues,
                                               feed_dict=feed_dict)
                    tmp = [0] * len(oldQvalues)
                    for i, oldQvalue in enumerate(oldQvalues):
                        tmp[i] = oldQvalue[train_batch[1][i]]
                    oldQvalues = tmp

                    feed_dict = {self.mainQNetwork.inputs: train_batch[3]}
                    mainQaction = self.sess.run(self.mainQNetwork.predict,
                                                feed_dict=feed_dict)

                    feed_dict = {self.targetQNetwork.inputs: train_batch[3]}
                    targetQvalues = self.sess.run(self.targetQNetwork.Qvalues,
                                                  feed_dict=feed_dict)

                    # Done multiplier:
                    # equals 0 if the episode was done, 1 otherwise
                    done_multiplier = (1 - train_batch[4])
                    doubleQ = targetQvalues[range(parameters.BATCH_SIZE),
                                            mainQaction]
                    targetQvalues = train_batch[2] + \
                        parameters.DISCOUNT * doubleQ * done_multiplier

                    errors = np.square(targetQvalues - oldQvalues) + 1e-6
                    self.buffer.update_priorities(train_batch[6], errors)

                    feed_dict = {self.mainQNetwork.inputs: train_batch[0],
                                 self.mainQNetwork.Qtarget: targetQvalues,
                                 self.mainQNetwork.actions: train_batch[1],
                                 self.mainQNetwork.learning_rate: self.learning_rate}
                    _ = self.sess.run(self.mainQNetwork.train,
                                      feed_dict=feed_dict)

                    update_target(self.update_target_ops, self.sess)

                s = s_
                episode_step += 1
                self.total_steps += 1

                # Decay epsilon
                if self.epsilon > parameters.EPSILON_STOP:
                    self.epsilon -= parameters.EPSILON_DECAY

            DISPLAYER.add_reward(episode_reward)
            # if episode_reward > self.best_run and \
            #         self.nb_ep > 50:
            #     self.best_run = episode_reward
            #     print("Save best", episode_reward)
            #     SAVER.save('best')
            #     self.play(1)

            self.total_steps += 1

            if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0:
                print('Episode %2i, Reward: %7.3f, Steps: %i, Epsilon: %.3f'
                      ', Max steps: %i, Learning rate: %g' % (
                          self.nb_ep, episode_reward, episode_step,
                          self.epsilon, max_step, self.learning_rate))

            # Save the model
            if self.nb_ep % parameters.SAVE_FREQ == 0:
                SAVER.save(self.nb_ep)

            self.nb_ep += 1

    def play(self, number_run):
        print("Playing for", number_run, "runs")

        try:
            for i in range(number_run):
                self.env.set_render(True)
                s = self.env.reset()
                episode_reward = 0
                done = False

                episode_step = 0
                max_step = parameters.MAX_EPISODE_STEPS + \
                    self.nb_ep // parameters.EP_ELONGATION

                while episode_step < max_step and not done:
                    a = self.sess.run(self.mainQNetwork.predict,
                                      feed_dict={self.mainQNetwork.inputs: [s]})
                    a = a[0]
                    s, r, done, info = self.env.act(a)

                    episode_reward += r
                    episode_step += 1

                print("Episode reward :", episode_reward)

        except KeyboardInterrupt as e:
            pass

        except Exception as e:
            print("Exception :", e)

        finally:
            self.env.set_render(False)
            print("End of the demo")
            self.env.close()

    def stop(self):
        self.env.close()
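
# Hedged usage sketch (assumed, not from the source): a minimal driver for the
# Agent above; the SAVER/DISPLAYER globals it uses come from the surrounding
# project and are not shown here.
with tf.Session() as sess:
    agent = Agent(sess)
    sess.run(tf.global_variables_initializer())
    agent.run()
    agent.stop()
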
def __init__(self,
             env,
             gamma,
             total_timesteps,
             network='mlp',
             lr=5e-4,
             buffer_size=50000,
             exploration_fraction=0.1,
             exploration_final_eps=0.02,
             train_freq=1,
             batch_size=32,
             learning_starts=1000,
             target_network_update_freq=500,
             prioritized_replay=False,
             prioritized_replay_alpha=0.6,
             prioritized_replay_beta0=0.4,
             prioritized_replay_beta_iters=None,
             prioritized_replay_eps=1e-6,
             param_noise=False,
             **network_kwargs):
    """DQN wrapper to train option policies

    Parameters
    -------
    env: gym.Env
        environment to train on
    gamma: float
        discount factor
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    total_timesteps: int
        number of env steps to optimize for
    lr: float
        learning rate for adam optimizer
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value to 1.0.
        If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    **network_kwargs
        additional keyword arguments to pass to the network builder.
""" # Adjusting hyper-parameters by considering the number of options policies to learn num_options = env.get_number_of_options() buffer_size = num_options * buffer_size batch_size = num_options * batch_size q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.option_observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) self.num_actions = env.option_action_space.n act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=self.num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, scope="options") act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': self.num_actions, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int( exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() # Variables that are used during learning self.act = act self.train = train self.update_target = update_target self.replay_buffer = replay_buffer self.beta_schedule = beta_schedule self.exploration = exploration self.param_noise = param_noise self.train_freq = train_freq self.batch_size = batch_size self.learning_starts = learning_starts self.target_network_update_freq = target_network_update_freq self.prioritized_replay = prioritized_replay self.prioritized_replay_alpha = prioritized_replay_alpha self.prioritized_replay_beta0 = prioritized_replay_beta0 self.prioritized_replay_beta_iters = prioritized_replay_beta_iters self.prioritized_replay_eps = prioritized_replay_eps
def learn(env,
          q_func,
          num_actions=4,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          param_noise=False,
          param_noise_threshold=0.05,
          callback=None):
    """Train a deepq model.

    Parameters
    -------
    env: pysc2.env.SC2Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress. set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored at the end of
        the training. If you do not wish to restore the best version at the end of the training
        set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value to 1.0.
        If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    callback: (locals, globals) -> None
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((32, 32), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq") # # act_y, train_y, update_target_y, debug_y = deepq.build_train( # make_obs_ph=make_obs_ph, # q_func=q_func, # num_actions=num_actions, # optimizer=tf.train.AdamOptimizer(learning_rate=lr), # gamma=gamma, # grad_norm_clipping=10, # scope="deepq_y" # ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) # replay_buffer_y = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule( prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) # beta_schedule_y = LinearSchedule(prioritized_replay_beta_iters, # initial_p=prioritized_replay_beta0, # final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) # replay_buffer_y = ReplayBuffer(buffer_size) beta_schedule = None # beta_schedule_y = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule( schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() # update_target_y() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # Select all marines first obs = env.step( actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] screen = (player_relative == _PLAYER_NEUTRAL).astype(int) #+ path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if (player[0] > 16): screen = shift(LEFT, player[0] - 16, screen) elif (player[0] < 16): screen = shift(RIGHT, 16 - player[0], screen) if (player[1] > 16): screen = shift(UP, player[1] - 16, screen) elif (player[1] < 16): screen = shift(DOWN, 16 - player[1], screen) reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join("model/", "mineral_shards") print(model_file) for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. 
- exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act( np.array(screen)[None], update_eps=update_eps, **kwargs)[0] # action_y = act_y(np.array(screen)[None], update_eps=update_eps, **kwargs)[0] reset = False coord = [player[0], player[1]] rew = 0 if (action == 0): #UP if (player[1] >= 8): coord = [player[0], player[1] - 8] #path_memory_[player[1] - 16 : player[1], player[0]] = -1 elif (player[1] > 0): coord = [player[0], 0] #path_memory_[0 : player[1], player[0]] = -1 #else: # rew -= 1 elif (action == 1): #DOWN if (player[1] <= 23): coord = [player[0], player[1] + 8] #path_memory_[player[1] : player[1] + 16, player[0]] = -1 elif (player[1] > 23): coord = [player[0], 31] #path_memory_[player[1] : 63, player[0]] = -1 #else: # rew -= 1 elif (action == 2): #LEFT if (player[0] >= 8): coord = [player[0] - 8, player[1]] #path_memory_[player[1], player[0] - 16 : player[0]] = -1 elif (player[0] < 8): coord = [0, player[1]] #path_memory_[player[1], 0 : player[0]] = -1 #else: # rew -= 1 elif (action == 3): #RIGHT if (player[0] <= 23): coord = [player[0] + 8, player[1]] #path_memory_[player[1], player[0] : player[0] + 16] = -1 elif (player[0] > 23): coord = [31, player[1]] #path_memory_[player[1], player[0] : 63] = -1 if _MOVE_SCREEN not in obs[0].observation["available_actions"]: obs = env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) new_action = [ sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord]) ] # else: # new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] new_screen = (player_relative == _PLAYER_NEUTRAL).astype( int) #+ path_memory player_y, player_x = ( player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if (player[0] > 16): new_screen = shift(LEFT, player[0] - 16, new_screen) elif (player[0] < 16): new_screen = shift(RIGHT, 16 - player[0], new_screen) if (player[1] > 16): new_screen = shift(UP, player[1] - 16, new_screen) elif (player[1] < 16): new_screen = shift(DOWN, 16 - player[1], new_screen) rew = obs[0].reward done = obs[0].step_type == environment.StepType.LAST # Store transition in the replay buffer. replay_buffer.add(screen, action, rew, new_screen, float(done)) # replay_buffer_y.add(screen, action_y, rew, new_screen, float(done)) screen = new_screen episode_rewards[-1] += rew reward = episode_rewards[-1] if done: obs = env.reset() player_relative = obs[0].observation["screen"][ _PLAYER_RELATIVE] screen = (player_relative == _PLAYER_NEUTRAL).astype( int) #+ path_memory player_y, player_x = ( player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] # Select all marines first env.step(actions=[ sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL]) ]) episode_rewards.append(0.0) #episode_minerals.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience # experience_y = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) # (obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y, batch_idxes_y) = experience_y else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None # obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y = replay_buffer_y.sample(batch_size) # weights_y, batch_idxes_y = np.ones_like(rewards_y), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) # td_errors_y = train_x(obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y, weights_y) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps # new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) # replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() # update_target_y() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}". format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act)
def __init__( self, env, # observation_space, # action_space, network=None, scope='deepq', seed=None, lr=None, # Was 5e-4 lr_mc=5e-4, total_episodes=None, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=None, # was 0.02 train_freq=1, train_log_freq=100, batch_size=32, print_freq=100, checkpoint_freq=10000, # checkpoint_path=None, learning_starts=1000, gamma=None, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, save_path=None, load_path=None, save_reward_threshold=None, **network_kwargs): super().__init__(env, seed) if train_log_freq % train_freq != 0: raise ValueError( 'Train log frequency should be a multiple of train frequency') elif checkpoint_freq % train_log_freq != 0: raise ValueError( 'Checkpoint freq should be a multiple of train log frequency, or model saving will not be logged properly' ) print('init dqnlearningagent') self.train_log_freq = train_log_freq self.scope = scope self.learning_starts = learning_starts self.save_reward_threshold = save_reward_threshold self.batch_size = batch_size self.train_freq = train_freq self.total_episodes = total_episodes self.total_timesteps = total_timesteps # TODO: scope not doing anything. if network is None and 'lunar' in env.unwrapped.spec.id.lower(): if lr is None: lr = 1e-3 if exploration_final_eps is None: exploration_final_eps = 0.02 #exploration_fraction = 0.1 #exploration_final_eps = 0.02 target_network_update_freq = 1500 #print_freq = 100 # num_cpu = 5 if gamma is None: gamma = 0.99 network = 'mlp' network_kwargs = { 'num_layers': 2, 'num_hidden': 64, } self.target_network_update_freq = target_network_update_freq self.gamma = gamma get_session() # set_global_seeds(seed) # TODO: Check whether below is ok to substitue for set_global_seeds. 
try: import tensorflow as tf tf.set_random_seed(seed) except ImportError: pass self.q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph def make_obs_ph(name): return ObservationInput(env.observation_space, name=name) act, self.train, self.train_mc, self.update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=self.q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), optimizer_mc=tf.train.AdamOptimizer(learning_rate=lr_mc), gamma=gamma, grad_norm_clipping=10, param_noise=False, scope=scope, # reuse=reuse, ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': self.q_func, 'num_actions': env.action_space.n, } self._act = ActWrapper(act, act_params) self.print_freq = print_freq self.checkpoint_freq = checkpoint_freq # Create the replay buffer self.prioritized_replay = prioritized_replay self.prioritized_replay_eps = prioritized_replay_eps if self.prioritized_replay: self.replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha, ) if prioritized_replay_beta_iters is None: if total_episodes is not None: raise NotImplementedError( 'Need to check how to set exploration based on episodes' ) prioritized_replay_beta_iters = total_timesteps self.beta_schedule = LinearSchedule( prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0, ) else: self.replay_buffer = ReplayBuffer(buffer_size) self.replay_buffer_mc = ReplayBuffer(buffer_size) self.beta_schedule = None # Create the schedule for exploration starting from 1. self.exploration = LinearSchedule( schedule_timesteps=int( exploration_fraction * total_timesteps if total_episodes is None else total_episodes), initial_p=1.0, final_p=exploration_final_eps, ) # Initialize the parameters and copy them to the target network. 
U.initialize() self.update_target() self.episode_lengths = [0] self.episode_rewards = [0.0] self.discounted_episode_rewards = [0.0] self.start_values = [None] self.lunar_crashes = [0] self.lunar_goals = [0] self.saved_mean_reward = None self.td = None if save_path is None: self.td = tempfile.mkdtemp() outdir = self.td self.model_file = os.path.join(outdir, "model") else: outdir = os.path.dirname(save_path) os.makedirs(outdir, exist_ok=True) self.model_file = save_path print('DQN agent saving to:', self.model_file) self.model_saved = False if tf.train.latest_checkpoint(outdir) is not None: # TODO: Check scope addition load_variables(self.model_file, scope=self.scope) # load_variables(self.model_file) logger.log('Loaded model from {}'.format(self.model_file)) self.model_saved = True raise Exception('Check that we want to load previous model') elif load_path is not None: # TODO: Check scope addition load_variables(load_path, scope=self.scope) # load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) self.train_log_file = None if save_path and load_path is None: self.train_log_file = self.model_file + '.log.csv' with open(self.train_log_file, 'w') as f: cols = [ 'episode', 't', 'td_max', 'td_mean', '100ep_r_mean', '100ep_r_mean_discounted', '100ep_v_mean', '100ep_n_crashes_mean', '100ep_n_goals_mean', 'saved_model', 'smoothing', ] f.write(','.join(cols) + '\n') self.training_episode = 0 self.t = 0 self.episode_t = 0 """ n = observation_space.n m = action_space.n self.Q = np.zeros((n, m)) self._lr_schedule = lr_schedule self._eps_schedule = eps_schedule self._boltzmann_schedule = boltzmann_schedule """ # Make placeholder for Q values self.q_values = debug['q_values']
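# Both the exploration epsilon and the prioritized-replay beta above are driven
# by LinearSchedule objects. A sketch of that interface as it is used here
# (linear interpolation from initial_p to final_p, then held constant):
class LinearScheduleSketch:
    def __init__(self, schedule_timesteps, initial_p=1.0, final_p=0.02):
        self.schedule_timesteps = schedule_timesteps
        self.initial_p = initial_p
        self.final_p = final_p

    def value(self, t):
        # Fraction of the schedule consumed, clamped to [0, 1].
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)

# e.g. annealing beta from 0.4 toward full importance-sampling correction:
#   beta_schedule = LinearScheduleSketch(100000, initial_p=0.4, final_p=1.0)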
if __name__ == '__main__':
    with U.make_session(8):
        # Create the environment
        env = gym.make("CartPole-v0")
        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: U.BatchInput(env.observation_space.shape,
                                                  name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
            param_noise=False
        )
        # Create the replay buffer
        replay_buffer = PrioritizedReplayBuffer(50000, alpha=0.6)
        # Create the schedule for exploration starting from 1 (every action is
        # random) down to 0.02 (98% of actions are selected according to values
        # predicted by the model).
        exploration = LinearSchedule(schedule_timesteps=10000,
                                     initial_p=1.0, final_p=0.02)
        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()
        tvars = tf.trainable_variables()
        tvars_vals = U.get_session().run(tvars)
        for var, val in zip(tvars, tvars_vals):
            print(var.name, val)
        episode_rewards = [0.0]
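# Note: the snippet above creates a PrioritizedReplayBuffer but no beta
# schedule, and PrioritizedReplayBuffer.sample requires an explicit beta. A
# hedged sketch of how such a loop is typically completed, reusing the names
# defined above -- this is an assumption, not the original continuation:
import itertools
import numpy as np

beta_schedule = LinearSchedule(schedule_timesteps=10000, initial_p=0.4, final_p=1.0)
obs = env.reset()
for t in itertools.count():
    action = act(obs[None], update_eps=exploration.value(t))[0]
    new_obs, rew, done, _ = env.step(action)
    replay_buffer.add(obs, action, rew, new_obs, float(done))
    obs = new_obs
    episode_rewards[-1] += rew
    if done:
        obs = env.reset()
        episode_rewards.append(0.0)
    if t > 1000:
        (obses_t, actions, rewards, obses_tp1, dones,
         weights, batch_idxes) = replay_buffer.sample(32, beta=beta_schedule.value(t))
        td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
        replay_buffer.update_priorities(batch_idxes, np.abs(td_errors) + 1e-6)
    if t % 500 == 0:
        update_target()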
def learn(env, q_func, beta1=0.9, beta2=0.999, epsilon=1e-8, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, exploration_schedule=None, start_lr=5e-4, end_lr=5e-4, start_step=0, end_step=1, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, model_directory=None, lamda=0.1): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer beta1: float beta1 parameter for adam beta2: float beta2 parameter for adam epsilon: float epsilon parameter for adam max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability exploration_schedule: Schedule a schedule for exploration chance train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return ObservationInput(env.observation_space, name=name) global_step = tf.Variable(0, trainable=False) lr = interpolated_decay(start_lr, end_lr, global_step, start_step, end_step) act, train, update_target, debug = multiheaded_build_graph.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr, beta1=beta1, beta2=beta2, epsilon=epsilon), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, global_step=global_step, lamda=lamda, ) tf.summary.FileWriter(logger.get_dir(), graph_def=sess.graph_def) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. if exploration_schedule is None: exploration = LinearSchedule(schedule_timesteps=int( exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) else: exploration = exploration_schedule # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False if model_directory is None: model_directory = pathlib.Path(td) model_file = str(model_directory / "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] if isinstance(env.action_space, gym.spaces.MultiBinary): env_action = np.zeros(env.action_space.n) env_action[action] = 1 else: env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}".format(
                                saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    act.save(str(model_directory / "act_model.pkl"))
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            U.load_state(model_file)

    return act
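# The `weights` argument threaded through train() above carries the
# importance-sampling corrections returned by the prioritized buffer. The
# actual loss lives inside build_train and is not shown here; a numpy-only
# illustration of how such weights typically scale a per-sample Huber loss:
import numpy as np

def huber(x, delta=1.0):
    return np.where(np.abs(x) <= delta,
                    0.5 * np.square(x),
                    delta * (np.abs(x) - 0.5 * delta))

def weighted_td_loss(td_errors, weights):
    # w_i = (N * P(i))^(-beta), normalized inside the buffer by max_j w_j,
    # undoes the bias introduced by non-uniform sampling.
    return float(np.mean(weights * huber(td_errors)))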
def learn(env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None): sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph if(env.is_single): observation_space_shape = env.observation_space.shape num_actions = env.action_space.n else: observation_space_shape = env.observation_space[0].shape num_actions = env.action_space[0].n num_agents=env.agentSize def make_obs_ph(name): return BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size*num_agents, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size*num_agents) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. - exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action=[] qval=[] for i in range(num_agents): prediction=act(np.array(obs[i])[None], update_eps=update_eps, **kwargs) #print(prediction[0],prediction[1][0]) action.append(prediction[0][0]) qval.append(prediction[1][0]) env_action = action reset = False new_obs, rew, done, _ = env.step(env_action,qval) # Store transition in the replay buffer. 
for i in range(num_agents): replay_buffer.add(obs[i], action[i], rew, new_obs[i], float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t*num_agents % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None #print(obses_t.shape,actions.shape,rewards.shape,obses_tp1.shape,dones.shape) td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) load_state(model_file) return act,episode_rewards
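# Every variant above computes the same parameter-noise threshold when
# param_noise is enabled: the perturbation scale is adapted so that the KL
# divergence between the perturbed and non-perturbed policies matches that of
# eps-greedy exploration with eps = exploration.value(t) (Plappert et al.,
# 2017, Appendix C.1). A direct transcription of the repeated expression:
import numpy as np

def param_noise_threshold(eps, num_actions):
    """-log(1 - eps + eps / |A|), as computed inline in the loops above."""
    return -np.log(1.0 - eps + eps / float(num_actions))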
def pok_learn(env, q_func, lr=5e-4, max_timesteps=1000, #DP DEL 000 buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1500, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = tf.Session() sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape #ok def make_obs_ph(name): return U.BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, #ok optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, #ok } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. #DP - don't need this # exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), # initial_p=1.0, # final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] td_error_list = [] saved_mean_reward = None saved_td_error = None obs = env.reset() #ok reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): #DP this somehow uses exploration break #DP - not needed # Take action and update exploration to the newest value # kwargs = {} # if not param_noise: # update_eps = exploration.value(t) # update_param_noise_threshold = 0. # else: # update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. # update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) # kwargs['reset'] = reset # kwargs['update_param_noise_threshold'] = update_param_noise_threshold # kwargs['update_param_noise_scale'] = True action = np.int64(env.action_space.sample()) #act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] #DP this is what we replace - what does act do?? env_action = action #DP action reset = False new_obs, rew, done, _ = env.step(env_action) #ok # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                td_error_list.append(np.mean(np.abs(td_errors)))

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            # Track the trailing TD error alongside the episode reward. The
            # window covers 1000 environment steps' worth of batches (the
            # original sliced 100/batch_size entries, which did not match the
            # "1000 step" name).
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            mean_1000step_tderror = None
            if len(td_error_list) > 1000 / batch_size:
                mean_1000step_tderror = round(
                    np.mean(td_error_list[-int(round(1000 / batch_size)):-1]), 5)
            num_episodes = len(episode_rewards)

            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                if mean_1000step_tderror is not None:
                    logger.record_tabular("mean abs 1000 step td error",
                                          mean_1000step_tderror)
                # Actions are replayed from hand logs, so no time is spent exploring.
                logger.record_tabular("% time spent exploring", 0)
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0
                    and mean_1000step_tderror is not None):
                # Checkpoint on the trailing TD error rather than the episode
                # reward, since rewards come from logged play.
                if saved_td_error is None or mean_1000step_tderror < saved_td_error:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to new avg trailing td error: {} -> {}".format(
                                saved_td_error, mean_1000step_tderror))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
                    saved_td_error = mean_1000step_tderror

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward & error: {} and {}".format(
                    saved_mean_reward, saved_td_error))
            U.load_state(model_file)

    return act
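# A bounded deque makes the trailing TD-error bookkeeping above simpler than an
# ever-growing td_error_list plus slice arithmetic; a sketch, with the window
# size and batch size as assumed parameters:
from collections import deque
import numpy as np

def make_td_window(window_steps=1000, batch_size=32):
    # One entry is appended per training batch, so this deque spans roughly
    # window_steps environment steps.
    return deque(maxlen=max(1, window_steps // batch_size))

def trailing_td_error(td_window):
    return round(float(np.mean(td_window)), 5) if td_window else None

# Usage inside the training branch:
#   td_window.append(np.mean(np.abs(td_errors)))
#   mean_1000step_tderror = trailing_td_error(td_window)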
class Agent: def __init__(self, sess): print("Initializing the agent...") self.sess = sess self.env = Environment() self.state_size = self.env.get_state_size()[0] self.action_size = self.env.get_action_size() self.low_bound, self.high_bound = self.env.get_bounds() self.buffer = PrioritizedReplayBuffer(parameters.BUFFER_SIZE, parameters.ALPHA) print("Creation of the actor-critic network...") self.network = Network(self.state_size, self.action_size, self.low_bound, self.high_bound) print("Network created !\n") self.epsilon = parameters.EPSILON_START self.beta = parameters.BETA_START self.best_run = -1e10 self.sess.run(tf.global_variables_initializer()) def run(self): self.nb_ep = 1 self.total_steps = 0 for self.nb_ep in range(1, parameters.TRAINING_STEPS + 1): episode_reward = 0 episode_step = 0 done = False memory = deque() # Initial state s = self.env.reset() max_steps = parameters.MAX_EPISODE_STEPS + self.nb_ep // parameters.EP_ELONGATION while episode_step < max_steps and not done: if random.random() < self.epsilon: a = self.env.random() else: # choose action based on deterministic policy a, = self.sess.run(self.network.actions, feed_dict={self.network.state_ph: [s]}) # Decay epsilon if self.epsilon > parameters.EPSILON_STOP: self.epsilon -= parameters.EPSILON_DECAY s_, r, done, info = self.env.act(a) memory.append((s, a, r, s_, 0.0 if done else 1.0)) if len(memory) > parameters.N_STEP_RETURN: s_mem, a_mem, r_mem, ss_mem, done_mem = memory.popleft() discount_R = 0 for i, (si, ai, ri, s_i, di) in enumerate(memory): discount_R += ri * parameters.DISCOUNT**(i + 1) self.buffer.add(s_mem, a_mem, discount_R, s_, done) # update network weights to fit a minibatch of experience if self.total_steps % parameters.TRAINING_FREQ == 0 and \ len(self.buffer) >= parameters.BATCH_SIZE: minibatch = self.buffer.sample(parameters.BATCH_SIZE, self.beta) if self.beta <= parameters.BETA_STOP: self.beta += parameters.BETA_INCR td_errors, _, _ = self.sess.run( [ self.network.td_errors, self.network.critic_train_op, self.network.actor_train_op ], feed_dict={ self.network.state_ph: minibatch[0], self.network.action_ph: minibatch[1], self.network.reward_ph: minibatch[2], self.network.next_state_ph: minibatch[3], self.network.is_not_terminal_ph: minibatch[4] }) self.buffer.update_priorities(minibatch[6], td_errors + 1e-6) # update target networks _ = self.sess.run(self.network.update_slow_targets_op) episode_reward += r s = s_ episode_step += 1 self.total_steps += 1 self.nb_ep += 1 if self.nb_ep % parameters.DISP_EP_REWARD_FREQ == 0: print( 'Episode %2i, Reward: %7.3f, Steps: %i, Epsilon : %7.3f, Max steps : %i' % (self.nb_ep, episode_reward, episode_step, self.epsilon, max_steps)) DISPLAYER.add_reward(episode_reward) if episode_reward > self.best_run and self.nb_ep > 100: self.best_run = episode_reward print("Best agent ! ", episode_reward) SAVER.save('best') if self.nb_ep % parameters.SAVE_FREQ == 0: SAVER.save(self.nb_ep) def play(self, number_run): print("Playing for", number_run, "runs") try: for i in range(number_run): s = self.env.reset() episode_reward = 0 done = False while not done: a, = self.sess.run(self.network.actions, feed_dict={self.network.state_ph: [s]}) s_, r, done, info = self.env.act(a) episode_reward += r print("Episode reward :", episode_reward) except KeyboardInterrupt as e: pass except Exception as e: print("Exception :", e) finally: print("End of the demo") self.env.close() def close(self): self.env.close()
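# In Agent.run() above, the N-step return sums the rewards still in `memory`
# with discounts DISCOUNT**(i+1) but never adds r_mem, the reward of the
# transition being popped; the standard N-step target includes it undiscounted.
# A sketch of the usual computation, under that reading of the loop:
def n_step_return(r_mem, memory, discount):
    """R = r_t + sum_{i>=1} discount**i * r_{t+i} over the buffered tail."""
    R = r_mem
    for i, (_, _, ri, _, _) in enumerate(memory):
        R += ri * discount ** (i + 1)
    return R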
def learn(env, q_func, num_actions=64*64, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, param_noise_threshold=0.05, callback=None): """Train a deepq model. Parameters ------- env: pysc2.env.SC2Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() # Set up summary Ops summary_ops, summary_vars = build_summaries() writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph) def make_obs_ph(name): return U.BatchInput((64, 64), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10 ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] episode_minerals = [0.0] saved_mean_reward = None path_memory = np.zeros((64,64)) obs = env.reset() # Select all marines first step_result = env.step(actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] obs = player_relative + path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if(player[0]>32): obs = shift(LEFT, player[0]-32, obs) elif(player[0]<32): obs = shift(RIGHT, 32 - player[0], obs) if(player[1]>32): obs = shift(UP, player[1]-32, obs) elif(player[1]<32): obs = shift(DOWN, 32 - player[1], obs) reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. 
- exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] reset = False coord = [player[0], player[1]] rew = 0 path_memory_ = np.array(path_memory, copy=True) if(action == 0): #UP if(player[1] >= 16): coord = [player[0], player[1] - 16] path_memory_[player[1] - 16 : player[1], player[0]] = -1 elif(player[1] > 0): coord = [player[0], 0] path_memory_[0 : player[1], player[0]] = -1 else: rew -= 1 elif(action == 1): #DOWN if(player[1] <= 47): coord = [player[0], player[1] + 16] path_memory_[player[1] : player[1] + 16, player[0]] = -1 elif(player[1] > 47): coord = [player[0], 63] path_memory_[player[1] : 63, player[0]] = -1 else: rew -= 1 elif(action == 2): #LEFT if(player[0] >= 16): coord = [player[0] - 16, player[1]] path_memory_[player[1], player[0] - 16 : player[0]] = -1 elif(player[0] < 16): coord = [0, player[1]] path_memory_[player[1], 0 : player[0]] = -1 else: rew -= 1 elif(action == 3): #RIGHT if(player[0] <= 47): coord = [player[0] + 16, player[1]] path_memory_[player[1], player[0] : player[0] + 16] = -1 elif(player[0] > 47): coord = [63, player[1]] path_memory_[player[1], player[0] : 63] = -1 else: rew -= 1 else: #Cannot move, give minus reward rew -= 1 if(path_memory[coord[1],coord[0]] != 0): rew -= 0.5 path_memory = np.array(path_memory_) #print("action : %s Coord : %s" % (action, coord)) new_action = [sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])] step_result = env.step(actions=new_action) player_relative = step_result[0].observation["screen"][_PLAYER_RELATIVE] new_obs = player_relative + path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if(player[0]>32): new_obs = shift(LEFT, player[0]-32, new_obs) elif(player[0]<32): new_obs = shift(RIGHT, 32 - player[0], new_obs) if(player[1]>32): new_obs = shift(UP, player[1]-32, new_obs) elif(player[1]<32): new_obs = shift(DOWN, 32 - player[1], new_obs) rew += step_result[0].reward * 10 done = step_result[0].step_type == environment.StepType.LAST # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew episode_minerals[-1] += step_result[0].reward if done: obs = env.reset() player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] obs = player_relative + path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] if(player[0]>32): obs = shift(LEFT, player[0]-32, obs) elif(player[0]<32): obs = shift(RIGHT, 32 - player[0], obs) if(player[1]>32): obs = shift(UP, player[1]-32, obs) elif(player[1]<32): obs = shift(DOWN, 32 - player[1], obs) # Select all marines first env.step(actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) episode_rewards.append(0.0) episode_minerals.append(0.0) path_memory = np.zeros((64,64)) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            mean_100ep_mineral = round(np.mean(episode_minerals[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                summary_str = sess.run(summary_ops, feed_dict={
                    summary_vars[0]: mean_100ep_reward,
                    summary_vars[1]: mean_100ep_mineral
                })
                writer.add_summary(summary_str, num_episodes)
                writer.flush()
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("mean 100 episode mineral", mean_100ep_mineral)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)

    return ActWrapper(act)
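# The learn() above re-centers the screen on the marine before feeding it to
# the Q network. `shift` is not defined in this excerpt; a plausible
# np.roll-based sketch that zero-fills the vacated edge (UP/DOWN/LEFT/RIGHT are
# assumed direction tags):
import numpy as np

UP, DOWN, LEFT, RIGHT = range(4)

def shift(direction, n, grid):
    out = np.array(grid, copy=True)
    if n <= 0:
        return out
    if direction == LEFT:
        out = np.roll(out, -n, axis=1)
        out[:, -n:] = 0
    elif direction == RIGHT:
        out = np.roll(out, n, axis=1)
        out[:, :n] = 0
    elif direction == UP:
        out = np.roll(out, -n, axis=0)
        out[-n:, :] = 0
    elif direction == DOWN:
        out = np.roll(out, n, axis=0)
        out[:n, :] = 0
    return out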
def main(): with tf_util.make_session(4) as session: act_fn, train_fn, target_update_fn, debug_fn = deepq.build_train( make_obs_ph=lambda name: Uint8Input([input_height, input_width], name=name), q_func=q_function_nn, num_actions=action_space_size, optimizer=tf.train.AdamOptimizer(learning_rate=0.001), gamma=0.99, grad_norm_clipping=10, double_q=False) epsilon = PiecewiseSchedule([(0, 1.0), (10000, 1.0), # since we start training at 10000 steps (20000, 0.4), (50000, 0.2), (100000, 0.1), (500000, 0.05)], outside_value=0.01) replay_memory = PrioritizedReplayBuffer(replay_memory_size, replay_alpha) beta = LinearSchedule(int(NUM_STEPS/4), initial_p=replay_beta, final_p=1.0) tf_util.initialize() target_update_fn() state = env.reset() state = preprocess_frame(state) watch_train = False dq = [] # a queue to store episode rewards start_step = 1 episode = 1 if is_load_model: dict_state = load_model() replay_memory = dict_state["replay_memory"] dq = dict_state["dq"] start_step = dict_state["step"] + 1 for step in itertools.count(start=start_step): action = act_fn(state[np.newaxis], update_eps=epsilon.value(step))[0] state_tplus1, reward, is_finished, _ = env.step(action) dq.append(reward) if watch_flag: env.render() time.sleep(1.0/fps) state_tplus1 = preprocess_frame(state_tplus1) replay_memory.add(state, action, reward, state_tplus1, float(is_finished)) state = state_tplus1 if is_finished: ep_reward = sum(dq) log.logkv("Steps", step) log.logkv("Episode reward", ep_reward) log.logkv("Episode number", episode) log.dumpkvs() print("Step", step, ". Finished episode", episode, "with reward ", ep_reward) dq = [] state = preprocess_frame(env.reset()) episode += 1 for _ in range(30): # NOOP for ~90 frames to skip the start screen. Range 30 used because each # step executed for 3 frames on average. Action 0 stands for doing nothing env.step(0) if watch_flag: env.render() if step > 10000 and step % learn_freq == 0: # only start training after 10000 steps are completed batch = replay_memory.sample(batch_size, beta=beta.value(step)) states = batch[0] actions = batch[1] rewards = batch[2] states_tplus1 = batch[3] finished_vars = batch[4] weights = batch[5] state_indeces = batch[6] errors = train_fn(states, actions, rewards, states_tplus1, finished_vars, weights) priority_order_new = np.abs(errors) + replay_epsilon replay_memory.update_priorities(state_indeces, priority_order_new) if step % save_freq == 0: print("State save", step) dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) if step > NUM_STEPS: print("Finished training. Saving model to ./saved_model/model.ckpt") dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) break
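# main() above checkpoints the replay memory, reward queue, and step counter so
# a run can resume mid-training. save_model/load_model are not defined in this
# excerpt; a minimal pickle-based sketch (network weights would be persisted
# separately, e.g. via a TF saver), with the path as an assumption:
import os
import pickle

def save_model(dict_state, path="./saved_model/state.pkl"):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(dict_state, f)

def load_model(path="./saved_model/state.pkl"):
    with open(path, "rb") as f:
        return pickle.load(f)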
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=5, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the trained model from. (default: None)(used in test stage) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) med_libs = MedLibs() '''Define Q network inputs: observation place holder(make_obs_ph), num_actions, scope, reuse outputs(tensor of shape batch_size*num_actions): values of each action, Q(s,a_{i}) ''' q_func = build_q_func(network, **network_kwargs) ''' To put observations into a placeholder ''' # TODO: Can only deal with Discrete and Box observation spaces for now # observation_space = env.observation_space (default) # Use sub_obs_space instead observation_space = med_libs.subobs_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) ''' Customize action ''' # TODO: subset of action space. action_dim = med_libs.sub_act_dim ''' Returns: deepq.build_train() act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. act is computed by [build_act] or [build_act_with_param_noise] train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. update_target: () -> () copy the parameters from optimized Q function to the target Q function. debug: {str: function} a bunch of functions to print debug data like q_values. ''' act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=action_dim, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, double_q=True, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': action_dim, } '''Contruct an act object using ActWrapper''' act = ActWrapper(act, act_params) ''' Create the replay buffer''' if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None '''Create the schedule for exploration starting from 1.''' exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) ''' Initialize all the uninitialized variables in the global scope and copy them to the target network. ''' U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() sub_obs = med_libs.custom_obs(obs) # TODO: customize observations pre_obs = obs reset = True mydict = med_libs.action_dict already_starts = False with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: # load_path: a trained model/policy load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) ''' Training loop starts''' t = 0 while t < total_timesteps: if callback is not None: if callback(locals(), globals()): break kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). update_param_noise_threshold = -np.log(1. 
- exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True ''' Choose action: take action and update exploration to the newest value ''' # TODO: Mixed action strategy # Normal status, action is easily determined by rules, use [obs] action = med_libs.simple_case_action(obs) # Distraction status, action is determined by Q, with [sub_obs] if action == -10: action = act(np.array(sub_obs)[None], update_eps=update_eps, **kwargs)[0] action = med_libs.action_Q_env( action ) # TODO:action_Q_env, from Q_action(0~2) to env_action(2~4) reset = False ''' Step action ''' new_obs, rew, done, d_info = env.step(action) d_att_last = int(pre_obs[0][0]) d_att_now = int(obs[0][0]) d_att_next = int(new_obs[0][0]) ''' Store transition in the replay buffer.''' pre_obs = obs obs = new_obs sub_new_obs = med_libs.custom_obs(new_obs) if (d_att_last == 0 and d_att_now == 1) and not already_starts: already_starts = True if already_starts and d_att_now == 1: replay_buffer.add(sub_obs, action, rew, sub_new_obs, float(done)) episode_rewards[-1] += rew # Sum of rewards t = t + 1 print( '>> Iteration:{}, State[d_att,cd_activate,L4_available,ssl4_activate,f_dc]:{}' .format(t, sub_obs)) print( 'Dis_Last:{}, Dis_Now:{}, Dis_Next:{},Reward+Cost:{}, Action:{}' .format( d_att_last, d_att_now, d_att_next, rew, list(mydict.keys())[list( mydict.values()).index(action)])) # update sub_obs sub_obs = sub_new_obs # Done and Reset if done: print('Done infos: ', d_info) print('======= end =======') obs = env.reset() sub_obs = med_libs.custom_obs(obs) # TODO: custom obs pre_obs = obs # TODO: save obs at t-1 already_starts = False episode_rewards.append(0.0) reset = True # Update the Q network parameters if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                # Calculate td-errors: map env action ids (2~4) back to
                # Q action indices (0~2) first.
                actions = med_libs.action_env_Q(actions)
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically: copy the weights of Q to target Q.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
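# action_Q_env / action_env_Q above translate between the Q network's action
# indices (0~2) and the environment's action ids (2~4). They belong to MedLibs
# and are not shown here; under the offsets stated in the inline comments they
# reduce to a constant shift (the offset value is an assumption):
Q_TO_ENV_OFFSET = 2

def action_Q_env(q_action):
    return q_action + Q_TO_ENV_OFFSET

def action_env_Q(env_action):
    # Also works elementwise on a numpy array of sampled batch actions.
    return env_action - Q_TO_ENV_OFFSET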
def learn(env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.01, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=50, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, callback=None, num_optimisation_steps=40): """Train a deepq model. Parameters ------- env : gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((env.observation_space.shape[0] * 2, ), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_max_rewards = [env.reward_max] episode_rewards = [0.0] saved_mean_reward_diff = None # difference in saved reward obs = env.reset(seed=np.random.randint(0, 1000)) with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") episode_buffer = [None] * env.n episode_timestep = 0 for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value action = act(np.concatenate([obs, env.goal])[None], update_eps=exploration.value(t))[0] new_obs, rew, done, _ = env.step(action) # Store transition in the replay buffer. episode_buffer[episode_timestep] = (obs, action, rew, new_obs, float(done)) episode_timestep += 1 replay_buffer.add(np.concatenate([obs, env.goal]), action, rew, np.concatenate([new_obs, env.goal]), float(done)) obs = new_obs episode_rewards[-1] += rew num_episodes = len(episode_rewards) #######end of episode if done: for episode in range(episode_timestep): obs1, action1, _, new_obs1, done1 = episode_buffer[episode] goal_prime = new_obs1 rew1 = env.calculate_reward(new_obs1, goal_prime) replay_buffer.add(np.concatenate([obs1, goal_prime]), action1, rew1, np.concatenate([new_obs1, goal_prime]), float(done1)) episode_timestep = 0 obs = env.reset(seed=np.random.randint(0, 1000)) episode_rewards.append(0.0) episode_max_rewards.append(env.reward_max) #############Training Q if t > learning_starts and num_episodes % train_freq == 0: for i in range(num_optimisation_steps): # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs( td_errors) + prioritized_replay_eps replay_buffer.update_priorities( batch_idxes, new_priorities) #############Training Q target if t > learning_starts and num_episodes % target_network_update_freq == 0: # Update target network periodically. 
                update_target()

            mean_100ep_reward = np.mean(episode_rewards[-101:-1])
            mean_100ep_max_reward = np.mean(episode_max_rewards[-101:-1])
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("mean 100 episode max reward",
                                      mean_100ep_max_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100
                    and num_episodes % checkpoint_freq == 0):
                reward_diff = mean_100ep_max_reward - mean_100ep_reward
                if saved_mean_reward_diff is None or reward_diff < saved_mean_reward_diff:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward difference decrease: {} -> {}"
                                   .format(saved_mean_reward_diff, reward_diff))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward_diff = reward_diff

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward difference: {}".format(
                    saved_mean_reward_diff))
            U.load_state(model_file)

    return ActWrapper(act, act_params)
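
# ----------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the original API): the hindsight
# relabelling step performed at episode end above, isolated into a helper. It
# assumes a baselines-style buffer.add() and an env-provided
# calculate_reward(state, goal), as used in the loop.
import numpy as np

def relabel_with_hindsight(episode, replay_buffer, calculate_reward):
    """Re-store each transition, substituting the achieved state for the goal."""
    for obs, action, _, new_obs, done in episode:
        goal_prime = new_obs  # the state actually reached becomes the goal
        rew = calculate_reward(new_obs, goal_prime)
        replay_buffer.add(np.concatenate([obs, goal_prime]), action, rew,
                          np.concatenate([new_obs, goal_prime]), done)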
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=3000,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=3000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress;
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version
        at the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial
        value to 1.0. If set to None, equals total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name), q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=0.99, double_q=False #grad_norm_clipping=10, # param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(10000), initial_p=1.0, final_p=0.02) # Initialize the parameters and copy them to the target network. U.initialize() update_target() old_state = None formula_LTLf_1 = "!d U(g)" monitoring_RightToLeft = MonitoringSpecification( ltlf_formula=formula_LTLf_1, r=0, c=-0.01, s=10, f=-10 ) formula_LTLf_2 = "F(G(bb)) " # break brick monitoring_BreakBrick = MonitoringSpecification( ltlf_formula=formula_LTLf_2, r=10, c=-0.01, s=10, f=0 ) monitoring_specifications = [monitoring_BreakBrick, monitoring_RightToLeft] def RightToLeftConversion(observation) -> TraceStep: done=False global old_state if arrays_equal(observation[-9:], np.zeros((len(observation[-9:])))): ### Checking if all Bricks are broken # print('goal reached') goal = True # all bricks are broken done = True else: goal = False dead = False if done and not goal: dead = True order = check_ordered(observation[-9:]) if not order: # print('wrong order', state[5:]) dead=True done = True if old_state is not None: # if not the first state if not arrays_equal(old_state[-9:], observation[-9:]): brick_broken = True # check_ordered(state[-9:]) # print(' a brick is broken') else: brick_broken = False else: brick_broken = False dictionary={'g': goal, 'd': dead, 'o': order, 'bb':brick_broken} #print(dictionary) return dictionary multi_monitor = MultiRewardMonitor( monitoring_specifications=monitoring_specifications, obs_to_trace_step=RightToLeftConversion ) episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True # initialize done = False #monitor.get_reward(None, False) # add first state in trace with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) episodeCounter=0 num_episodes=0 for t in itertools.count(): # Take action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] #print(action) #print(action) new_obs, rew, done, _ = 
            done = False  ## FOR FIRE ONLY

            start_time = time.time()
            rew, is_perm = multi_monitor(new_obs)
            # print("--- %s seconds ---" % (time.time() - start_time))
            old_state = new_obs
            done = done or is_perm

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200

            if episodeCounter % 100 == 0 or episodeCounter < 1:
                # Show off the result
                env.render()

            if done:
                episodeCounter += 1
                num_episodes += 1
                obs = env.reset()
                old_state = None
                episode_rewards.append(0)
                multi_monitor.reset()
                # monitor.get_reward(None, False)
            else:
                # Minimize the error in Bellman's equation on a batch sampled
                # from the replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = \
                        replay_buffer.sample(64)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))
                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("currentEpisodeReward", episode_rewards[-1])
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    act.save_act()
                    # save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

            # if model_saved:
            #     if print_freq is not None:
            #         logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            #     load_variables(model_file)

    return act
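
# ----------------------------------------------------------------------------
# Simplified sketch of the reward-monitor pattern above (hypothetical; the real
# MultiRewardMonitor evaluates LTLf formulas over the trace of fluents). The
# monitor replaces the environment reward: it maps each observation to boolean
# fluents and returns (reward, permanent_failure).
class ToyRewardMonitor:
    def __init__(self, obs_to_fluents):
        self.obs_to_fluents = obs_to_fluents

    def __call__(self, observation):
        fluents = self.obs_to_fluents(observation)
        reward = 10.0 if fluents.get('bb') else -0.01  # reward broken bricks
        is_perm = fluents.get('d', False)  # a dead state ends the episode
        return reward, is_perm

    def reset(self):
        pass  # a formula-based monitor would reset its automaton state here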
def learn(env,
          q_func,
          num_actions=3,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          param_noise=False,
          param_noise_threshold=0.05,
          callback=None,
          demo_replay=[]):
    """Train a deepq model.

    Parameters
    -------
    env: pysc2.env.SC2Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions available to the agent
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress;
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version
        at the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial
        value to 1.0. If set to None, equals max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    param_noise: bool
        whether or not to use parameter space noise
    param_noise_threshold: float
        fixed threshold for parameter space noise; if negative, the threshold
        is derived from the current exploration value.
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput((64, 64), name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10 ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # Select all marines first player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] screen = player_relative obs = common.init(env, obs) group_id = 0 reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. 
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            # Custom processing for DefeatZerglingsAndBanelings
            obs, screen, player = common.select_marine(env, obs)

            action = act(np.array(screen)[None], update_eps=update_eps, **kwargs)[0]
            reset = False
            rew = 0

            new_action = None
            obs, new_action = common.marine_action(env, obs, player, action)
            army_count = env._obs.observation.player_common.army_count

            try:
                if army_count > 0 and _ATTACK_SCREEN in obs[0].observation["available_actions"]:
                    obs = env.step(actions=new_action)
                else:
                    new_action = [sc2_actions.FunctionCall(_NO_OP, [])]
                    obs = env.step(actions=new_action)
            except Exception:
                pass  # Ignore errors from invalid actions.

            player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
            new_screen = player_relative

            rew += obs[0].reward
            done = obs[0].step_type == environment.StepType.LAST

            selected = obs[0].observation["screen"][_SELECTED]
            player_y, player_x = (selected == _PLAYER_FRIENDLY).nonzero()

            if len(player_y) > 0:
                player = [int(player_x.mean()), int(player_y.mean())]

            # Re-centre the screen on the selected marine.
            if len(player) == 2:
                if player[0] > 32:
                    new_screen = common.shift(LEFT, player[0] - 32, new_screen)
                elif player[0] < 32:
                    new_screen = common.shift(RIGHT, 32 - player[0], new_screen)
                if player[1] > 32:
                    new_screen = common.shift(UP, player[1] - 32, new_screen)
                elif player[1] < 32:
                    new_screen = common.shift(DOWN, 32 - player[1], new_screen)

            # Store transition in the replay buffer.
            replay_buffer.add(screen, action, rew, new_screen, float(done))
            screen = new_screen
            episode_rewards[-1] += rew
            reward = episode_rewards[-1]

            if done:
                print("Episode Reward: %s" % episode_rewards[-1])
                obs = env.reset()
                player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
                screen = player_relative
                group_list = common.init(env, obs)
                # Select all marines first
                # env.step(actions=[sc2_actions.FunctionCall(_SELECT_UNIT, [_SELECT_ALL])])
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled
                # from the replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = \
                        replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("reward", reward)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)

    return ActWrapper(act)
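
# ----------------------------------------------------------------------------
# Possible shape of the common.shift() helper used above (an assumption; the
# original lives in the repo's common module): translate a 2-D screen a number
# of cells in one direction, zero-filling the vacated cells, so the view stays
# centred on the selected marine. The direction constants are also assumptions.
import numpy as np

UP, DOWN, LEFT, RIGHT = range(4)  # assumed direction constants

def shift(direction, number, matrix):
    """Shift a 2-D array `number` cells in `direction`, zero-filling the gap."""
    if number == 0:
        return matrix
    axis = 0 if direction in (UP, DOWN) else 1
    step = -number if direction in (UP, LEFT) else number
    shifted = np.roll(matrix, step, axis=axis)
    if axis == 0:
        if step > 0:
            shifted[:step, :] = 0   # rows that rolled in from the bottom
        else:
            shifted[step:, :] = 0   # rows that rolled in from the top
    else:
        if step > 0:
            shifted[:, :step] = 0   # columns that rolled in from the right
        else:
            shifted[:, step:] = 0   # columns that rolled in from the left
    return shifted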
def learn(env,
          q_func,
          num_actions=4,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          param_noise=False,
          param_noise_threshold=0.05,
          callback=None):
    """Train a deepq model.

    Parameters
    -------
    env: pysc2.env.SC2Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions available to the agent
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress;
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version
        at the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial
        value to 1.0. If set to None, equals max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    param_noise: bool
        whether or not to use parameter space noise
    param_noise_threshold: float
        fixed threshold for parameter space noise; if negative, the threshold
        is derived from the current exploration value.
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() #def make_obs_ph(name): #return U.BatchInput((16, 16), name=name) obs_spec = env.observation_spec()[0] screen_dim = obs_spec['feature_screen'][1:3] def make_obs_ph(name): #return ObservationInput(ob_space, name=name) return ObservationInput(Box(low=0.0, high=screen_dim[0], shape=(screen_dim[0], screen_dim[1], 1)), name=name) act_x, train_x, update_target_x, debug_x = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq_x") act_y, train_y, update_target_y, debug_y = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, scope="deepq_y") act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } # Create the replay buffer if prioritized_replay: replay_buffer_x = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) replay_buffer_y = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule_x = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) beta_schedule_y = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer_x = ReplayBuffer(buffer_size) replay_buffer_y = ReplayBuffer(buffer_size) beta_schedule_x = None beta_schedule_y = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target_x() update_target_y() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() # Select all marines first obs = env.step( actions=[sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])]) print(obs[0].observation.keys()) player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE] screen = (player_relative == _PLAYER_NEUTRAL).astype(int) #+ path_memory player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero() player = [int(player_x.mean()), int(player_y.mean())] reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join("model/", "mineral_shards") print(model_file) for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. if param_noise_threshold >= 0.: update_param_noise_threshold = param_noise_threshold else: # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. 
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            action_x = act_x(np.array(screen)[None], update_eps=update_eps, **kwargs)[0]
            action_y = act_y(np.array(screen)[None], update_eps=update_eps, **kwargs)[0]
            reset = False

            rew = 0
            coord = [action_x, action_y]

            if _MOVE_SCREEN not in obs[0].observation["available_actions"]:
                obs = env.step(actions=[
                    sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
                ])

            new_action = [sc2_actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, coord])]
            # else:
            #     new_action = [sc2_actions.FunctionCall(_NO_OP, [])]
            obs = env.step(actions=new_action)

            player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE]
            new_screen = (player_relative == _PLAYER_NEUTRAL).astype(int)

            player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
            player = [int(player_x.mean()), int(player_y.mean())]

            rew = obs[0].reward
            done = obs[0].step_type == environment.StepType.LAST

            # Store the transition in both replay buffers.
            replay_buffer_x.add(screen, action_x, rew, new_screen, float(done))
            replay_buffer_y.add(screen, action_y, rew, new_screen, float(done))

            screen = new_screen
            episode_rewards[-1] += rew
            reward = episode_rewards[-1]

            if done:
                obs = env.reset()
                player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE]
                screen = (player_relative == _PLAYER_NEUTRAL).astype(int)
                player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
                player = [int(player_x.mean()), int(player_y.mean())]
                # Select all marines first
                env.step(actions=[
                    sc2_actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
                ])
                episode_rewards.append(0.0)
                # episode_minerals.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on batches sampled
                # from the replay buffers.
                if prioritized_replay:
                    experience_x = replay_buffer_x.sample(
                        batch_size, beta=beta_schedule_x.value(t))
                    (obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x,
                     weights_x, batch_idxes_x) = experience_x
                    experience_y = replay_buffer_y.sample(
                        batch_size, beta=beta_schedule_y.value(t))
                    (obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y,
                     weights_y, batch_idxes_y) = experience_y
                else:
                    obses_t_x, actions_x, rewards_x, obses_tp1_x, dones_x = \
                        replay_buffer_x.sample(batch_size)
                    weights_x, batch_idxes_x = np.ones_like(rewards_x), None
                    obses_t_y, actions_y, rewards_y, obses_tp1_y, dones_y = \
                        replay_buffer_y.sample(batch_size)
                    weights_y, batch_idxes_y = np.ones_like(rewards_y), None

                td_errors_x = train_x(obses_t_x, actions_x, rewards_x,
                                      obses_tp1_x, dones_x, weights_x)
                # The y batch must be trained with train_y, not train_x.
                td_errors_y = train_y(obses_t_y, actions_y, rewards_y,
                                      obses_tp1_y, dones_y, weights_y)

                if prioritized_replay:
                    new_priorities_x = np.abs(td_errors_x) + prioritized_replay_eps
                    new_priorities_y = np.abs(td_errors_y) + prioritized_replay_eps
                    replay_buffer_x.update_priorities(batch_idxes_x,
                                                      new_priorities_x)
                    replay_buffer_y.update_priorities(batch_idxes_y,
                                                      new_priorities_y)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target networks periodically.
                update_target_x()
                update_target_y()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("reward", reward)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}"
                                   .format(saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)

    return ActWrapper(act_x), ActWrapper(act_y)
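
# ----------------------------------------------------------------------------
# Design note on the variant above: factorizing the 2-D click target into two
# independent Q networks keeps each head at `num_actions` outputs instead of a
# joint num_actions**2 action space, at the cost of ignoring x/y correlations.
# A thin wrapper (hypothetical, for illustration) recombines the two policies:
import numpy as np

def act_xy(act_x, act_y, screen, update_eps=0.0):
    """Query both coordinate heads and return a single [x, y] click target."""
    x = act_x(np.array(screen)[None], update_eps=update_eps)[0]
    y = act_y(np.array(screen)[None], update_eps=update_eps)[0]
    return [x, y]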
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress;
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version
        at the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial
        value to 1.0. If set to None, equals total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = \
                        replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}"
                                   .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act, debug['q_func'], debug['obs']
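
# ----------------------------------------------------------------------------
# Hedged usage sketch for the variant above. Unlike upstream baselines, this
# learn() also returns the debug q_func and observation placeholder alongside
# act; the environment name and hyperparameters below are illustrative only.
import gym

def _train_cartpole():
    env = gym.make('CartPole-v0')
    act, q_func, obs_ph = learn(env,
                                network='mlp',
                                total_timesteps=100000,
                                exploration_fraction=0.1,
                                exploration_final_eps=0.02,
                                print_freq=10)
    return act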
    is_training=True,
    history_length=HISTORY_LENGTH,
    commission_percentage=COMMISSION_PERCENTAGE)

asset_features_shape = [dc.num_assets, HISTORY_LENGTH, dc.num_asset_features]
action_dim = dc.num_assets

actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))

# rpb = ReplayBuffer(buffer_size=BUFFER_SIZE)
# conf = {
#     'size': BUFFER_SIZE,
#     'batch_size': BATCH_SIZE,
#     'learn_start': 1000,
#     'steps': NUM_EPISODES * EPISODE_LENGTH
# }
# rpb = Experience(conf)
rpb = PrioritizedReplayBuffer(size=BUFFER_SIZE, alpha=0.6)

sess = tf.Session()

actor = ActorNetwork(sess=sess,
                     asset_features_shape=asset_features_shape,
                     action_dim=action_dim,
                     action_bound=1,
                     learning_rate=LEARNING_RATE,
                     tau=TAU,
                     batch_size=BATCH_SIZE)

critic = CriticNetwork(sess=sess,
                       asset_features_shape=asset_features_shape,
                       action_dim=action_dim,
                       learning_rate=LEARNING_RATE,
                       tau=TAU,
                       gamma=GAMMA,