def main():
    exp_dir = './runs/podworld'
    # by default CSV logs would be created in the OS temp directory
    logger.configure(dir=exp_dir,
                     format_strs=['stdout', 'log', 'csv', 'tensorboard'],
                     log_suffix=None)

    # create the PodWorld environment
    env = make_podworld('podworld-v0')
    # by default the monitor logs episode reward and length
    env = bench.Monitor(env, logger.get_dir())

    learn_params = defaults.atari_breakout()
    learn_params['checkpoint_path'] = exp_dir
    learn_params['checkpoint_freq'] = 100000
    learn_params['print_freq'] = 10
    learn_params['exploration_scheduler'] = PiecewiseSchedule(
        [(0, 1.0), (int(1e6), 0.1), (int(1e7), 0.01)], outside_value=0.01)

    model = deepq.learn(env, total_timesteps=int(1e7), **learn_params)
    model.save('podworld_model.pkl')
    env.close()
def updateEpsilon():
    epsilon = PiecewiseSchedule([(0, 1.0),
                                 (20000, 0.5),
                                 (50000, 0.25),
                                 (100000, 0.12),
                                 (500000, 0.05)],
                                outside_value=0.2)
    return epsilon
def test_piecewise_schedule():
    ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)],
                           outside_value=500)

    assert np.isclose(ps.value(-10), 500)
    assert np.isclose(ps.value(0), 150)
    assert np.isclose(ps.value(5), 200)
    assert np.isclose(ps.value(9), 80)
    assert np.isclose(ps.value(50), 50)
    assert np.isclose(ps.value(80), 50)
    assert np.isclose(ps.value(150), 0)
    assert np.isclose(ps.value(175), -25)
    assert np.isclose(ps.value(201), 500)
    assert np.isclose(ps.value(500), 500)
    assert np.isclose(ps.value(200 - 1e-10), -50)
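# For reference, the assertions above follow from linear interpolation between
# consecutive endpoints, with outside_value returned for any t outside the
# covered range. A minimal illustrative sketch of that behaviour (hypothetical
# SimplePiecewiseSchedule, not the baselines.common.schedules implementation,
# which additionally supports a custom interpolation function):
class SimplePiecewiseSchedule:
    def __init__(self, endpoints, outside_value=None):
        # endpoints: list of (t, value) pairs with strictly increasing t
        assert all(l[0] < r[0] for l, r in zip(endpoints, endpoints[1:]))
        self._endpoints = endpoints
        self._outside_value = outside_value

    def value(self, t):
        # Interpolate linearly between the two endpoints that bracket t.
        for (l_t, l_v), (r_t, r_v) in zip(self._endpoints, self._endpoints[1:]):
            if l_t <= t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return l_v + alpha * (r_v - l_v)
        # t lies outside the covered range, e.g. value(-10) or value(201) above.
        return self._outside_value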
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--env', help='environment ID', default='Breakout')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--prioritized', type=int, default=1)
    parser.add_argument('--num-timesteps', type=int, default=int(10e6))
    parser.add_argument('experiment_id')
    args = parser.parse_args()

    logging_directory = Path('./experiments/{}--{}'.format(args.experiment_id, args.env))
    if not logging_directory.exists():
        logging_directory.mkdir(parents=True)
    logger.configure(str(logging_directory), ['stdout', 'tensorboard', 'json'])

    model_directory = logging_directory / 'models'
    if not model_directory.exists():
        model_directory.mkdir(parents=True)

    set_global_seeds(args.seed)
    env_name = args.env + "NoFrameskip-v4"
    env = make_atari(env_name)
    env = bench.Monitor(env, logger.get_dir())
    env = deepq.wrap_atari_dqn(env)

    model = models.cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
    )
    exploration_schedule = PiecewiseSchedule(
        endpoints=[(0, 1), (1e6, 0.1), (5 * 1e6, 0.01)],
        outside_value=0.01)

    act = learn(
        env,
        q_func=model,
        beta1=0.9,
        beta2=0.99,
        epsilon=1e-4,
        max_timesteps=args.num_timesteps,
        buffer_size=1000000,
        exploration_schedule=exploration_schedule,
        start_lr=1e-4,
        end_lr=5 * 1e-5,
        start_step=1e6,
        end_step=5 * 1e6,
        train_freq=4,
        print_freq=10,
        batch_size=32,
        learning_starts=50000,
        target_network_update_freq=10000,
        gamma=0.99,
        prioritized_replay=bool(args.prioritized),
        model_directory=model_directory
    )
    act.save(str(model_directory / "act_model.pkl"))
    env.close()
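# Illustration only: how an annealed exploration schedule like the one above is
# typically consumed inside a DQN-style loop (epsilon-greedy action selection).
# The helper below is hypothetical and not part of the learn() call used here.
import numpy as np

def epsilon_greedy_action(q_values, t, schedule, rng=np.random):
    eps = schedule.value(t)                # exploration rate annealed by the schedule
    if rng.rand() < eps:
        return rng.randint(len(q_values))  # explore: uniform random action
    return int(np.argmax(q_values))        # exploit: greedy action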
def main():
    exp_dir = './runs/breakout'
    # by default CSV logs would be created in the OS temp directory
    logger.configure(dir=exp_dir,
                     format_strs=['stdout', 'log', 'csv', 'tensorboard'],
                     log_suffix=None)

    # create Atari environment, use no-op reset, max-pool over the last two frames
    env = make_atari('BreakoutNoFrameskip-v4')
    # by default the monitor logs episode reward and length
    env = bench.Monitor(env, logger.get_dir())
    env = deepq.wrap_atari_dqn(env)

    learn_params = defaults.atari_breakout()
    learn_params['checkpoint_path'] = exp_dir
    learn_params['checkpoint_freq'] = 100000
    learn_params['print_freq'] = 10
    learn_params['exploration_scheduler'] = PiecewiseSchedule(
        [(0, 1.0), (int(1e6), 0.1), (int(1e7), 0.01)], outside_value=0.01)

    model = deepq.learn(
        env,
        # below are defaults
        # convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        # hiddens=[256],
        total_timesteps=int(3e7),
        **learn_params)
    model.save('breakout_model.pkl')
    env.close()
class Worker(object):
    def __init__(self, is_chief, env, model, config, should_render=True):
        self.config = config
        self.is_chief = is_chief
        self.env = env
        self.should_render = should_render
        self.act, self.train, self.update_target, self.debug = multi_deepq.build_train(
            make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            gamma=config.gamma,
            optimizer=tf.train.AdamOptimizer(learning_rate=config.learning_rate),
            reuse=(not is_chief),
        )
        self.max_iteration_count = int(self.config.num_iterations)

        # Create the replay buffer
        self.replay_buffer = ReplayBuffer(config.replay_size)

        if self.config.exploration_schedule == "constant":
            self.exploration = ConstantSchedule(0.1)
        elif self.config.exploration_schedule == "linear":
            # Create the schedule for exploration starting from 1 (every action is random) down to
            # 0.02 (98% of actions are selected according to values predicted by the model).
            self.exploration = LinearSchedule(
                schedule_timesteps=self.config.num_iterations / 4,
                initial_p=1.0,
                final_p=0.02)
        elif self.config.exploration_schedule == "piecewise":
            approximate_num_iters = self.config.num_iterations
            self.exploration = PiecewiseSchedule([
                (0, 1.0),
                (approximate_num_iters / 50, 0.1),
                (approximate_num_iters / 5, 0.01)
            ], outside_value=0.01)
        else:
            raise ValueError("Bad exploration schedule")

    def run(self, session, coord):
        episode_rewards = [0.0]
        td_errors = [0.0]
        obs = self.env.reset()
        start_time = timer()
        event_timer = EventTimer()

        for t in range(self.max_iteration_count):
            if t > 1000 and t % 500 == 0:
                event_timer.start()

            # Take action and update exploration to the newest value
            action = self.act(np.array(obs)[None], update_eps=self.exploration.value(t),
                              session=session)[0]
            event_timer.measure('act')

            new_obs, rew, done, _ = self.env.step(action)
            event_timer.measure('step')

            # Store transition in the replay buffer.
            self.replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs
            episode_rewards[-1] += rew
            if done:
                obs = self.env.reset()
                episode_rewards.append(0)
            event_timer.measure('replay_buffer')

            # show_episode = len(episode_rewards) > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            show_episode = len(episode_rewards) % 100 == 0
            if self.should_render and self.is_chief and show_episode:
                # Show off the result
                self.env.render()

            # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
            if t > 1000 and t % self.config.train_frequency == 0:
                obses_t, actions, rewards, obses_tp1, dones = self.replay_buffer.sample(self.config.batch_size)
                td_error = self.train(obses_t, actions, rewards, obses_tp1, dones,
                                      np.ones_like(rewards), session=session)
                td_errors.append(np.mean(td_error))
                # if (t / self.config.train_frequency) % 10 == 0:
                #     print("mean TD error: {}".format(np.mean(td_error)))
                #     print("Gradient norm: {}".format(grad_norm))
                event_timer.measure('train')

            # Update target network periodically.
            if self.is_chief and t % self.config.target_update_frequency == 0:
                self.update_target(session=session)
                event_timer.measure('update_target')

            event_timer.stop()

            if self.is_chief and done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("mean episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("mean td_error", round(np.mean(td_errors[-101:]), 5))
                logger.record_tabular("time elapsed", timer() - start_time)
                logger.record_tabular("steps/s", t / (timer() - start_time))
                logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t)))
                event_timer.print_shares()
                event_timer.print_averages()
                logger.dump_tabular()
class Actor(object): def __init__(self, index, is_chief, env, model, queue, config, logger, episode_logger, should_render=False): self.config = config self.is_chief = is_chief self.env = env self.global_step = tf.train.get_global_step() self.should_render = should_render self.logger = logger self.episode_logger = episode_logger self.log_frequency = 10 with tf.device('/cpu:0'): self.act, self.update_params, self.debug = qdqn.build_act( make_obs_ph=lambda name: U.Uint8Input(self.env.observation_space.shape, name=name), q_func=model, num_actions=self.env.action_space.n, scope="actor_{}".format(index), learner_scope="learner", reuse=False) with tf.device('/cpu:0'): obs_t_input = tf.placeholder(tf.uint8, self.env.observation_space.shape, name="obs_t") act_t_ph = tf.placeholder(tf.int32, self.env.action_space.shape, name="action") rew_t_ph = tf.placeholder(tf.float32, [], name="reward") obs_tp1_input = tf.placeholder(tf.uint8, self.env.observation_space.shape, name="obs_tp1") done_mask_ph = tf.placeholder(tf.float32, [], name="done") global_step_ph = tf.placeholder(tf.int32, [], name="sample_global_step") enqueue_op = queue.enqueue( [obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, global_step_ph]) self.enqueue = U.function( [obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, global_step_ph], enqueue_op) self.max_iteration_count = self.config.num_iterations if self.config.exploration_schedule == "constant": self.exploration = ConstantSchedule(0.1) elif self.config.exploration_schedule == "linear": # Create the schedule for exploration starting from 1 (every action is random) down to # 0.02 (98% of actions are selected according to values predicted by the model). self.exploration = LinearSchedule( schedule_timesteps=self.config.num_iterations / 4, initial_p=1.0, final_p=0.02) elif self.config.exploration_schedule == "piecewise": approximate_num_iters = self.config.num_iterations self.exploration = PiecewiseSchedule([ (0, 1.0), (approximate_num_iters / 50, 0.1), (approximate_num_iters / 5, 0.01) ], outside_value=0.01) else: raise ValueError("Bad exploration schedule") def run(self, session, coord): episode_rewards = [0.0] episode_length = 0 obs = self.env.reset() done = False global_step = session.run(self.global_step) exploration_value = self.exploration.value(global_step) print("Starting acting from step {}".format(global_step)) start_time = timer() event_timer = EventTimer() for t in itertools.count(): if coord.should_stop(): break if t % 10 == 0: global_step = session.run(self.global_step) exploration_value = self.exploration.value(global_step) if t > 0 and t % 500 == 0: event_timer.start() # Take action and update exploration to the newest value action = self.act(np.array(obs)[None], update_eps=exploration_value, session=session)[0] if done and len(episode_rewards) % self.log_frequency == 0: print(self.debug["q_values"](np.array(obs)[None], session=session)) event_timer.measure('act') new_obs, rew, done, _ = self.env.step(action) event_timer.measure('step') self.enqueue(obs, action, rew, new_obs, float(done), global_step, session=session) obs = new_obs episode_length += 1 episode_rewards[-1] += rew if done: self.episode_logger.record_tabular("step", t) self.episode_logger.record_tabular("global_step", global_step) self.episode_logger.record_tabular("reward", episode_rewards[-1]) self.episode_logger.record_tabular("length", episode_length) self.episode_logger.record_tabular("end_time", timer() - start_time) self.episode_logger.dump_tabular() obs = self.env.reset() 
episode_rewards.append(0) episode_length = 0 event_timer.measure('queue') # show_episode = len(episode_rewards) > 100 and np.mean(episode_rewards[-101:-1]) >= 200 show_episode = len(episode_rewards) % 10 == 0 if self.should_render and self.is_chief and show_episode: # Show off the result self.env.render() # Update target network periodically. if t % self.config.params_update_frequency == 0: self.update_params(session=session) event_timer.measure('update_params') event_timer.stop() if self.is_chief and done and len(episode_rewards) % self.log_frequency == 0: self.logger.record_tabular("step", t) self.logger.record_tabular("global_step", global_step) self.logger.record_tabular("episodes", len(episode_rewards)) self.logger.record_tabular("mean episode reward", round(np.mean(episode_rewards[-101:-1]), 1)) self.logger.record_tabular("time elapsed", timer() - start_time) self.logger.record_tabular("steps/s", t / (timer() - start_time)) self.logger.record_tabular("% time spent exploring", int(100 * exploration_value)) event_timer.print_shares(self.logger) event_timer.print_averages(self.logger) self.logger.dump_tabular()
        make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
        v_func=v_func_model,
        adv_func=binary_model_1,
        learning_rate=args.lr,
        num_actions=env.action_space.n,
        en=args.en,
        gamma=0.99,
        grad_norm_clipping=10,
    )
    predict_values = debug['predict_values']

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([
        (0, 1.0),
        (approximate_num_iters / 50, 0.1),
        (approximate_num_iters / 5, 0.01)
    ], outside_value=0.01)

    replay_buffer = ReplayBuffer(args.replay_buffer_size)

    U.initialize()
    update_target()
    num_iters = 0
    done_times = 0

    if not os.path.exists('./svgd_adv_learning/' + env_name):
        os.makedirs('./svgd_adv_learning/' + env_name)

    obs = env.reset()
    else:
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: U.Uint8Input(
                env.observation_space.shape, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),
            gamma=0.99,
            double_q=args.double_q,
            noisy=args.noisy)

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule(
        [
            (0, 1.0),
            (int(args.num_steps * 0.1), 0.1)
            # (approximate_num_iters / 5, 0.01)
        ], outside_value=0.1)
    learning_rate = PiecewiseSchedule([(0, 1e-3), (1, 1e-3)], outside_value=1e-3)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(args.replay_buffer_size)
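    # Illustration (assumption): the learning_rate schedule above would be sampled
    # once per optimisation step and fed to the optimizer; with endpoints (0, 1e-3)
    # and (1, 1e-3) plus outside_value=1e-3 it returns 1e-3 at every step, i.e. it
    # behaves as a constant learning rate.
    assert learning_rate.value(0) == 1e-3 and learning_rate.value(10 ** 6) == 1e-3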
def main(): with tf_util.make_session(4) as session: act_fn, train_fn, target_update_fn, debug_fn = deepq.build_train( make_obs_ph=lambda name: Uint8Input([input_height, input_width], name=name), q_func=q_function_nn, num_actions=action_space_size, optimizer=tf.train.AdamOptimizer(learning_rate=0.001), gamma=0.99, grad_norm_clipping=10, double_q=False) epsilon = PiecewiseSchedule([(0, 1.0), (10000, 1.0), # since we start training at 10000 steps (20000, 0.4), (50000, 0.2), (100000, 0.1), (500000, 0.05)], outside_value=0.01) replay_memory = PrioritizedReplayBuffer(replay_memory_size, replay_alpha) beta = LinearSchedule(int(NUM_STEPS/4), initial_p=replay_beta, final_p=1.0) tf_util.initialize() target_update_fn() state = env.reset() state = preprocess_frame(state) watch_train = False dq = [] # a queue to store episode rewards start_step = 1 episode = 1 if is_load_model: dict_state = load_model() replay_memory = dict_state["replay_memory"] dq = dict_state["dq"] start_step = dict_state["step"] + 1 for step in itertools.count(start=start_step): action = act_fn(state[np.newaxis], update_eps=epsilon.value(step))[0] state_tplus1, reward, is_finished, _ = env.step(action) dq.append(reward) if watch_flag: env.render() time.sleep(1.0/fps) state_tplus1 = preprocess_frame(state_tplus1) replay_memory.add(state, action, reward, state_tplus1, float(is_finished)) state = state_tplus1 if is_finished: ep_reward = sum(dq) log.logkv("Steps", step) log.logkv("Episode reward", ep_reward) log.logkv("Episode number", episode) log.dumpkvs() print("Step", step, ". Finished episode", episode, "with reward ", ep_reward) dq = [] state = preprocess_frame(env.reset()) episode += 1 for _ in range(30): # NOOP for ~90 frames to skip the start screen. Range 30 used because each # step executed for 3 frames on average. Action 0 stands for doing nothing env.step(0) if watch_flag: env.render() if step > 10000 and step % learn_freq == 0: # only start training after 10000 steps are completed batch = replay_memory.sample(batch_size, beta=beta.value(step)) states = batch[0] actions = batch[1] rewards = batch[2] states_tplus1 = batch[3] finished_vars = batch[4] weights = batch[5] state_indeces = batch[6] errors = train_fn(states, actions, rewards, states_tplus1, finished_vars, weights) priority_order_new = np.abs(errors) + replay_epsilon replay_memory.update_priorities(state_indeces, priority_order_new) if step % save_freq == 0: print("State save", step) dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) if step > NUM_STEPS: print("Finished training. Saving model to ./saved_model/model.ckpt") dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) break
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, action_replay=True, param_noise=False, callback=None, load_path=None, **network_kwargs): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = rdqn.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) elif action_replay: replay_buffer = ActionreplayBuffer(buffer_size, env.action_space.n) beta_schedule = None else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = PiecewiseSchedule([(0, 1.0), (int(1e6), 0.1), (int(1e7), 0.01)], outside_value=0.01) '''exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps)''' # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. 
replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and action_replay: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size, action) if len(obses_t) != 0: weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) logger.record_tabular("reinforce terminate action :", action) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
    # value_summary.value.add(tag='episode')
    qec_summary.value.add(tag='qec_mean')
    qec_summary.value.add(tag='qec_fount')
    value_summary.value.add(tag='steps')
    value_summary.value.add(tag='episodes')

    with U.make_session(4) as sess:
        # EMDQN
        exploration = PiecewiseSchedule(
            [
                (0, 1.0),
                (args.end_training, 1.0),
                # (args.end_training+1, 1.0),
                # (args.end_training+1, 0.005),
                (args.end_training + 100000, 0.005),
                # (approximate_num_iters / 5, 0.1),
                # (approximate_num_iters / 3, 0.01)
            ], outside_value=0.005)

        replay_buffer = ReplayBufferContra(args.replay_buffer_size)
        ec_buffer = []
        buffer_size = int(100000)
        # input_dim = 1024
        for i in range(env.action_space.n):
            ec_buffer.append(
                LRU_KNN_TEST(buffer_size, args.latent_dim, 'game',
        optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        # optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        gamma=0.99,
        grad_norm_clipping=10,
        input_dim=input_dim,
        batch_size=args.batch_size,
        K=args.negative_samples,
        predict=args.predict
    )
    tf_writer.add_graph(sess.graph)

    approximate_num_iters = args.num_steps
    exploration = PiecewiseSchedule([
        (0, 1.0),
        (400000, 0.05),
        (800000, 0.01)
    ], outside_value=0.01)
    # if args.prioritized:
    #     replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
    #     beta_schedule = LinearSchedule(approximate_num_iters, initial_p=args.prioritized_beta0, final_p=1.0)
    # else:
    #     replay_buffer = ReplayBufferHash(args.replay_buffer_size)

    U.initialize()
    # update_encoder([0])
    num_iters = 0

    # Load the model
    state = maybe_load_model(savedir, container)
def learn( env, p_dist_func, lr=5e-4, eps=0.0003125, max_timesteps=100000, buffer_size=50000, exp_t1=1e6, exp_p1=0.1, exp_t2=25e6, exp_p2=0.01, # exploration_fraction=0.1, # exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=0.95, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16, param_noise=False, callback=None, dist_params=None, n_action=None, action_map=None): """Train a distdeepq model. Parameters ------- env: gym.Env environment to train on p_dist_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/distdeepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model sess = make_session(num_cpu=num_cpu) sess.__enter__() # logger.configure() def make_obs_ph(name): return U.BatchInput(env.observation_space.shape, name=name) if dist_params is None: raise ValueError('dist_params is required') # z, dz = build_z(**dist_params) act, train, update_target, debug = distdeepq.build_train( make_obs_ph=make_obs_ph, p_dist_func=p_dist_func, # num_actions=env.action_space.n, n_action=n_action, optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=eps), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, dist_params=dist_params) act_params = { 'make_obs_ph': make_obs_ph, 'p_dist_func': p_dist_func, 'num_actions': n_action, 'dist_params': dist_params } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. #exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), # initial_p=1.0, # final_p=exploration_final_eps) # exploration = PiecewiseSchedule([(0, 1.0),(max_timesteps/25, 0.1), # (max_timesteps, 0.01)], outside_value=0.01) exploration = PiecewiseSchedule([(0, 1.0), (exp_t1, exp_p1), (exp_t2, exp_p2)], outside_value=exp_p2) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] reset = False action_val = action_map[action] new_obs, rew, done, _ = env.step(action_val) # env.render() # rew = rew-1 for proposed loss with new metric # rew = rew-1 # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) # debug['pi'] = tf.Print(debug['pi'], [debug['pi'], "target pi"]) # tf.Print(debug['mu'], [debug['mu'], "target mu"]) # tf.Print(debug['sigma'], [debug['sigma'], "target sigma"]) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) U.save_state(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) U.load_state(model_file) return ActWrapper(act, act_params)
        return actual_model(img_in, num_actions, scope, layer_norm=args.layer_norm, **kwargs)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
        q_func=model_wrapper,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        gamma=0.99,
        grad_norm_clipping=10,
        double_q=args.double_q,
        param_noise=args.param_noise
    )

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([
        (0, 1.0),
        (approximate_num_iters / 50, 0.1),
        (approximate_num_iters / 5, 0.01)
    ], outside_value=0.01)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(args.replay_buffer_size)

    U.initialize()
    update_target()
    num_iters = 0

    # Load the model
    state = maybe_load_model(savedir, container)
def learn(env, q_func, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.01, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, test_agent=1e6, param_noise=False, double=True, lambda_double=False, lam=0.2, targets=1, piecewise_schedule=False, callback=None): """Train a deepq model. Parameters ------- env: gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimizer for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. set to None to disable printing batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. callback: (locals, globals) -> None function called at every steps with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function. 
""" # Create all the functions necessary to train the model config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) sess.__enter__() # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space_shape = env.observation_space.shape def make_obs_ph(name): return BatchInput(observation_space_shape, name=name) act, train, update_target, debug = deepq_base.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise, double_q=double, lambda_double=lambda_double, lam=lam, targets=targets, ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. if piecewise_schedule: exploration = PiecewiseSchedule(endpoints=[(0,1.0),(1e6,exploration_final_eps),(24e6,0.01)], outside_value=0.01) else: exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() targets_seq = np.array([i for i in range(targets)],dtype=np.int32) targets_lam = lam ** targets_seq for target in range(targets): update_target[target]() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True epinfobuf = deque(maxlen=100) test_flag = False with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, info = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs maybeepinfo = info.get('episode') if maybeepinfo: epinfobuf.extend([maybeepinfo]) episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights, targets_lam) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. targets_seq = np.roll(targets_seq, 1) targets_lam = np.roll(targets_lam, -1) update_target[targets_seq[0]]() if t > learning_starts and t % test_agent == 0: test_flag = True if done and test_flag: nEpisodes = 50 rewards = deque(maxlen=nEpisodes) for i in range(nEpisodes): obs, done = env.reset(), False episode_rew = 0 reward = 0 maybeepinfo = None while maybeepinfo is None: obs, rew, done, info = env.step(act(obs[None], stochastic=True, update_eps=0.001)[0]) maybeepinfo = info.get('episode') if maybeepinfo: reward = maybeepinfo['r'] rewards.extend([reward]) # time.sleep(0.01) # print("Episode:", reward) logger.record_tabular("test_reward_mean", np.mean([rew for rew in rewards])) logger.record_tabular("steps", t) logger.dump_tabular() obs = env.reset() test_flag = False mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len(episode_rewards) % print_freq == 0: mean_reward = safemean([epinfo['r'] for epinfo in epinfobuf]) logger.record_tabular("episode_reward_mean", mean_reward) logger.record_tabular("eplenmean" , safemean([epinfo['l'] for epinfo in epinfobuf])) logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) #logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_reward > saved_mean_reward or ((mean_reward >= saved_mean_reward) and mean_reward > 0): if print_freq is not None: logger.log("Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_reward)) save_state(model_file) model_saved = True saved_mean_reward = mean_reward act.save() if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format(saved_mean_reward)) load_state(model_file) return act
            if args.dueling else simple_bootstrap_model,
        num_actions=2 * args.mdp_dimension,
        optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        gamma=0.99,
        grad_norm_clipping=10,
        double_q=args.double_q,
        heads=1,
        device=args.device)

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule(
        [
            (0, 1.0),
            (args.num_steps / args.epsilon_schedule, 0.1),
            # (approximate_num_iters / 50, 0.1),
            (args.num_steps / (args.epsilon_schedule * 0.1), 0.01)
            # (approximate_num_iters / 5, 0.01)
        ], outside_value=0.01)
    learning_rate = PiecewiseSchedule(
        [(0, 1e-4),
         (args.num_steps / args.learning_schedule, 1e-4),
         (args.num_steps / (args.learning_schedule * 0.5), 5e-5)],
        outside_value=5e-5)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
                act(np.array(tobs)[None], stochastic=0.05,
                    act_noise=np.random.randn(1, args.latent_dim))[0]
            tobs, rew, done, info = tenv.step(action)
            print(info)
            if done and len(info["rewards"]) > 0:
                score = info["rewards"][-1]
                print("episode #%d: %.2f" % (i + 1, score))
                scores.append(score)
                tobs = tenv.reset()
                break
        avg_score = np.mean(scores)
        print("avgscore: %.2f" % avg_score)
        return avg_score

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([(0, 1.0),
                                     (approximate_num_iters / 50, 0.1),
                                     (approximate_num_iters / 5, 0.01)],
                                    outside_value=0.01)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(args.replay_buffer_size)

    U.initialize()
    update_target()
    num_iters = 0
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun, mi_w_schedule, et_w_schedule, mi_prioritization): if (replay_strategy == 'future') or (replay_strategy == 'final'): future_p = 1 - (1. / (1 + replay_k)) else: future_p = 0 mi_w_scheduler = PiecewiseSchedule(endpoints=mi_w_schedule) et_w_scheduler = PiecewiseSchedule(endpoints=et_w_schedule) def _sample_her_transitions(ddpg, ir, episode_batch, batch_size_in_transitions, mi_r_scale, sk_r_scale, t): """episode_batch is {key: array(buffer_size x T x dim_key)} """ T = episode_batch['u'].shape[1] rollout_batch_size = episode_batch['u'].shape[0] batch_size = batch_size_in_transitions # Select which episodes and time steps to use. episode_idxs = np.random.randint(0, rollout_batch_size, batch_size) t_samples = np.random.randint(T, size=batch_size) # calculate intrinsic rewards mi_trans = np.zeros([episode_idxs.shape[0], 1]) sk_trans = np.zeros([episode_idxs.shape[0], 1]) if ir: if mi_prioritization and not (episode_batch['p'].sum() == 0): r_traj = rankdata(episode_batch['p'], method='dense') r_traj = r_traj - 1 if not (r_traj.sum() == 0): p_traj = r_traj / r_traj.sum() episode_idxs = np.random.choice(rollout_batch_size, size=batch_size, replace=True, p=p_traj.flatten()) o_curr = episode_batch['o'][episode_idxs, t_samples].copy() o_curr = np.reshape(o_curr, (o_curr.shape[0], 1, o_curr.shape[-1])) o_next = episode_batch['o'][episode_idxs, t_samples + 1].copy() o_next = np.reshape(o_next, (o_next.shape[0], 1, o_next.shape[-1])) o_s = np.concatenate((o_curr, o_next), axis=1) if mi_r_scale > 0: neg_l = ddpg.run_mi(o_s) mi_trans = (-neg_l).copy() o = episode_batch['o'][episode_idxs, t_samples].copy() z = episode_batch['z'][episode_idxs, t_samples].copy() if sk_r_scale > 0: sk_r = ddpg.run_sk(o, z) sk_trans = sk_r.copy() # # transitions = {} for key in episode_batch.keys(): if not (key == 'm' or key == 's' or key == 'p'): transitions[key] = episode_batch[key][episode_idxs, t_samples].copy() else: transitions[key] = episode_batch[key][episode_idxs].copy() transitions['m'] = transitions['m'].flatten().copy() transitions['s'] = transitions['s'].flatten().copy() her_indexes = np.where(np.random.uniform(size=batch_size) < future_p) future_offset = np.random.uniform(size=batch_size) * (T - t_samples) future_offset = future_offset.astype(int) future_t = (t_samples + 1 + future_offset)[her_indexes] if replay_strategy == 'final': future_t[:] = T future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t] transitions['g'][her_indexes] = future_ag info = {} for key, value in transitions.items(): if key.startswith('info_'): info[key.replace('info_', '')] = value reward_params = {k: transitions[k] for k in ['ag_2', 'g']} reward_params['info'] = info transitions['r'] = reward_fun(**reward_params) transitions = { k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys() } if ir: transitions['m'] = mi_trans.flatten().copy() transitions['s'] = sk_trans.flatten().copy() transitions['m_w'] = mi_w_scheduler.value(t) transitions['s_w'] = 1.0 transitions['r_w'] = 1.0 transitions['e_w'] = et_w_scheduler.value(t) assert (transitions['u'].shape[0] == batch_size_in_transitions) return transitions return _sample_her_transitions
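# Usage sketch (illustrative values only): the two weight schedules are passed in
# as raw endpoint lists and wrapped in PiecewiseSchedule(endpoints=...) above, so
# for example the mutual-information weight can be annealed while the "et" weight
# stays constant. my_reward_fun is a hypothetical goal-conditioned reward function.
sample_her_transitions = make_sample_her_transitions(
    replay_strategy='future',
    replay_k=4,
    reward_fun=my_reward_fun,
    mi_w_schedule=[(0, 1.0), (int(5e4), 0.0)],   # anneal m_w from 1.0 down to 0.0
    et_w_schedule=[(0, 0.1), (int(5e4), 0.1)],   # keep e_w fixed at 0.1
    mi_prioritization=False)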
            action = act(np.array(tobs)[None], stochastic=0.05)[0]
            tobs, rew, done, info = tenv.step(action)
            print(info)
            if done and len(info["rewards"]) > 0:
                score = info["rewards"][-1]
                print("episode #%d: %.2f" % (i + 1, score))
                scores.append(score)
                tobs = tenv.reset()
                break
        avg_score = np.mean(scores)
        print("avgscore: %.2f" % avg_score)
        return avg_score

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([(0, 1.0), (800000, 0.05), (1600000, 0.01)],
                                    outside_value=0.01)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(args.replay_buffer_size, frame_history_len=1)

    U.initialize()
    update_target()
    num_iters = 0
def main(): with tf_util.make_session() as session: act_fn, train_fn, target_update_fn, debug_fn = deepq.build_train( make_obs_ph=lambda name: Uint8Input([int(input_height), int(input_width)], name=name), q_func=q_function_nn, num_actions=action_space_size, optimizer=tf.train.AdamOptimizer(learning_rate=0.001), gamma=0.99, grad_norm_clipping=10, double_q=False) epsilon = PiecewiseSchedule([(0, 1.0), (400, 1.0), # since we start training at 10000 steps (800, 0.4), (2000, 0.2), (4000, 0.1), (20000, 0.05)], outside_value=0.01) replay_memory = PrioritizedReplayBuffer(replay_memory_size, replay_alpha) beta = LinearSchedule(int(NUM_STEPS/4), initial_p=replay_beta, final_p=1.0) tf_util.initialize() target_update_fn() state = env.reset() state = preprocess_frame(state) watch_train = False dq = [] # a queue to store episode rewards start_step = 1 episode = 1 if is_load_model: dict_state = load_model() replay_memory = dict_state["replay_memory"] dq = dict_state["dq"] start_step = dict_state["step"] + 1 last_step = 0 iteration = 1 win_count = 0 lose_count = 0 for step in itertools.count(start=start_step): action = act_fn(state[np.newaxis], update_eps=epsilon.value(step))[0] # print (str(action) + " ", end=' ') # print (action) state_tplus1, reward, is_finished = env.act(action, step-last_step) dq.append(reward) if watch_flag: # env.render() time.sleep(1.0/fps) state_tplus1 = preprocess_frame(state_tplus1) if is_finished == "win": win_count += 1 is_finished = True r = "win" elif is_finished == "lose": lose_count += 1 is_finished = True r = "lose" else: is_finished = False replay_memory.add(state, action, reward, state_tplus1, float(is_finished)) state = state_tplus1 if is_finished: ep_reward = sum(dq) log.logkv("Steps", step-last_step) log.logkv("Episode reward", ep_reward) log.logkv("Episode number", episode) log.logkv("Results", r) log.dumpkvs() # print () print ("Step", step-last_step, "Image", step, ". Finished episode", episode, "with reward ", ep_reward, "Results", r) print ("================================") os.system("mv new_" + str(step-last_step) + ".png logs/new_" + str(iteration) + ".png") iteration += 1 os.system("rm new_*") dq = [] state = preprocess_frame(env.reset()) episode += 1 last_step = step # if step > 10000 and step % learn_freq == 0: if step > 40 and step % learn_freq == 0: batch = replay_memory.sample(batch_size, beta=beta.value(step)) states = batch[0] actions = batch[1] rewards = batch[2] states_tplus1 = batch[3] finished_vars = batch[4] weights = batch[5] state_indeces = batch[6] errors = train_fn(states, actions, rewards, states_tplus1, finished_vars, weights) priority_order_new = np.abs(errors) + replay_epsilon replay_memory.update_priorities(state_indeces, priority_order_new) if step % save_freq == 0: print("State save", step) dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) if step > NUM_STEPS: print("Finished training. Saving model to ./saved_model/model.ckpt") dict_state = { "step": step, "replay_memory": replay_memory, "dq": dq } save_model(dict_state) break
            action = act(np.array(tobs)[None], stochastic=0.05)
            tobs, rew, done, info = tenv.step(action)
            print(info)
            if done and len(info["rewards"]) > 0:
                score = info["rewards"][-1]
                print("episode #%d: %.2f" % (i + 1, score))
                scores.append(score)
                tobs = tenv.reset()
                break
        avg_score = np.mean(scores)
        print("avgscore: %.2f" % avg_score)
        return avg_score

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([(0, 1.0),
                                     (approximate_num_iters / 10, 0.1),
                                     (approximate_num_iters / 5, 0.01)],
                                    outside_value=0.01)

    U.initialize()
    num_iters = 0

    # Load the model
    state = maybe_load_model(savedir, container)
    if state is not None:
        num_iters, replay_buffer = state["num_iters"], state["replay_buffer"]
        monitored_env.set_state(state["monitor_state"])

    start_time, start_steps = None, None
    steps_per_iter = RunningAvg(0.999)
    iteration_time_est = RunningAvg(0.999)
            noisy=args.noisy
        )
    else:
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),
            gamma=0.99,
            double_q=args.double_q,
            noisy=args.noisy
        )

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([
        (0, 1.0),
        (int(args.num_steps * 0.1), 0.1)
        # (approximate_num_iters / 5, 0.01)
    ], outside_value=0.1)
    learning_rate = PiecewiseSchedule([
        (0, 1e-3),
        (1, 1e-3)
    ], outside_value=1e-3)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(args.replay_buffer_size)

    U.initialize()
    update_target()
    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=lambda name: Uint8Input(env.observation_space.shape, name=name),
        q_func=model_wrapper,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        gamma=0.99,
        grad_norm_clipping=10,
        double_q=args.double_q,
        param_noise=args.param_noise)

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([(0, 1.0),
                                     (approximate_num_iters / 50, 0.1),
                                     (approximate_num_iters / 5, 0.01)],
                                    outside_value=0.01)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
                                       initial_p=args.prioritized_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(args.replay_buffer_size)

    U.initialize()
    update_target()
    num_iters = 0
    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=lambda name: U.Uint8Input(
            env.observation_space.shape, name=name),
        q_func=dueling_model if args.dueling else model,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        gamma=0.99,
        grad_norm_clipping=10,
        double_q=args.double_q)

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule(
        [
            (0, 1.0),
            (1e6 / 4, 0.1),
            # (approximate_num_iters / 50, 0.1),
            # (5e6 / 4, 0.01)
            # (approximate_num_iters / 5, 0.01)
        ], outside_value=0.01)
    learning_rate = PiecewiseSchedule(
        [
            (0, 1e-4),
            (1e6 / 4, 1e-4),
            # (approximate_num_iters / 50, 0.1),
            (5e6 / 4, 5e-5)
            # (approximate_num_iters / 5, 0.01)
        ], outside_value=5e-5)

    if args.prioritized:
        replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
        beta_schedule = LinearSchedule(approximate_num_iters,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
        gamma=args.gamma,
        grad_norm_clipping=10,
    )

    approximate_num_iters = args.num_steps
    exploration = PiecewiseSchedule(
        [
            (0, 1),
            (args.end_training, 1.0),
            # (args.end_training+1, 1.0),
            # (args.end_training+1, 0.005),
            (args.end_training + 10000, 1.0),
            (args.end_training + 200000, 0.05),
            (args.end_training + 400000, 0.01),
            # (approximate_num_iters / 5, 0.1),
            # (approximate_num_iters / 3, 0.01)
        ], outside_value=0.01)

    replay_buffer = ReplayBufferHash(args.replay_buffer_size)

    U.initialize()
    num_iters = 0
    num_episodes = 0
    non_discount_return = [0.0]
    discount_return = [0.0]

    # Load the model
                act(np.array(tobs)[None], stochastic=0.05,
                    act_noise=np.random.randn(1, args.latent_dim))[0]
            tobs, rew, done, info = tenv.step(action)
            print(info)
            if done and len(info["rewards"]) > 0:
                score = info["rewards"][-1]
                print("episode #%d: %.2f" % (i + 1, score))
                scores.append(score)
                tobs = tenv.reset()
                break
        avg_score = np.mean(scores)
        print("avgscore: %.2f" % avg_score)
        return avg_score

    approximate_num_iters = args.num_steps / 4
    exploration = PiecewiseSchedule([(0, 1.0),
                                     (args.begin_training, 1.0),
                                     (approximate_num_iters / 10, 0.1),
                                     (approximate_num_iters / 5, 0.01)],
                                    outside_value=0.01)

    U.initialize()
    num_iters = 0

    # Load the model
    state = maybe_load_model(savedir, container)
    if state is not None:
        num_iters, replay_buffer = state["num_iters"], state["replay_buffer"]
        monitored_env.set_state(state["monitor_state"])

    start_time, start_steps = None, None
    steps_per_iter = RunningAvg(0.999)
    iteration_time_est = RunningAvg(0.999)