def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("processes", type=int)
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--t-max", type=int, default=5)
    parser.add_argument("--replay-start-size", type=int, default=10000)
    parser.add_argument("--n-times-replay", type=int, default=4)
    parser.add_argument("--beta", type=float, default=1e-2)
    parser.add_argument("--profile", action="store_true")
    parser.add_argument("--steps", type=int, default=10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--lr", type=float, default=7e-4)
    parser.add_argument("--eval-interval", type=int, default=10**5)
    parser.add_argument("--eval-n-runs", type=int, default=10)
    parser.add_argument("--use-lstm", action="store_true")
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default="")
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help="Monitor env. Videos and additional information are saved as output files.",
    )
    parser.set_defaults(use_lstm=False)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    # If you use more than one process, the results will no longer be
    # deterministic even with the same random seed.
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.processes) + args.seed * args.processes
    assert process_seeds.max() < 2**31

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    n_actions = gym.make(args.env).action_space.n

    # Shared convolutional torso for both heads.
    input_to_hidden = nn.Sequential(
        nn.Conv2d(4, 16, 8, stride=4),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(2592, 256),
        nn.ReLU(),
    )
    # ACER needs both a policy head (pi) and an action-value head (q)
    # on top of the shared features.
    head = acer.ACERDiscreteActionHead(
        pi=nn.Sequential(
            nn.Linear(256, n_actions),
            SoftmaxCategoricalHead(),
        ),
        q=nn.Sequential(
            nn.Linear(256, n_actions),
            DiscreteActionValueHead(),
        ),
    )
    if args.use_lstm:
        model = pfrl.nn.RecurrentSequential(
            input_to_hidden,
            nn.LSTM(num_layers=1, input_size=256, hidden_size=256),
            head,
        )
    else:
        model = nn.Sequential(input_to_hidden, head)
    model.apply(pfrl.initializers.init_chainer_default)

    opt = pfrl.optimizers.SharedRMSpropEpsInsideSqrt(
        model.parameters(), lr=args.lr, eps=4e-3, alpha=0.99
    )
    # Divide the total capacity across processes; each worker keeps its own buffer.
    replay_buffer = EpisodicReplayBuffer(10**6 // args.processes)

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    agent = acer.ACER(
        model,
        opt,
        t_max=args.t_max,
        gamma=0.99,
        replay_buffer=replay_buffer,
        n_times_replay=args.n_times_replay,
        replay_start_size=args.replay_start_size,
        beta=args.beta,
        phi=phi,
        max_grad_norm=40,
        recurrent=args.use_lstm,
    )

    if args.load:
        agent.load(args.load)

    def make_env(process_idx, test):
        # Use different random seeds for train and test envs
        process_seed = process_seeds[process_idx]
        env_seed = 2**31 - 1 - process_seed if test else process_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    if args.demo:
        env = make_env(0, True)
        eval_stats = experiments.eval_performance(
            env=env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs
        )
        print(
            "n_runs: {} mean: {} median: {} stdev: {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            for pg in agent.optimizer.param_groups:
                assert "lr" in pg
                pg["lr"] = value

        lr_decay_hook = experiments.LinearInterpolationHook(
            args.steps, args.lr, 0, lr_setter
        )
        experiments.train_agent_async(
            agent=agent,
            outdir=args.outdir,
            processes=args.processes,
            make_env=make_env,
            profile=args.profile,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            global_step_hooks=[lr_decay_hook],
            save_best_so_far_agent=False,
        )
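The per-process seeding above is easy to get wrong, so here is a tiny standalone sketch of the scheme the comments describe (the helper name derive_seeds is mine, not PFRL's): training seeds are consecutive integers offset by seed * processes, and each evaluation env mirrors its training seed from the top of the 31-bit range so the two never collide.

# Illustrative sketch of the per-process seeding scheme used above
# (hypothetical helper, not part of PFRL).
import numpy as np

def derive_seeds(seed, processes):
    # Training seeds: consecutive integers offset by seed * processes.
    train_seeds = np.arange(processes) + seed * processes
    # Evaluation seeds mirror them from the top of the 31-bit range.
    test_seeds = 2**31 - 1 - train_seeds
    assert train_seeds.max() < 2**31
    return train_seeds, test_seeds

train, test = derive_seeds(seed=1, processes=4)
print(train.tolist())  # [4, 5, 6, 7]
print(test.tolist())   # [2147483643, 2147483642, 2147483641, 2147483640]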
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--processes", type=int, default=16)
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--t-max", type=int, default=5)
    parser.add_argument("--beta", type=float, default=1e-2)
    parser.add_argument("--profile", action="store_true")
    parser.add_argument("--steps", type=int, default=8 * 10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--lr", type=float, default=7e-4)
    parser.add_argument("--eval-interval", type=int, default=250000)
    parser.add_argument("--eval-n-steps", type=int, default=125000)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument("--load", type=str, default="")
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help="Monitor env. Videos and additional information are saved as output files.",
    )
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    # If you use more than one process, the results will no longer be
    # deterministic even with the same random seed.
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.processes) + args.seed * args.processes
    assert process_seeds.max() < 2**31

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(process_idx, test):
        # Use different random seeds for train and test envs
        process_seed = process_seeds[process_idx]
        env_seed = 2**31 - 1 - process_seed if test else process_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    sample_env = make_env(0, False)
    obs_size = sample_env.observation_space.low.shape[0]
    n_actions = sample_env.action_space.n

    model = nn.Sequential(
        nn.Conv2d(obs_size, 16, 8, stride=4),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(2592, 256),
        nn.ReLU(),
        # Branched head: a softmax policy and a scalar state value.
        pfrl.nn.Branched(
            nn.Sequential(
                nn.Linear(256, n_actions),
                SoftmaxCategoricalHead(),
            ),
            nn.Linear(256, 1),
        ),
    )

    # SharedRMSprop is the same as torch.optim.RMSprop except that it
    # initializes its state in __init__, allowing it to be moved to
    # shared memory.
    opt = SharedRMSpropEpsInsideSqrt(model.parameters(), lr=7e-4, eps=1e-1, alpha=0.99)
    assert opt.state_dict()["state"], (
        "To share optimizer state across processes, the state must be"
        " initialized before training."
    )

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    agent = a3c.A3C(
        model,
        opt,
        t_max=args.t_max,
        gamma=0.99,
        beta=args.beta,
        phi=phi,
        max_grad_norm=40.0,
    )

    if args.load_pretrained:
        raise Exception("Pretrained models are currently unsupported.")

    if args.load:
        agent.load(args.load)

    if args.demo:
        env = make_env(0, True)
        eval_stats = experiments.eval_performance(
            env=env, agent=agent, n_steps=args.eval_n_steps, n_episodes=None
        )
        print(
            "n_steps: {} mean: {} median: {} stdev: {}".format(
                args.eval_n_steps,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            for pg in agent.optimizer.param_groups:
                assert "lr" in pg
                pg["lr"] = value

        lr_decay_hook = experiments.LinearInterpolationHook(
            args.steps, args.lr, 0, lr_setter
        )
        experiments.train_agent_async(
            agent=agent,
            outdir=args.outdir,
            processes=args.processes,
            make_env=make_env,
            profile=args.profile,
            steps=args.steps,
            eval_n_steps=args.eval_n_steps,
            eval_n_episodes=None,
            eval_interval=args.eval_interval,
            global_step_hooks=[lr_decay_hook],
            save_best_so_far_agent=True,
        )
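For quick inspection outside of experiments.eval_performance, a trained agent can also be rolled out by hand with PFRL's generic agent interface (act, observe, and the eval_mode context manager). The sketch below assumes agent and make_env are constructed exactly as in the A3C main() above; run_one_eval_episode is a hypothetical helper, not part of the script.

# Minimal manual-evaluation sketch (assumes `agent` and `make_env` are built
# as in main() above; illustrative only, not part of the example script).
def run_one_eval_episode(agent, make_env):
    env = make_env(0, True)  # test env, so no reward clipping / episodic life
    obs = env.reset()
    done = False
    total_reward = 0.0
    # eval_mode() switches the agent to its evaluation-time acting behaviour.
    with agent.eval_mode():
        while not done:
            action = agent.act(obs)
            obs, reward, done, _ = env.step(action)
            total_reward += reward
            # Last argument is `reset`; False because the episode ends naturally.
            agent.observe(obs, reward, done, False)
    return total_reward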
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env", type=str, default="BreakoutNoFrameskip-v4", help="Gym Env ID."
    )
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU device ID. Set to -1 to use CPUs only."
    )
    parser.add_argument(
        "--num-envs",
        type=int,
        default=8,
        help="Number of env instances run in parallel.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument(
        "--steps", type=int, default=10**7, help="Total time steps for training."
    )
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--lr", type=float, default=2.5e-4, help="Learning rate.")
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=100000,
        help="Interval (in timesteps) between evaluation phases.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=10,
        help="Number of episodes run in an evaluation phase.",
    )
    parser.add_argument(
        "--demo",
        action="store_true",
        default=False,
        help="Run demo episodes, not training.",
    )
    parser.add_argument(
        "--load",
        type=str,
        default="",
        help=(
            "Directory path to load a saved agent data from"
            " if it is a non-empty string."
        ),
    )
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help="Monitor env. Videos and additional information are saved as output files.",
    )
    parser.add_argument(
        "--update-interval",
        type=int,
        default=128 * 8,
        help="Interval (in timesteps) between PPO iterations.",
    )
    parser.add_argument(
        "--batchsize",
        type=int,
        default=32 * 8,
        help="Size of minibatch (in timesteps).",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=4,
        help="Number of epochs used for each PPO iteration.",
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10000,
        help="Interval (in timesteps) of printing logs.",
    )
    parser.add_argument(
        "--recurrent",
        action="store_true",
        default=False,
        help="Use a recurrent model. See the code for the model definition.",
    )
    parser.add_argument(
        "--flicker",
        action="store_true",
        default=False,
        help=(
            "Use so-called flickering Atari, where each"
            " screen is blacked out with probability 0.5."
        ),
    )
    parser.add_argument(
        "--no-frame-stack",
        action="store_true",
        default=False,
        help="Disable frame stacking so that the agent can only see the current screen.",
    )
    parser.add_argument(
        "--checkpoint-frequency",
        type=int,
        default=None,
        help="Frequency at which agents are stored.",
    )
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and num_envs=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and num_envs=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(idx, test):
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2**32 - 1 - process_seed if test else process_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
            flicker=args.flicker,
            frame_stack=False,
        )
        env.seed(env_seed)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        # Bind idx via a default argument; a bare `lambda: make_env(idx, test)`
        # would late-bind idx and give every subprocess the last index.
        vec_env = pfrl.envs.MultiprocessVectorEnv(
            [
                (lambda idx=idx: make_env(idx, test))
                for idx in range(args.num_envs)
            ]
        )
        if not args.no_frame_stack:
            vec_env = pfrl.wrappers.VectorFrameStack(vec_env, 4)
        return vec_env

    sample_env = make_batch_env(test=False)
    print("Observation space", sample_env.observation_space)
    print("Action space", sample_env.action_space)
    n_actions = sample_env.action_space.n
    obs_n_channels = sample_env.observation_space.low.shape[0]
    del sample_env

    def lecun_init(layer, gain=1):
        # LeCun-normal initialization; recurrent layers need their
        # input-to-hidden and hidden-to-hidden weights handled separately.
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            pfrl.initializers.init_lecun_normal(layer.weight, gain)
            nn.init.zeros_(layer.bias)
        else:
            pfrl.initializers.init_lecun_normal(layer.weight_ih_l0, gain)
            pfrl.initializers.init_lecun_normal(layer.weight_hh_l0, gain)
            nn.init.zeros_(layer.bias_ih_l0)
            nn.init.zeros_(layer.bias_hh_l0)
        return layer

    if args.recurrent:
        model = pfrl.nn.RecurrentSequential(
            lecun_init(nn.Conv2d(obs_n_channels, 32, 8, stride=4)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
            lecun_init(nn.Linear(3136, 512)),
            nn.ReLU(),
            lecun_init(nn.GRU(num_layers=1, input_size=512, hidden_size=512)),
            pfrl.nn.Branched(
                nn.Sequential(
                    lecun_init(nn.Linear(512, n_actions), 1e-2),
                    SoftmaxCategoricalHead(),
                ),
                lecun_init(nn.Linear(512, 1)),
            ),
        )
    else:
        model = nn.Sequential(
            lecun_init(nn.Conv2d(obs_n_channels, 32, 8, stride=4)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
            lecun_init(nn.Linear(3136, 512)),
            nn.ReLU(),
            pfrl.nn.Branched(
                nn.Sequential(
                    lecun_init(nn.Linear(512, n_actions), 1e-2),
                    SoftmaxCategoricalHead(),
                ),
                lecun_init(nn.Linear(512, 1)),
            ),
        )

    opt = torch.optim.Adam(model.parameters(), lr=args.lr, eps=1e-5)

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    agent = PPO(
        model,
        opt,
        gpu=args.gpu,
        phi=phi,
        update_interval=args.update_interval,
        minibatch_size=args.batchsize,
        epochs=args.epochs,
        clip_eps=0.1,
        clip_eps_vf=None,
        standardize_advantages=True,
        entropy_coef=1e-2,
        recurrent=args.recurrent,
        max_grad_norm=0.5,
    )
    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev: {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        step_hooks = []

        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            for param_group in agent.optimizer.param_groups:
                param_group["lr"] = value

        step_hooks.append(
            experiments.LinearInterpolationHook(args.steps, args.lr, 0, lr_setter)
        )

        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            checkpoint_freq=args.checkpoint_frequency,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            save_best_so_far_agent=False,
            step_hooks=step_hooks,
        )
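All three scripts anneal the learning rate with experiments.LinearInterpolationHook(args.steps, args.lr, 0, lr_setter). The sketch below only illustrates the intended schedule, a straight line from --lr down to 0 over --steps; linear_value is a hypothetical stand-in, not the hook's actual implementation.

# Toy sketch of the linear learning-rate schedule fed to lr_setter above
# (hypothetical helper, not LinearInterpolationHook's internals).
def linear_value(step, total_steps, start_value, stop_value):
    # Fraction of training completed, clamped to [0, 1].
    frac = min(max(step / total_steps, 0.0), 1.0)
    return start_value + (stop_value - start_value) * frac

# With --lr 2.5e-4 and --steps 10**7, the value decays from 2.5e-4 at step 0
# to 0 at the final step.
for step in (0, 5 * 10**6, 10**7):
    print(step, linear_value(step, 10**7, 2.5e-4, 0.0))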