def _test_abc_batch(self, steps=100000, require_success=True, gpu=-1, load_model=False, num_envs=4):
    """Train an agent on the batch ABC env, evaluate it, and save it.

    When ``require_success`` is set, every evaluation episode must reach the
    successful return reported by the env factory.
    """
    train_env, _ = self.make_vec_env_and_successful_return(test=False, num_envs=num_envs)
    eval_env, target_return = self.make_vec_env_and_successful_return(
        test=True, num_envs=num_envs)
    agent = self.make_agent(train_env, gpu)
    # Non-episodic ABC runs use a short artificial episode cap.
    episode_cap = None if self.episodic else 2
    if load_model:
        print("Load agent from", self.agent_dirname)
        agent.load(self.agent_dirname)

    # Train until `steps` or until the successful score is reached.
    train_agent_batch_with_evaluation(
        agent=agent,
        env=train_env,
        steps=steps,
        outdir=self.tmpdir,
        eval_interval=200,
        eval_n_steps=None,
        eval_n_episodes=40,
        successful_score=target_return,
        eval_env=eval_env,
        log_interval=100,
        max_episode_len=episode_cap,
    )
    train_env.close()

    # Evaluate the trained agent on the held-out test env.
    n_test_runs = 10
    eval_returns, _ = batch_run_evaluation_episodes(
        eval_env,
        agent,
        n_steps=None,
        n_episodes=n_test_runs,
        max_episode_len=episode_cap,
    )
    eval_env.close()
    if require_success:
        successes = np.sum(np.asarray(eval_returns) >= target_return)
        assert successes == n_test_runs

    # Persist the agent so later tests can reload it.
    agent.save(self.agent_dirname)
def _test_batch_training(self, gpu, steps=5000, load_model=False, require_success=True):
    """Run batch training end-to-end, evaluate, then save agent and buffer."""
    random_seed.set_random_seed(1)
    logging.basicConfig(level=logging.DEBUG)

    train_env, _ = self.make_vec_env_and_successful_return(test=False)
    eval_env, target_return = self.make_vec_env_and_successful_return(test=True)
    agent = self.make_agent(train_env, gpu)
    if load_model:
        print("Load agent from", self.agent_dirname)
        agent.load(self.agent_dirname)
        agent.replay_buffer.load(self.rbuf_filename)

    # Train
    train_agent_batch_with_evaluation(
        agent=agent,
        env=train_env,
        steps=steps,
        outdir=self.tmpdir,
        eval_interval=200,
        eval_n_steps=None,
        eval_n_episodes=5,
        successful_score=1,
        eval_env=eval_env,
    )
    train_env.close()

    # Test
    n_test_runs = 5
    eval_returns, _ = batch_run_evaluation_episodes(
        eval_env,
        agent,
        n_steps=None,
        n_episodes=n_test_runs,
    )
    eval_env.close()
    successes = np.sum(np.asarray(eval_returns) >= target_return)
    if require_success:
        assert successes == n_test_runs

    # Save both the agent and its replay buffer so reload tests can resume.
    agent.save(self.agent_dirname)
    agent.replay_buffer.save(self.rbuf_filename)
def main():
    """Train or evaluate Soft Actor-Critic on a Roboschool locomotion env.

    Parses command-line options, builds the squashed-Gaussian policy and twin
    Q-functions, then either runs evaluation episodes (--demo) or batch
    training with periodic evaluation across parallel subprocess envs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir", type=str, default="results",
        help=("Directory path to save output files."
              " If it does not exist, it will be created."),
    )
    parser.add_argument(
        "--env", type=str, default="RoboschoolAtlasForwardWalk-v1",
        help="OpenAI Gym env to perform algorithm on.",
    )
    parser.add_argument("--num-envs", type=int, default=4,
                        help="Number of envs run in parallel.")
    parser.add_argument("--seed", type=int, default=0,
                        help="Random seed [0, 2 ** 32)")
    parser.add_argument("--gpu", type=int, default=0,
                        help="GPU to use, set to -1 if no GPU.")
    parser.add_argument("--load", type=str, default="",
                        help="Directory to load agent from.")
    parser.add_argument(
        "--steps", type=int, default=10**7,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-n-runs", type=int, default=20,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--eval-interval", type=int, default=100000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--replay-start-size", type=int, default=10000,
        help="Minimum replay buffer size before " + "performing gradient updates.",
    )
    parser.add_argument(
        "--update-interval", type=int, default=1,
        help="Interval in timesteps between model updates.",
    )
    parser.add_argument("--batch-size", type=int, default=256,
                        help="Minibatch size")
    parser.add_argument("--render", action="store_true",
                        help="Render env states in a GUI window.")
    parser.add_argument("--demo", action="store_true",
                        help="Just run evaluation, not training.")
    parser.add_argument("--monitor", action="store_true",
                        help="Wrap env with Monitor to write videos.")
    parser.add_argument(
        "--log-interval", type=int, default=1000,
        help="Interval in timesteps between outputting log messages during training",
    )
    parser.add_argument("--log-level", type=int, default=logging.INFO,
                        help="Level of the root logger.")
    parser.add_argument(
        "--n-hidden-channels", type=int, default=1024,
        help="Number of hidden channels of NN models.",
    )
    parser.add_argument("--discount", type=float, default=0.98,
                        help="Discount factor.")
    parser.add_argument("--n-step-return", type=int, default=3,
                        help="N-step return.")
    parser.add_argument("--lr", type=float, default=3e-4,
                        help="Learning rate.")
    parser.add_argument("--adam-eps", type=float, default=1e-1,
                        help="Adam eps.")
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    def make_batch_env(test):
        # One subprocess env per index, each with its own seed.
        # (Fixed: previously `for idx, env in enumerate(range(...))` bound an
        # unused `env` local.)
        return pfrl.envs.MultiprocessVectorEnv([
            functools.partial(make_env, args, process_seeds[idx], test)
            for idx in range(args.num_envs)
        ])

    # Build one throwaway env just to read spaces and the timestep limit.
    sample_env = make_env(args, process_seeds[0], test=False)
    timestep_limit = sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    print("Observation space:", obs_space)
    print("Action space:", action_space)
    del sample_env

    action_size = action_space.low.size

    def squashed_diagonal_gaussian_head(x):
        # x concatenates mean and log-scale along the last axis.
        assert x.shape[-1] == action_size * 2
        mean, log_scale = torch.chunk(x, 2, dim=1)
        log_scale = torch.clamp(log_scale, -20.0, 2.0)
        var = torch.exp(log_scale * 2)
        base_distribution = distributions.Independent(
            distributions.Normal(loc=mean, scale=torch.sqrt(var)), 1)
        # cache_size=1 is required for numerical stability
        return distributions.transformed_distribution.TransformedDistribution(
            base_distribution,
            [distributions.transforms.TanhTransform(cache_size=1)])

    policy = nn.Sequential(
        nn.Linear(obs_space.low.size, args.n_hidden_channels),
        nn.ReLU(),
        nn.Linear(args.n_hidden_channels, args.n_hidden_channels),
        nn.ReLU(),
        nn.Linear(args.n_hidden_channels, action_size * 2),
        Lambda(squashed_diagonal_gaussian_head),
    )
    torch.nn.init.xavier_uniform_(policy[0].weight)
    torch.nn.init.xavier_uniform_(policy[2].weight)
    torch.nn.init.xavier_uniform_(policy[4].weight)
    policy_optimizer = torch.optim.Adam(
        policy.parameters(), lr=args.lr, eps=args.adam_eps)

    def make_q_func_with_optimizer():
        # Q-network consumes the concatenated (obs, action) pair.
        q_func = nn.Sequential(
            pfrl.nn.ConcatObsAndAction(),
            nn.Linear(obs_space.low.size + action_size, args.n_hidden_channels),
            nn.ReLU(),
            nn.Linear(args.n_hidden_channels, args.n_hidden_channels),
            nn.ReLU(),
            nn.Linear(args.n_hidden_channels, 1),
        )
        torch.nn.init.xavier_uniform_(q_func[1].weight)
        torch.nn.init.xavier_uniform_(q_func[3].weight)
        torch.nn.init.xavier_uniform_(q_func[5].weight)
        q_func_optimizer = torch.optim.Adam(
            q_func.parameters(), lr=args.lr, eps=args.adam_eps)
        return q_func, q_func_optimizer

    q_func1, q_func1_optimizer = make_q_func_with_optimizer()
    q_func2, q_func2_optimizer = make_q_func_with_optimizer()

    rbuf = replay_buffers.ReplayBuffer(10**6, num_steps=args.n_step_return)

    def burnin_action_func():
        """Select random actions until model is updated one or more times."""
        return np.random.uniform(action_space.low,
                                 action_space.high).astype(np.float32)

    # Hyperparameters in http://arxiv.org/abs/1802.09477
    agent = pfrl.agents.SoftActorCritic(
        policy,
        q_func1,
        q_func2,
        policy_optimizer,
        q_func1_optimizer,
        q_func2_optimizer,
        rbuf,
        gamma=args.discount,
        update_interval=args.update_interval,
        replay_start_size=args.replay_start_size,
        gpu=args.gpu,
        minibatch_size=args.batch_size,
        burnin_action_func=burnin_action_func,
        entropy_target=-action_size,
        temperature_optimizer_lr=args.lr,
    )

    if len(args.load) > 0:
        agent.load(args.load)

    if args.demo:
        eval_env = make_env(args, seed=0, test=True)
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print("n_runs: {} mean: {} median: {} stdev {}".format(
            args.eval_n_runs,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=make_batch_env(test=True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            max_episode_len=timestep_limit,
        )
def main():
    """Train or evaluate PPO on an RLBench vision-based manipulation task.

    The env is wrapped so PPO sees a flattened 64x64x3 float observation and
    a normalized continuous action space; observation/action spaces are
    hard-coded to avoid booting an RLBench instance just to query them.
    """
    import logging

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument(
        "--env",
        type=str,
        default="reach_target-ee-vision-v0",
        help="OpenAI Gym MuJoCo env to perform algorithm on.",
    )
    parser.add_argument(
        "--num-envs", type=int, default=1, help="Number of envs run in parallel."
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=2 * 10 ** 6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=100000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=100,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--render", action="store_true", help="Render env states in a GUI window."
    )
    parser.add_argument(
        "--demo", action="store_true", help="Just run evaluation, not training."
    )
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--load", type=str, default="", help="Directory to load agent from."
    )
    parser.add_argument(
        "--log-level", type=int, default=logging.INFO, help="Level of the root logger."
    )
    parser.add_argument(
        "--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=1000,
        help="Interval in timesteps between outputting log messages during training",
    )
    parser.add_argument(
        "--update-interval",
        type=int,
        default=2048,
        help="Interval in timesteps between model updates.",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=10,
        help="Number of epochs to update model for per PPO iteration.",
    )
    parser.add_argument(
        "--action-size",
        type=int,
        default=3,
        help="Action size (needs to match env.action_space)",
    )
    parser.add_argument("--batch-size", type=int, default=64, help="Minibatch size")
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)

    def make_env(process_idx, test):
        # Wrapper order: wrist camera obs -> resize to 64x64 -> flatten ->
        # grasp-action adapter -> normalize actions.
        render_mode = 'human' if args.render else None
        env = NormalizeAction(
            GraspActionWrapper(
                FlattenObservation(
                    ResizeObservation(
                        WristObsWrapper(
                            gym.make(args.env, render_mode=render_mode)
                        ),
                        (64, 64),
                    )
                ),
                args.action_size,
            )
        )
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        # (Fixed: previously `for idx, env in enumerate(range(...))` bound an
        # unused `env` local.)
        return MultiprocessVectorEnv(
            [
                functools.partial(make_env, idx, test)
                for idx in range(args.num_envs)
            ]
        )

    # Episode cap and spaces are hard-coded to match the wrapped env above, so
    # we do not need a sample env just for introspection.
    timestep_limit = 200
    obs_space = spaces.Box(low=0, high=1, shape=(64 * 64 * 3,))
    action_space = spaces.Box(low=-1.0, high=1.0, shape=(args.action_size,))
    print("Observation space:", obs_space)
    print("Action space:", action_space)

    assert isinstance(action_space, gym.spaces.Box)

    # Normalize observations based on their empirical mean and variance
    obs_normalizer = pfrl.nn.EmpiricalNormalization(
        obs_space.low.size, clip_threshold=5
    )

    obs_size = obs_space.low.size
    action_size = action_space.low.size
    policy = torch.nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, 64),
        nn.Tanh(),
        nn.Linear(64, action_size),
        pfrl.policies.GaussianHeadWithStateIndependentCovariance(
            action_size=action_size,
            var_type="diagonal",
            var_func=lambda x: torch.exp(2 * x),  # Parameterize log std
            var_param_init=0,  # log std = 0 => std = 1
        ),
    )

    vf = torch.nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, 64),
        nn.Tanh(),
        nn.Linear(64, 1),
    )

    # While the original paper initialized weights by normal distribution,
    # we use orthogonal initialization as the latest openai/baselines does.
    def ortho_init(layer, gain):
        nn.init.orthogonal_(layer.weight, gain=gain)
        nn.init.zeros_(layer.bias)

    ortho_init(policy[0], gain=1)
    ortho_init(policy[2], gain=1)
    ortho_init(policy[4], gain=1e-2)
    ortho_init(vf[0], gain=1)
    ortho_init(vf[2], gain=1)
    ortho_init(vf[4], gain=1)

    # Combine a policy and a value function into a single model
    model = pfrl.nn.Branched(policy, vf)

    opt = torch.optim.Adam(model.parameters(), lr=3e-4, eps=1e-5)

    agent = PPO(
        model,
        opt,
        obs_normalizer=obs_normalizer,
        gpu=args.gpu,
        update_interval=args.update_interval,
        minibatch_size=args.batch_size,
        epochs=args.epochs,
        clip_eps_vf=None,
        entropy_coef=0,
        standardize_advantages=True,
        gamma=0.995,
        lambd=0.97,
    )

    if args.load or args.load_pretrained:
        if args.load_pretrained:
            raise Exception("Pretrained models are currently unsupported.")
        # either load or load_pretrained must be false
        assert not args.load or not args.load_pretrained
        if args.load:
            agent.load(args.load)
        else:
            # NOTE(review): unreachable while --load-pretrained raises above;
            # kept to mirror the other PFRL example scripts.
            agent.load(utils.download_model("PPO", args.env, model_type="final")[0])

    if args.demo:
        env = make_batch_env(True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=True,
        )
def main():
    """Train or evaluate Soft Actor-Critic on an OpenAI Gym MuJoCo env.

    Supports pluggable optimizers via --optimizer (looked up in
    torch_optimizer, then torch.optim, or the official AdaBelief package).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir", type=str, default="results",
        help=("Directory path to save output files."
              " If it does not exist, it will be created."),
    )
    parser.add_argument(
        "--env", type=str, default="Hopper-v2",
        help="OpenAI Gym MuJoCo env to perform algorithm on.",
    )
    parser.add_argument("--num-envs", type=int, default=1,
                        help="Number of envs run in parallel.")
    parser.add_argument("--seed", type=int, default=0,
                        help="Random seed [0, 2 ** 32)")
    parser.add_argument("--gpu", type=int, default=0,
                        help="GPU to use, set to -1 if no GPU.")
    parser.add_argument("--load", type=str, default="",
                        help="Directory to load agent from.")
    parser.add_argument(
        "--steps", type=int, default=10**6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-n-runs", type=int, default=10,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--eval-interval", type=int, default=5000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--replay-start-size", type=int, default=10000,
        help="Minimum replay buffer size before " + "performing gradient updates.",
    )
    parser.add_argument("--batch-size", type=int, default=256,
                        help="Minibatch size")
    parser.add_argument("--render", action="store_true",
                        help="Render env states in a GUI window.")
    parser.add_argument("--demo", action="store_true",
                        help="Just run evaluation, not training.")
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument("--pretrained-type", type=str, default="best",
                        choices=["best", "final"])
    parser.add_argument("--monitor", action="store_true",
                        help="Wrap env with gym.wrappers.Monitor.")
    parser.add_argument(
        "--log-interval", type=int, default=1000,
        help="Interval in timesteps between outputting log messages during training",
    )
    parser.add_argument("--log-level", type=int, default=logging.INFO,
                        help="Level of the root logger.")
    parser.add_argument(
        "--policy-output-scale", type=float, default=1.0,
        help="Weight initialization scale of policy output.",
    )
    parser.add_argument(
        "--optimizer", type=str, default="AdaBelief",
    )
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    def make_env(process_idx, test):
        env = gym.make(args.env)
        # Unwrap TimeLimit wrapper
        assert isinstance(env, gym.wrappers.TimeLimit)
        env = env.env
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2**32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        # Normalize action space to [-1, 1]^n
        env = pfrl.wrappers.NormalizeActionSpace(env)
        if args.monitor:
            env = gym.wrappers.Monitor(env, args.outdir)
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        # (Fixed: previously `for idx, env in enumerate(range(...))` bound an
        # unused `env` local.)
        return pfrl.envs.MultiprocessVectorEnv([
            functools.partial(make_env, idx, test)
            for idx in range(args.num_envs)
        ])

    # One throwaway env to read spaces and the timestep limit.
    sample_env = make_env(process_idx=0, test=False)
    timestep_limit = sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    print("Observation space:", obs_space)
    print("Action space:", action_space)

    obs_size = obs_space.low.size
    action_size = action_space.low.size

    if LooseVersion(torch.__version__) < LooseVersion("1.5.0"):
        raise Exception("This script requires a PyTorch version >= 1.5.0")

    def squashed_diagonal_gaussian_head(x):
        # x concatenates mean and log-scale along the last axis.
        assert x.shape[-1] == action_size * 2
        mean, log_scale = torch.chunk(x, 2, dim=1)
        log_scale = torch.clamp(log_scale, -20.0, 2.0)
        var = torch.exp(log_scale * 2)
        base_distribution = distributions.Independent(
            distributions.Normal(loc=mean, scale=torch.sqrt(var)), 1)
        # cache_size=1 is required for numerical stability
        return distributions.transformed_distribution.TransformedDistribution(
            base_distribution,
            [distributions.transforms.TanhTransform(cache_size=1)])

    def make_optimizer(parameters):
        """Build the optimizer selected by --optimizer with default hyperparameters."""
        if args.optimizer == "OfficialAdaBelief":
            import adabelief_pytorch

            optim_class = adabelief_pytorch.AdaBelief
            optim = optim_class(parameters, betas=(0.9, 0.999), eps=1e-12)
        else:
            # Look up the name in torch_optimizer first, then torch.optim.
            optim_class = getattr(
                torch_optimizer,
                args.optimizer,
                getattr(torch.optim, args.optimizer, None),
            )
            # Fixed: assert BEFORE calling, so an unknown optimizer name fails
            # with this assertion instead of "'NoneType' object is not callable".
            assert optim_class is not None
            optim = optim_class(parameters)
        print(str(optim_class), "with default hyperparameters")
        return optim

    policy = nn.Sequential(
        nn.Linear(obs_size, 256),
        nn.ReLU(),
        nn.Linear(256, 256),
        nn.ReLU(),
        nn.Linear(256, action_size * 2),
        Lambda(squashed_diagonal_gaussian_head),
    )
    torch.nn.init.xavier_uniform_(policy[0].weight)
    torch.nn.init.xavier_uniform_(policy[2].weight)
    torch.nn.init.xavier_uniform_(policy[4].weight,
                                  gain=args.policy_output_scale)
    policy_optimizer = make_optimizer(policy.parameters())

    def make_q_func_with_optimizer():
        # Q-network consumes the concatenated (obs, action) pair.
        q_func = nn.Sequential(
            pfrl.nn.ConcatObsAndAction(),
            nn.Linear(obs_size + action_size, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )
        torch.nn.init.xavier_uniform_(q_func[1].weight)
        torch.nn.init.xavier_uniform_(q_func[3].weight)
        torch.nn.init.xavier_uniform_(q_func[5].weight)
        q_func_optimizer = make_optimizer(q_func.parameters())
        return q_func, q_func_optimizer

    q_func1, q_func1_optimizer = make_q_func_with_optimizer()
    q_func2, q_func2_optimizer = make_q_func_with_optimizer()

    rbuf = replay_buffers.ReplayBuffer(10**6)

    def burnin_action_func():
        """Select random actions until model is updated one or more times."""
        return np.random.uniform(action_space.low,
                                 action_space.high).astype(np.float32)

    # Hyperparameters in http://arxiv.org/abs/1802.09477
    agent = pfrl.agents.SoftActorCritic(
        policy,
        q_func1,
        q_func2,
        policy_optimizer,
        q_func1_optimizer,
        q_func2_optimizer,
        rbuf,
        gamma=0.99,
        replay_start_size=args.replay_start_size,
        gpu=args.gpu,
        minibatch_size=args.batch_size,
        burnin_action_func=burnin_action_func,
        entropy_target=-action_size,
        temperature_optimizer_lr=3e-4,
    )

    if len(args.load) > 0 or args.load_pretrained:
        if args.load_pretrained:
            raise Exception("Pretrained models are currently unsupported.")
        # either load or load_pretrained must be false
        assert not len(args.load) > 0 or not args.load_pretrained
        if len(args.load) > 0:
            agent.load(args.load)
        else:
            # NOTE(review): unreachable while --load-pretrained raises above;
            # kept to mirror the other PFRL example scripts.
            agent.load(
                utils.download_model("SAC", args.env,
                                     model_type=args.pretrained_type)[0])

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print("n_runs: {} mean: {} median: {} stdev {}".format(
            args.eval_n_runs,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=make_batch_env(test=True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            max_episode_len=timestep_limit,
        )
def main():
    """Train or evaluate DoubleDQN on the PyBullet Kuka diverse-object grasping env.

    Builds a prioritized-replay DoubleDQN agent over (image, elapsed-steps)
    observations and either evaluates (--demo) or runs batch training with
    periodic evaluation.  Env construction is order-sensitive: seeding,
    observation-space patching, and wrapper order below must be preserved.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument(
        "--demo",
        action="store_true",
        default=False,
        help="Evaluate the agent without training.",
    )
    parser.add_argument(
        "--load",
        type=str,
        default=None,
        help="Load a saved agent from a given directory.",
    )
    parser.add_argument(
        "--final-exploration-steps",
        type=int,
        default=5 * 10 ** 5,
        help="Timesteps after which we stop annealing exploration rate",
    )
    parser.add_argument(
        "--final-epsilon",
        type=float,
        default=0.2,
        help="Final value of epsilon during training.",
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=2 * 10 ** 6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--replay-start-size",
        type=int,
        default=5 * 10 ** 4,
        help="Minimum replay buffer size before performing gradient updates.",
    )
    parser.add_argument(
        "--target-update-interval",
        type=int,
        default=1 * 10 ** 4,
        help="Frequency (in timesteps) at which the target network is updated.",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=10 ** 5,
        help="Frequency (in timesteps) of evaluation phase.",
    )
    parser.add_argument(
        "--update-interval",
        type=int,
        default=1,
        help="Frequency (in timesteps) of network updates.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=100,
        help="Number of episodes used for evaluation.",
    )
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument("--lr", type=float, default=6.25e-5, help="Learning rate")
    parser.add_argument(
        "--num-envs", type=int, default=1, help="Number of envs run in parallel."
    )
    parser.add_argument(
        "--batch-size", type=int, default=32, help="Batch size used for training."
    )
    parser.add_argument(
        "--record",
        action="store_true",
        default=False,
        help="Record videos of evaluation envs. --render should also be specified.",
    )
    parser.add_argument("--gamma", type=float, default=0.99, help="Discount factor.")
    args = parser.parse_args()

    # Imported here (not at module top) so --log-level takes effect locally.
    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    # Hard cap on steps per grasping attempt (also passed to the env itself).
    max_episode_steps = 8

    def make_env(idx, test):
        # Build one seeded KukaDiverseObjectEnv for subprocess `idx`.
        from pybullet_envs.bullet.kuka_diverse_object_gym_env import (
            KukaDiverseObjectEnv,
        )  # NOQA

        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        # Set a random seed for this subprocess
        utils.set_random_seed(env_seed)
        env = KukaDiverseObjectEnv(
            isDiscrete=True,
            renders=args.render and (args.demo or not test),
            height=84,
            width=84,
            maxSteps=max_episode_steps,
            isTest=test,
        )
        # Disable file caching to keep memory usage small
        env._p.setPhysicsEngineParameter(enableFileCaching=False)
        # The env ships without an observation space; patch one in so gym
        # wrappers below can operate on it.
        assert env.observation_space is None
        env.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(84, 84, 3), dtype=np.uint8
        )
        # (84, 84, 3) -> (3, 84, 84)
        env = TransposeObservation(env, (2, 0, 1))
        env = ObserveElapsedSteps(env, max_episode_steps)
        # KukaDiverseObjectEnv internally asserts int actions
        env = CastAction(env, int)
        env.seed(int(env_seed))
        if test and args.record:
            assert args.render, "To use --record, --render needs be specified."
            video_dir = os.path.join(args.outdir, "video_{}".format(idx))
            os.mkdir(video_dir)
            env = RecordMovie(env, video_dir)
        return env

    def make_batch_env(test):
        # One subprocess env per index, each with its own seed.
        return pfrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test) for idx in range(args.num_envs)]
        )

    eval_env = make_batch_env(test=True)
    n_actions = eval_env.action_space.n

    q_func = GraspingQFunction(n_actions, max_episode_steps)

    # Use the hyper parameters of the Nature paper
    opt = pfrl.optimizers.RMSpropEpsInsideSqrt(
        q_func.parameters(),
        lr=args.lr,
        alpha=0.95,
        momentum=0.0,
        eps=1e-2,
        centered=True,
    )

    # Anneal beta from beta0 to 1 throughout training
    betasteps = args.steps / args.update_interval
    rbuf = replay_buffers.PrioritizedReplayBuffer(
        10 ** 6, alpha=0.6, beta0=0.4, betasteps=betasteps
    )

    # Epsilon-greedy with epsilon annealed 1.0 -> final_epsilon over
    # final_exploration_steps timesteps.
    explorer = explorers.LinearDecayEpsilonGreedy(
        1.0,
        args.final_epsilon,
        args.final_exploration_steps,
        lambda: np.random.randint(n_actions),
    )

    def phi(x):
        # Feature extractor
        image, elapsed_steps = x
        # Normalize RGB values: [0, 255] -> [0, 1]
        norm_image = np.asarray(image, dtype=np.float32) / 255
        return norm_image, elapsed_steps

    agent = pfrl.agents.DoubleDQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=args.gamma,
        explorer=explorer,
        minibatch_size=args.batch_size,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        batch_accumulator="sum",
        phi=phi,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=eval_env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=False,
            log_interval=1000,
        )
def main():
    """Train (or demo) a PPO agent on an Atari environment.

    Parses command-line options, builds batched train/eval environments,
    constructs an (optionally recurrent) policy-value network, and runs
    training with periodic evaluation via
    ``experiments.train_agent_batch_with_evaluation``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4", help="Gym Env ID.")
    parser.add_argument("--gpu", type=int, default=0, help="GPU device ID. Set to -1 to use CPUs only.")
    parser.add_argument(
        "--num-envs",
        type=int,
        default=8,
        help="Number of env instances run in parallel.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=("Directory path to save output files."
              " If it does not exist, it will be created."),
    )
    parser.add_argument("--steps", type=int, default=10**7, help="Total time steps for training.")
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--lr", type=float, default=2.5e-4, help="Learning rate.")
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=100000,
        help="Interval (in timesteps) between evaluation phases.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=10,
        help="Number of episodes ran in an evaluation phase.",
    )
    parser.add_argument(
        "--demo",
        action="store_true",
        default=False,
        help="Run demo episodes, not training.",
    )
    parser.add_argument(
        "--load",
        type=str,
        default="",
        help=("Directory path to load a saved agent data from"
              " if it is a non-empty string."),
    )
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=("Monitor env. Videos and additional information are saved as output files."
              ),
    )
    parser.add_argument(
        "--update-interval",
        type=int,
        default=128 * 8,
        help="Interval (in timesteps) between PPO iterations.",
    )
    parser.add_argument(
        "--batchsize",
        type=int,
        default=32 * 8,
        help="Size of minibatch (in timesteps).",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=4,
        help="Number of epochs used for each PPO iteration.",
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10000,
        help="Interval (in timesteps) of printing logs.",
    )
    parser.add_argument(
        "--recurrent",
        action="store_true",
        default=False,
        help="Use a recurrent model. See the code for the model definition.",
    )
    parser.add_argument(
        "--flicker",
        action="store_true",
        default=False,
        help=("Use so-called flickering Atari, where each"
              " screen is blacked out with probability 0.5."),
    )
    parser.add_argument(
        "--no-frame-stack",
        action="store_true",
        default=False,
        help=("Disable frame stacking so that the agent can only see the current screen."
              ),
    )
    parser.add_argument(
        "--checkpoint-frequency",
        type=int,
        default=None,
        help="Frequency at which agents are stored.",
    )
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(idx, test):
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2**32 - 1 - process_seed if test else process_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
            flicker=args.flicker,
            frame_stack=False,
        )
        env.seed(env_seed)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training")
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        # BUG FIX: this used to build `(lambda: make_env(idx, test))` inside
        # the comprehension. Lambdas bind `idx` late, so every subprocess
        # factory would have been called with the *final* idx (same seed for
        # all envs). functools.partial binds idx at creation time.
        vec_env = pfrl.envs.MultiprocessVectorEnv([
            functools.partial(make_env, idx, test)
            for idx in range(args.num_envs)
        ])
        if not args.no_frame_stack:
            vec_env = pfrl.wrappers.VectorFrameStack(vec_env, 4)
        return vec_env

    # Build one batch env just to inspect the observation/action spaces.
    sample_env = make_batch_env(test=False)
    print("Observation space", sample_env.observation_space)
    print("Action space", sample_env.action_space)
    n_actions = sample_env.action_space.n
    obs_n_channels = sample_env.observation_space.low.shape[0]
    del sample_env

    def lecun_init(layer, gain=1):
        # LeCun-normal initialization. GRU layers expose their parameters as
        # weight_ih_l0 / weight_hh_l0 rather than .weight/.bias.
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            pfrl.initializers.init_lecun_normal(layer.weight, gain)
            nn.init.zeros_(layer.bias)
        else:
            pfrl.initializers.init_lecun_normal(layer.weight_ih_l0, gain)
            pfrl.initializers.init_lecun_normal(layer.weight_hh_l0, gain)
            nn.init.zeros_(layer.bias_ih_l0)
            nn.init.zeros_(layer.bias_hh_l0)
        return layer

    if args.recurrent:
        model = pfrl.nn.RecurrentSequential(
            lecun_init(nn.Conv2d(obs_n_channels, 32, 8, stride=4)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
            lecun_init(nn.Linear(3136, 512)),
            nn.ReLU(),
            lecun_init(nn.GRU(num_layers=1, input_size=512, hidden_size=512)),
            pfrl.nn.Branched(
                nn.Sequential(
                    lecun_init(nn.Linear(512, n_actions), 1e-2),
                    SoftmaxCategoricalHead(),
                ),
                lecun_init(nn.Linear(512, 1)),
            ),
        )
    else:
        model = nn.Sequential(
            lecun_init(nn.Conv2d(obs_n_channels, 32, 8, stride=4)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            lecun_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
            lecun_init(nn.Linear(3136, 512)),
            nn.ReLU(),
            pfrl.nn.Branched(
                nn.Sequential(
                    lecun_init(nn.Linear(512, n_actions), 1e-2),
                    SoftmaxCategoricalHead(),
                ),
                lecun_init(nn.Linear(512, 1)),
            ),
        )

    opt = torch.optim.Adam(model.parameters(), lr=args.lr, eps=1e-5)

    def phi(x):
        # Feature extractor: normalize uint8 pixels [0, 255] -> [0, 1]
        return np.asarray(x, dtype=np.float32) / 255

    agent = PPO(
        model,
        opt,
        gpu=args.gpu,
        phi=phi,
        update_interval=args.update_interval,
        minibatch_size=args.batchsize,
        epochs=args.epochs,
        clip_eps=0.1,
        clip_eps_vf=None,
        standardize_advantages=True,
        entropy_coef=1e-2,
        recurrent=args.recurrent,
        max_grad_norm=0.5,
    )
    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
        )
        print("n_runs: {} mean: {} median: {} stdev: {}".format(
            args.eval_n_runs,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        step_hooks = []

        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            for param_group in agent.optimizer.param_groups:
                param_group["lr"] = value

        step_hooks.append(
            experiments.LinearInterpolationHook(args.steps, args.lr, 0, lr_setter))

        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            checkpoint_freq=args.checkpoint_frequency,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            save_best_so_far_agent=False,
            step_hooks=step_hooks,
        )
def main():
    """Train (or demo) a DQN-family agent (DQN/DoubleDQN/PAL) on Atari.

    Parses command-line options, builds seeded train/eval environments,
    selects an exploration strategy and replay buffer, and runs training
    with periodic evaluation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=("Directory path to save output files."
              " If it does not exist, it will be created."),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--final-exploration-frames", type=int, default=10**6)
    parser.add_argument("--final-epsilon", type=float, default=0.01)
    parser.add_argument("--eval-epsilon", type=float, default=0.001)
    parser.add_argument("--noisy-net-sigma", type=float, default=None)
    parser.add_argument(
        "--arch",
        type=str,
        default="doubledqn",
        choices=["nature", "nips", "dueling", "doubledqn"],
    )
    parser.add_argument("--steps", type=int, default=5 * 10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--replay-start-size", type=int, default=5 * 10**4)
    parser.add_argument("--target-update-interval", type=int, default=3 * 10**4)
    parser.add_argument("--eval-interval", type=int, default=10**5)
    parser.add_argument("--update-interval", type=int, default=4)
    parser.add_argument("--eval-n-runs", type=int, default=10)
    parser.add_argument("--no-clip-delta", dest="clip_delta", action="store_false")
    parser.set_defaults(clip_delta=True)
    parser.add_argument("--agent", type=str, default="DoubleDQN",
                        choices=["DQN", "DoubleDQN", "PAL"])
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=("Monitor env. Videos and additional information are saved as output files."
              ),
    )
    parser.add_argument("--lr", type=float, default=2.5e-4, help="Learning rate")
    parser.add_argument(
        "--prioritized",
        action="store_true",
        default=False,
        help="Use prioritized experience replay.",
    )
    parser.add_argument("--num-envs", type=int, default=1)
    parser.add_argument("--n-step-return", type=int, default=1)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(idx, test):
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2**32 - 1 - process_seed if test else process_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
            frame_stack=False,
        )
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, args.eval_epsilon)
        env.seed(env_seed)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training")
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        vec_env = pfrl.envs.MultiprocessVectorEnv([
            functools.partial(make_env, idx, test)
            for idx in range(args.num_envs)
        ])
        vec_env = pfrl.wrappers.VectorFrameStack(vec_env, 4)
        return vec_env

    # Only used to query the action space.
    sample_env = make_env(0, test=False)
    n_actions = sample_env.action_space.n

    q_func = parse_arch(args.arch, n_actions)

    # BUG FIX: the epsilon-greedy explorer used to be assigned
    # unconditionally *after* the NoisyNet branch, silently overwriting the
    # Greedy explorer chosen for NoisyNet. Select exactly one explorer here.
    if args.noisy_net_sigma is not None:
        pnn.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
        # NoisyNet provides exploration on its own; turn off epsilon-greedy
        explorer = explorers.Greedy()
    else:
        explorer = explorers.LinearDecayEpsilonGreedy(
            1.0,
            args.final_epsilon,
            args.final_exploration_frames,
            lambda: np.random.randint(n_actions),
        )

    # Use the same hyper parameters as the Nature paper's
    opt = optim.RMSprop(
        q_func.parameters(),
        lr=args.lr,
        alpha=0.95,
        momentum=0.0,
        eps=1e-2,
        centered=True,
    )

    # Select a replay buffer to use
    if args.prioritized:
        # Anneal beta from beta0 to 1 throughout training
        betasteps = args.steps / args.update_interval
        rbuf = replay_buffers.PrioritizedReplayBuffer(
            10**6,
            alpha=0.6,
            beta0=0.4,
            betasteps=betasteps,
            num_steps=args.n_step_return,
        )
    else:
        rbuf = replay_buffers.ReplayBuffer(10**6, num_steps=args.n_step_return)

    def phi(x):
        # Feature extractor: normalize uint8 pixels [0, 255] -> [0, 1]
        return np.asarray(x, dtype=np.float32) / 255

    Agent = parse_agent(args.agent)
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        clip_delta=args.clip_delta,
        update_interval=args.update_interval,
        batch_accumulator="sum",
        phi=phi,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
        )
        print("n_runs: {} mean: {} median: {} stdev {}".format(
            args.eval_n_runs,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=make_batch_env(test=True),
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=False,
            log_interval=1000,
        )
def main():
    """Train (or demo) a PPO agent on the LidarBat simulation env.

    Parses command-line options, builds seeded (optionally parallel)
    environments, constructs a Gaussian-policy + value-function model,
    and runs training with periodic evaluation.
    """
    import logging

    torch.cuda.empty_cache()
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0, help='GPU to use, set to -1 if no GPU')
    parser.add_argument('--env', type=str, default='LidarBat-v0', help='Bat simulation env')
    parser.add_argument('--arch', type=str, default='FFGaussian',
                        choices=('FFSoftmax', 'FFMellowmax', 'FFGaussian'))
    parser.add_argument('--bound-mean', action='store_true')
    parser.add_argument('--seed', type=int, default=0, help='Random seed [0, 2 ** 32)')
    parser.add_argument('--outdir', type=str, default='data/ppo',
                        help='Directory path to save output files.'
                        ' If it does not exist, it will be created.')
    parser.add_argument('--steps', type=int, default=10**6)
    parser.add_argument('--eval-interval', type=int, default=10000)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--standardize-advantages', action='store_true')
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument(
        "--log-interval",
        type=int,
        default=1000,
        help=
        "Interval in timesteps between outputting log messages during training",
    )
    parser.add_argument("--num-envs", type=int, default=1,
                        help="Number of envs run in parallel.")
    parser.add_argument("--batch-size", type=int, default=64, help="Minibatch size")
    parser.add_argument('--update-interval', type=int, default=2048)
    # NOTE(review): '--batchsize' is parsed but unused ('--batch-size' is the
    # one wired to the agent); kept for CLI backward compatibility.
    parser.add_argument('--batchsize', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--entropy-coef', type=float, default=0.0)
    args = parser.parse_args()

    logging.basicConfig(level=args.logger_level)

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)

    def make_env(process_idx, test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2**32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            # TODO(review): other examples gate Monitor on `not test`;
            # confirm whether eval episodes should be recorded here too.
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        return pfrl.envs.MultiprocessVectorEnv([
            functools.partial(make_env, idx, test)
            for idx in range(args.num_envs)
        ])

    # Only for getting timesteps, and obs-action spaces
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Release the probe env; spec/space objects stay valid after close().
    sample_env.close()
    print("Observation space:", obs_space)
    print("Action space:", action_space)
    assert isinstance(action_space, gym.spaces.Box)

    # Normalize observations based on their empirical mean and variance
    obs_normalizer = pfrl.nn.EmpiricalNormalization(obs_space.low.size,
                                                    clip_threshold=5)

    # Policy network; the hidden width of 64 follows the common PPO MLP baseline.
    obs_size = obs_space.low.size
    action_size = action_space.low.size
    policy = torch.nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, 64),
        nn.Tanh(),
        nn.Linear(64, action_size),
        pfrl.policies.GaussianHeadWithStateIndependentCovariance(
            action_size=action_size,
            var_type="diagonal",
            var_func=lambda x: torch.exp(2 * x),  # Parameterize log std
            var_param_init=0,  # log std = 0 => std = 1
        ),
    )

    vf = torch.nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, 64),
        nn.Tanh(),
        nn.Linear(64, 1),
    )

    # While the original paper initialized weights by normal distribution,
    # we use orthogonal initialization as the latest openai/baselines does.
    def ortho_init(layer, gain):
        nn.init.orthogonal_(layer.weight, gain=gain)
        nn.init.zeros_(layer.bias)

    ortho_init(policy[0], gain=1)
    ortho_init(policy[2], gain=1)
    ortho_init(policy[4], gain=1e-2)
    ortho_init(vf[0], gain=1)
    ortho_init(vf[2], gain=1)
    ortho_init(vf[4], gain=1)

    # Combine a policy and a value function into a single model
    model = pfrl.nn.Branched(policy, vf)

    opt = torch.optim.Adam(model.parameters(), lr=args.lr, eps=1e-5)

    agent = PPO(
        model,
        opt,
        obs_normalizer=obs_normalizer,
        gpu=args.gpu,
        update_interval=args.update_interval,
        minibatch_size=args.batch_size,
        epochs=args.epochs,
        clip_eps_vf=None,
        entropy_coef=args.entropy_coef,
        # BUG FIX: the '--standardize-advantages' flag was parsed but
        # ignored (the value was hard-coded to True); wire it through.
        standardize_advantages=args.standardize_advantages,
        gamma=0.995,
        lambd=0.97,
    )

    if args.load or args.load_pretrained:
        if args.load_pretrained:
            raise Exception("Pretrained models are currently unsupported.")
        # either load or load_pretrained must be false
        assert not args.load or not args.load_pretrained
        if args.load:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model("PPO", args.env, model_type="final")[0])

    if args.demo:
        env = make_batch_env(True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print("n_runs: {} mean: {} median: {} stdev {}".format(
            args.eval_n_runs,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=False,
        )
def main():
    """Entry point: parse options and train/evaluate an A2C agent on Atari."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument("--outdir", type=str, default="results")
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--steps", type=int, default=8 * 10**7)
    parser.add_argument("--update-steps", type=int, default=5)
    parser.add_argument("--lr", type=float, default=7e-4)
    parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
    parser.add_argument("--rmsprop-epsilon", type=float, default=1e-5)
    parser.add_argument(
        "--use-gae",
        action="store_true",
        default=False,
        help="use generalized advantage estimation",
    )
    parser.add_argument("--tau", type=float, default=0.95, help="gae parameter")
    parser.add_argument("--alpha", type=float, default=0.99, help="RMSprop optimizer alpha")
    parser.add_argument("--eval-interval", type=int, default=10**6)
    parser.add_argument("--eval-n-runs", type=int, default=10)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default="")
    parser.add_argument("--max-grad-norm", type=float, default=40, help="value loss coefficient")
    parser.add_argument(
        "--gpu",
        "-g",
        type=int,
        default=-1,
        help="GPU ID (negative value indicates CPU)",
    )
    parser.add_argument("--num-envs", type=int, default=1)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=("Monitor env. Videos and additional information are saved as output files."
              ),
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.set_defaults(use_lstm=False)
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    # Seed PFRL's RNGs. Note that with more than one process the run is no
    # longer deterministic even under a fixed seed.
    utils.set_random_seed(args.seed)

    # Give every subprocess its own seed:
    # seed=0, processes=4 -> [0, 1, 2, 3]; seed=1, processes=4 -> [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**31

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(process_idx, test):
        # Train and test envs receive distinct, non-overlapping seeds.
        process_seed = process_seeds[process_idx]
        env_seed = 2**31 - 1 - process_seed if test else process_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training")
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        env_fns = [
            functools.partial(make_env, idx, test)
            for idx in range(args.num_envs)
        ]
        return pfrl.envs.MultiprocessVectorEnv(env_fns)

    # Probe a single env for its spaces, then discard it.
    sample_env = make_env(0, test=False)
    obs_channel_size = sample_env.observation_space.low.shape[0]
    n_actions = sample_env.action_space.n

    # Shared conv trunk feeding separate policy and value heads.
    policy_head = nn.Sequential(
        nn.Linear(256, n_actions),
        SoftmaxCategoricalHead(),
    )
    value_head = nn.Linear(256, 1)
    model = nn.Sequential(
        nn.Conv2d(obs_channel_size, 16, 8, stride=4),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(2592, 256),
        nn.ReLU(),
        pfrl.nn.Branched(policy_head, value_head),
    )

    optimizer = pfrl.optimizers.RMSpropEpsInsideSqrt(
        model.parameters(),
        lr=args.lr,
        eps=args.rmsprop_epsilon,
        alpha=args.alpha,
    )

    agent = a2c.A2C(
        model,
        optimizer,
        gamma=args.gamma,
        gpu=args.gpu,
        num_processes=args.num_envs,
        update_steps=args.update_steps,
        phi=phi,
        use_gae=args.use_gae,
        tau=args.tau,
        max_grad_norm=args.max_grad_norm,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        stats = experiments.eval_performance(
            env=make_batch_env(test=True),
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
        )
        print("n_runs: {} mean: {} median: {} stdev: {}".format(
            args.eval_n_runs,
            stats["mean"],
            stats["median"],
            stats["stdev"],
        ))
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(test=False),
            eval_env=make_batch_env(test=True),
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=False,
            log_interval=1000,
        )