def _test_training(self, gpu, steps=5000, load_model=False, require_success=True):
    random_seed.set_random_seed(1)
    logging.basicConfig(level=logging.DEBUG)
    env = self.make_env_and_successful_return(test=False)[0]
    test_env, successful_return = self.make_env_and_successful_return(test=True)
    agent = self.make_agent(env, gpu)

    if load_model:
        print("Load agent from", self.agent_dirname)
        agent.load(self.agent_dirname)
        agent.replay_buffer.load(self.rbuf_filename)

    # Train
    train_agent_with_evaluation(
        agent=agent,
        env=env,
        steps=steps,
        outdir=self.tmpdir,
        eval_interval=200,
        eval_n_steps=None,
        eval_n_episodes=5,
        successful_score=1,
        eval_env=test_env,
    )

    # Test
    n_test_runs = 5
    # run_evaluation_episodes returns (returns, lengths); keep only the returns
    eval_returns, _ = run_evaluation_episodes(
        test_env,
        agent,
        n_steps=None,
        n_episodes=n_test_runs,
    )
    n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)
    if require_success:
        assert n_succeeded == n_test_runs

    # Save
    agent.save(self.agent_dirname)
    agent.replay_buffer.save(self.rbuf_filename)
def _test_abc(self, steps=100000, require_success=True, gpu=-1, load_model=False):
    env, _ = self.make_env_and_successful_return(test=False)
    test_env, successful_return = self.make_env_and_successful_return(test=True)
    agent = self.make_agent(env, gpu)

    if load_model:
        print("Load agent from", self.agent_dirname)
        agent.load(self.agent_dirname)

    max_episode_len = None if self.episodic else 2

    # Train
    train_agent_with_evaluation(
        agent=agent,
        env=env,
        eval_env=test_env,
        steps=steps,
        outdir=self.tmpdir,
        eval_interval=200,
        eval_n_steps=None,
        eval_n_episodes=5,
        successful_score=successful_return,
        train_max_episode_len=max_episode_len,
    )

    # Test
    n_test_runs = 5
    eval_returns, _ = run_evaluation_episodes(
        test_env,
        agent,
        n_steps=None,
        n_episodes=n_test_runs,
        max_episode_len=max_episode_len,
    )
    if require_success:
        n_succeeded = np.sum(np.asarray(eval_returns) >= successful_return)
        assert n_succeeded == n_test_runs

    # Save
    agent.save(self.agent_dirname)
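# A minimal sketch (all names below are illustrative assumptions, not from the
# source) of the mixin contract the two helpers above rely on: a concrete test
# class supplies make_agent and make_env_and_successful_return, plus the
# attributes the helpers read (agent_dirname, rbuf_filename, tmpdir, episodic).
import tempfile

import gym
import pfrl


class ExampleAgentTest:  # hypothetical subclass for illustration
    episodic = True
    tmpdir = tempfile.mkdtemp()
    agent_dirname = tmpdir + "/agent"
    rbuf_filename = tmpdir + "/rbuf.pkl"

    def make_env_and_successful_return(self, test):
        env = pfrl.wrappers.CastObservationToFloat32(gym.make("CartPole-v1"))
        return env, 195  # treat an average return of >= 195 as success

    def make_agent(self, env, gpu):
        raise NotImplementedError("build and return a PFRL agent for env here")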
    replay_start_size=5 * 10**4,
    # replay_start_size=5 * 10**3,
    target_update_interval=10**4,
    clip_delta=True,
    update_interval=4,
    batch_accumulator="sum",
    phi=phi,
    gpu=0,
    episodic_update_len=4,
    recurrent=True,
)

if ns.demo:
    agent.load(ns.model)
    test_env = pfrl.wrappers.Render(test_env)
    test_env = sleep_wrapper.SleepWrapper(test_env, ns.sleep)
    experiments.eval_performance(
        env=test_env, agent=agent, n_steps=100000, n_episodes=None
    )
else:
    experiments.train_agent_with_evaluation(
        agent=agent,
        env=env,
        steps=ns.steps,
        eval_n_steps=None,
        checkpoint_freq=None,
        eval_n_episodes=10,
        eval_interval=100000,
        outdir=ns.model,
        eval_env=test_env,
        use_tensorboard=True,
    )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--final-exploration-frames", type=int, default=10**6)
    parser.add_argument("--final-epsilon", type=float, default=0.1)
    parser.add_argument("--eval-epsilon", type=float, default=0.05)
    parser.add_argument("--steps", type=int, default=10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--replay-start-size", type=int, default=5 * 10**4)
    parser.add_argument("--target-update-interval", type=int, default=10**4)
    parser.add_argument("--eval-interval", type=int, default=10**5)
    parser.add_argument("--update-interval", type=int, default=4)
    parser.add_argument("--eval-n-runs", type=int, default=10)
    parser.add_argument("--batch-size", type=int, default=32)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=(
            "Monitor env. Videos and additional information are saved as output files."
        ),
    )
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, args.eval_epsilon)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)

    n_actions = env.action_space.n
    n_atoms = 51
    v_max = 10
    v_min = -10
    q_func = torch.nn.Sequential(
        pfrl.nn.LargeAtariCNN(),
        pfrl.q_functions.DistributionalFCStateQFunctionWithDiscreteAction(
            512,
            n_actions,
            n_atoms,
            v_min,
            v_max,
            n_hidden_channels=0,
            n_hidden_layers=0,
        ),
    )

    # Use the same hyper parameters as https://arxiv.org/abs/1707.06887
    opt = torch.optim.Adam(q_func.parameters(), 2.5e-4, eps=1e-2 / args.batch_size)

    rbuf = replay_buffers.ReplayBuffer(10**6)

    explorer = explorers.LinearDecayEpsilonGreedy(
        1.0,
        args.final_epsilon,
        args.final_exploration_frames,
        lambda: np.random.randint(n_actions),
    )

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    agent = pfrl.agents.CategoricalDQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        batch_accumulator="mean",
        phi=phi,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=False,
            eval_env=eval_env,
        )
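# For intuition about the explorer configured above: LinearDecayEpsilonGreedy
# interpolates epsilon linearly from its start value to its end value over the
# decay window, then holds it constant. A dependency-free sketch of that
# schedule (illustrative only, not the PFRL implementation):
def linear_epsilon(t, start=1.0, end=0.1, decay_steps=10**6):
    """Epsilon at step t under a linear decay schedule."""
    if t >= decay_steps:
        return end
    return start + (end - start) * (t / decay_steps)


assert linear_epsilon(0) == 1.0
assert abs(linear_epsilon(5 * 10**5) - 0.55) < 1e-9  # halfway through decay
assert linear_epsilon(2 * 10**6) == 0.1              # held constant afterwards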
def main():
    import logging

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--env", type=str, default="CartPole-v1")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--final-exploration-steps", type=int, default=1000)
    parser.add_argument("--start-epsilon", type=float, default=1.0)
    parser.add_argument("--end-epsilon", type=float, default=0.1)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--steps", type=int, default=10**8)
    parser.add_argument("--prioritized-replay", action="store_true")
    parser.add_argument("--replay-start-size", type=int, default=50)
    parser.add_argument("--target-update-interval", type=int, default=100)
    parser.add_argument("--target-update-method", type=str, default="hard")
    parser.add_argument("--soft-update-tau", type=float, default=1e-2)
    parser.add_argument("--update-interval", type=int, default=1)
    parser.add_argument("--eval-n-runs", type=int, default=100)
    parser.add_argument("--eval-interval", type=int, default=1000)
    parser.add_argument("--n-hidden-channels", type=int, default=12)
    parser.add_argument("--n-hidden-layers", type=int, default=3)
    parser.add_argument("--gamma", type=float, default=0.95)
    parser.add_argument("--minibatch-size", type=int, default=None)
    parser.add_argument("--render-train", action="store_true")
    parser.add_argument("--render-eval", action="store_true")
    parser.add_argument("--monitor", action="store_true")
    parser.add_argument("--reward-scale-factor", type=float, default=1.0)
    args = parser.parse_args()

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        env = gym.make(args.env)
        env_seed = 2**32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = pfrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if (args.render_eval and test) or (args.render_train and not test):
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    timestep_limit = env.spec.max_episode_steps
    obs_size = env.observation_space.low.size
    action_space = env.action_space

    n_atoms = 51
    v_max = 500
    v_min = 0
    n_actions = action_space.n
    q_func = q_functions.DistributionalFCStateQFunctionWithDiscreteAction(
        obs_size,
        n_actions,
        n_atoms,
        v_min,
        v_max,
        n_hidden_channels=args.n_hidden_channels,
        n_hidden_layers=args.n_hidden_layers,
    )

    # Use epsilon-greedy for exploration
    explorer = explorers.LinearDecayEpsilonGreedy(
        args.start_epsilon,
        args.end_epsilon,
        args.final_exploration_steps,
        action_space.sample,
    )

    opt = torch.optim.Adam(q_func.parameters(), 1e-3)

    rbuf_capacity = 50000  # 5 * 10 ** 5
    if args.minibatch_size is None:
        args.minibatch_size = 32
    if args.prioritized_replay:
        betasteps = (args.steps - args.replay_start_size) // args.update_interval
        rbuf = replay_buffers.PrioritizedReplayBuffer(
            rbuf_capacity, betasteps=betasteps
        )
    else:
        rbuf = replay_buffers.ReplayBuffer(rbuf_capacity)

    agent = pfrl.agents.CategoricalDQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=args.gamma,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        minibatch_size=args.minibatch_size,
        target_update_method=args.target_update_method,
        soft_update_tau=args.soft_update_tau,
    )

    if args.load:
        agent.load(args.load)

    eval_env = make_env(test=True)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            eval_env=eval_env,
            train_max_episode_len=timestep_limit,
        )
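# The categorical (C51) Q-function above outputs, for each action, a
# probability distribution over n_atoms fixed support points between v_min
# and v_max. A dependency-light sketch of how a scalar Q-value is recovered
# from such a distribution (the probabilities here are made up):
import numpy as np

n_atoms, v_min, v_max = 51, 0, 500
z = np.linspace(v_min, v_max, n_atoms)   # fixed support atoms
probs = np.full(n_atoms, 1.0 / n_atoms)  # a dummy uniform distribution
q_value = float(np.sum(probs * z))       # expected return under probs
print(q_value)                           # 250.0 for the uniform case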
def _objective_core(
    # optuna parameters
    trial,
    # training parameters
    env_id,
    outdir,
    seed,
    monitor,
    gpu,
    steps,
    train_max_episode_len,
    eval_n_episodes,
    eval_interval,
    batch_size,
    # hyperparameters
    hyperparams,
):
    # Set a random seed used in PFRL
    utils.set_random_seed(seed)

    # Set different random seeds for train and test envs.
    train_seed = seed
    test_seed = 2**31 - 1 - seed

    def make_env(test=False):
        env = gym.make(env_id)

        if not isinstance(env.observation_space, gym.spaces.Box):
            raise ValueError(
                "Supported only Box observation environments, but given: {}".format(
                    env.observation_space
                )
            )
        if len(env.observation_space.shape) != 1:
            raise ValueError(
                "Supported only observation spaces with ndim==1, but given: {}".format(
                    env.observation_space.shape
                )
            )
        if not isinstance(env.action_space, gym.spaces.Discrete):
            raise ValueError(
                "Supported only discrete action environments, but given: {}".format(
                    env.action_space
                )
            )

        env_seed = test_seed if test else train_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if monitor:
            env = pfrl.wrappers.Monitor(env, outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = pfrl.wrappers.ScaleReward(env, hyperparams["reward_scale_factor"])
        return env

    env = make_env(test=False)
    obs_space = env.observation_space
    obs_size = obs_space.low.size
    action_space = env.action_space
    n_actions = action_space.n

    # create model & q_function
    model = MLP(
        in_size=obs_size, out_size=n_actions, hidden_sizes=hyperparams["hidden_sizes"]
    )
    q_func = q_functions.SingleModelStateQFunctionWithDiscreteAction(model=model)

    # Use epsilon-greedy for exploration
    start_epsilon = 1
    explorer = explorers.LinearDecayEpsilonGreedy(
        start_epsilon=start_epsilon,
        end_epsilon=hyperparams["end_epsilon"],
        decay_steps=hyperparams["decay_steps"],
        random_action_func=action_space.sample,
    )

    opt = optim.Adam(
        q_func.parameters(), lr=hyperparams["lr"], eps=hyperparams["adam_eps"]
    )

    rbuf_capacity = steps
    rbuf = replay_buffers.ReplayBuffer(rbuf_capacity)

    agent = DQN(
        q_func,
        opt,
        rbuf,
        gpu=gpu,
        gamma=hyperparams["gamma"],
        explorer=explorer,
        replay_start_size=hyperparams["replay_start_size"],
        target_update_interval=hyperparams["target_update_interval"],
        update_interval=hyperparams["update_interval"],
        minibatch_size=batch_size,
    )

    eval_env = make_env(test=True)

    evaluation_hooks = [OptunaPrunerHook(trial=trial)]
    _, eval_stats_history = experiments.train_agent_with_evaluation(
        agent=agent,
        env=env,
        steps=steps,
        eval_n_steps=None,
        eval_n_episodes=eval_n_episodes,
        eval_interval=eval_interval,
        outdir=outdir,
        eval_env=eval_env,
        train_max_episode_len=train_max_episode_len,
        evaluation_hooks=evaluation_hooks,
    )

    score = _get_score_from_eval_stats_history(eval_stats_history)
    return score
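# A minimal sketch of how an objective like _objective_core is typically
# driven from Optuna. The search space and fixed values below are placeholder
# assumptions for illustration; the real script defines its own sampling:
import optuna


def objective(trial):
    hyperparams = {
        "reward_scale_factor": trial.suggest_float(
            "reward_scale_factor", 1e-5, 10, log=True
        ),
        "lr": trial.suggest_float("lr", 1e-5, 1e-2, log=True),
        # remaining keys fixed to placeholder values for this sketch
        "hidden_sizes": (64, 64),
        "end_epsilon": 0.1,
        "decay_steps": 1000,
        "adam_eps": 1e-8,
        "gamma": 0.99,
        "replay_start_size": 500,
        "target_update_interval": 100,
        "update_interval": 1,
    }
    return _objective_core(
        trial=trial,
        env_id="CartPole-v0",
        outdir="results",
        seed=0,
        monitor=False,
        gpu=-1,
        steps=4000,
        train_max_episode_len=200,
        eval_n_episodes=10,
        eval_interval=1000,
        batch_size=32,
        hyperparams=hyperparams,
    )


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10)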
def main():
    import logging

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--env", type=str, default="Pendulum-v0")
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--final-exploration-steps", type=int, default=10**4)
    parser.add_argument("--start-epsilon", type=float, default=1.0)
    parser.add_argument("--end-epsilon", type=float, default=0.1)
    parser.add_argument("--noisy-net-sigma", type=float, default=None)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--steps", type=int, default=10**5)
    parser.add_argument("--prioritized-replay", action="store_true")
    parser.add_argument("--replay-start-size", type=int, default=1000)
    parser.add_argument("--target-update-interval", type=int, default=10**2)
    parser.add_argument("--target-update-method", type=str, default="hard")
    parser.add_argument("--soft-update-tau", type=float, default=1e-2)
    parser.add_argument("--update-interval", type=int, default=1)
    parser.add_argument("--eval-n-runs", type=int, default=100)
    parser.add_argument("--eval-interval", type=int, default=10**4)
    parser.add_argument("--n-hidden-channels", type=int, default=100)
    parser.add_argument("--n-hidden-layers", type=int, default=2)
    parser.add_argument("--gamma", type=float, default=0.99)
    parser.add_argument("--minibatch-size", type=int, default=None)
    parser.add_argument("--render-train", action="store_true")
    parser.add_argument("--render-eval", action="store_true")
    parser.add_argument("--monitor", action="store_true")
    parser.add_argument("--reward-scale-factor", type=float, default=1e-3)
    parser.add_argument(
        "--actor-learner",
        action="store_true",
        help="Enable asynchronous sampling with asynchronous actor(s)",
    )  # NOQA
    parser.add_argument(
        "--num-envs",
        type=int,
        default=1,
        help=(
            "The number of environments for sampling (only effective with"
            " --actor-learner enabled)"
        ),
    )  # NOQA
    args = parser.parse_args()

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**32

    def clip_action_filter(a):
        return np.clip(a, action_space.low, action_space.high)

    def make_env(idx=0, test=False):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[idx])
        env_seed = 2**32 - 1 - process_seed if test else process_seed
        utils.set_random_seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if isinstance(env.action_space, spaces.Box):
            utils.env_modifiers.make_action_filtered(env, clip_action_filter)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = pfrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if (args.render_eval and test) or (args.render_train and not test):
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    timestep_limit = env.spec.max_episode_steps
    obs_space = env.observation_space
    obs_size = obs_space.low.size
    action_space = env.action_space

    if isinstance(action_space, spaces.Box):
        action_size = action_space.low.size
        # Use NAF to apply DQN to continuous action spaces
        q_func = q_functions.FCQuadraticStateQFunction(
            obs_size,
            action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            action_space=action_space,
        )
        # Use the Ornstein-Uhlenbeck process for exploration
        ou_sigma = (action_space.high - action_space.low) * 0.2
        explorer = explorers.AdditiveOU(sigma=ou_sigma)
    else:
        n_actions = action_space.n
        q_func = q_functions.FCStateQFunctionWithDiscreteAction(
            obs_size,
            n_actions,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
        )
        # Use epsilon-greedy for exploration
        explorer = explorers.LinearDecayEpsilonGreedy(
            args.start_epsilon,
            args.end_epsilon,
            args.final_exploration_steps,
            action_space.sample,
        )

    if args.noisy_net_sigma is not None:
        pnn.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
        # Turn off explorer
        explorer = explorers.Greedy()

    opt = optim.Adam(q_func.parameters())

    rbuf_capacity = 5 * 10**5
    if args.minibatch_size is None:
        args.minibatch_size = 32
    if args.prioritized_replay:
        betasteps = (args.steps - args.replay_start_size) // args.update_interval
        rbuf = replay_buffers.PrioritizedReplayBuffer(
            rbuf_capacity, betasteps=betasteps
        )
    else:
        rbuf = replay_buffers.ReplayBuffer(rbuf_capacity)

    agent = DQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=args.gamma,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        minibatch_size=args.minibatch_size,
        target_update_method=args.target_update_method,
        soft_update_tau=args.soft_update_tau,
    )

    if args.load:
        agent.load(args.load)

    eval_env = make_env(test=True)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    elif not args.actor_learner:
        print(
            "WARNING: Since https://github.com/pfnet/pfrl/pull/112 we have started"
            " setting `eval_during_episode=True` in this script, which affects the"
            " timings of evaluation phases."
        )
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            eval_env=eval_env,
            train_max_episode_len=timestep_limit,
            eval_during_episode=True,
        )
    else:
        # Asynchronous actor-learner mode, used when multiple envs are given.
        # When we use multiple envs, it is critical to ensure each env
        # can occupy a CPU core to get the best performance.
        # Therefore, we need to prevent potential CPU over-provision caused by
        # multi-threading in OpenMP and NumPy, so disable it in both.
        os.environ["OMP_NUM_THREADS"] = "1"  # NOQA

        (
            make_actor,
            learner,
            poller,
            exception_event,
        ) = agent.setup_actor_learner_training(args.num_envs)

        poller.start()
        learner.start()

        experiments.train_agent_async(
            processes=args.num_envs,
            make_agent=make_actor,
            make_env=make_env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            stop_event=learner.stop_event,
            exception_event=exception_event,
        )

        poller.stop()
        learner.stop()
        poller.join()
        learner.join()
def main():
    import logging

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument(
        "--num-envs", type=int, default=1, help="Number of envs run in parallel."
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=2 * 10**6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=100000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=100,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--render", action="store_true", help="Render env states in a GUI window."
    )
    parser.add_argument(
        "--demo", action="store_true", help="Just run evaluation, not training."
    )
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--load", type=str, default="", help="Directory to load agent from."
    )
    parser.add_argument(
        "--log-level", type=int, default=logging.INFO, help="Level of the root logger."
    )
    parser.add_argument(
        "--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=1000,
        help="Interval in timesteps between outputting log messages during training",
    )
    parser.add_argument(
        "--update-interval",
        type=int,
        default=2048,
        help="Interval in timesteps between model updates.",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=10,
        help="Number of epochs to update model for per PPO iteration.",
    )
    parser.add_argument("--batch-size", type=int, default=64, help="Minibatch size")
    current_dir = os.path.dirname(os.path.realpath(__file__))
    parser.add_argument(
        "--cfg",
        type=str,
        default=os.path.abspath(
            os.path.join(
                current_dir,
                "raisimlib",
                "raisimGymTorch",
                "raisimGymTorch",
                "env",
                "envs",
                "rsg_anymal",
                "cfg.yaml",
            )
        ),
        help="configuration file",
    )
    args = parser.parse_args()
    cfg = YAML().load(open(args.cfg, "r"))

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2**31

    args.outdir = experiments.prepare_output_dir(args, args.outdir)

    def make_env(process_idx, test):
        visualize = process_idx == 0 and args.render
        env = myEnv(os.path.join(current_dir, "raisimlib", "rsc"), cfg, visualize)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2**31 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        env = NormalizeObsSpace(env)
        env = pfrl.wrappers.NormalizeActionSpace(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        return env

    def make_batch_env(test):
        return pfrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test) for idx in range(args.num_envs)]
        )

    # Only for getting timesteps and the obs/action spaces
    sample_env = myEnv(os.path.join(current_dir, "raisimlib", "rsc"), cfg, False)
    timestep_limit = 100000  # sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    print("Observation space:", obs_space)
    print("Action space:", action_space)

    assert isinstance(action_space, gym.spaces.Box)

    # Normalize observations based on their empirical mean and variance
    obs_normalizer = pfrl.nn.EmpiricalNormalization(
        obs_space.low.size, clip_threshold=5
    )

    obs_size = obs_space.low.size
    action_size = action_space.low.size

    policy = torch.nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, 64),
        nn.Tanh(),
        nn.Linear(64, action_size),
        pfrl.policies.GaussianHeadWithStateIndependentCovariance(
            action_size=action_size,
            var_type="diagonal",
            var_func=lambda x: torch.exp(2 * x),  # Parameterize log std
            var_param_init=0,  # log std = 0 => std = 1
        ),
    )

    vf = torch.nn.Sequential(
        nn.Linear(obs_size, 64),
        nn.Tanh(),
        nn.Linear(64, 64),
        nn.Tanh(),
        nn.Linear(64, 1),
    )

    # While the original paper initialized weights by normal distribution,
    # we use orthogonal initialization as the latest openai/baselines does.
    def ortho_init(layer, gain):
        nn.init.orthogonal_(layer.weight, gain=gain)
        nn.init.zeros_(layer.bias)

    ortho_init(policy[0], gain=1)
    ortho_init(policy[2], gain=1)
    ortho_init(policy[4], gain=1e-2)
    ortho_init(vf[0], gain=1)
    ortho_init(vf[2], gain=1)
    ortho_init(vf[4], gain=1)

    # Combine a policy and a value function into a single model
    model = pfrl.nn.Branched(policy, vf)

    opt = torch.optim.Adam(model.parameters(), lr=3e-4, eps=1e-5)

    agent = PPO(
        model,
        opt,
        obs_normalizer=obs_normalizer,
        gpu=args.gpu,
        update_interval=args.update_interval,
        minibatch_size=args.batch_size,
        epochs=args.epochs,
        clip_eps_vf=None,
        entropy_coef=0,
        standardize_advantages=True,
        gamma=0.998,
        lambd=0.95,
    )

    if args.load or args.load_pretrained:
        if args.load_pretrained:
            raise Exception("Pretrained models are currently unsupported.")
        # either load or load_pretrained must be false
        assert not args.load or not args.load_pretrained
        if args.load:
            agent.load(args.load)
        else:
            agent.load(utils.download_model("PPO", args.env, model_type="final")[0])

    if args.demo:
        env = make_env(0, True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    elif args.num_envs == 1:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=make_env(0, False),
            eval_env=make_env(0, True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            save_best_so_far_agent=True,
            use_tensorboard=True,
        )
    else:
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=True,
        )
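# A quick self-contained check of what ortho_init above does: orthogonal_
# makes the weight matrix orthogonal (for a square matrix, W @ W.T is the
# identity), and the gain simply rescales it. Illustrative sketch:
import torch
from torch import nn

layer = nn.Linear(64, 64)
nn.init.orthogonal_(layer.weight, gain=1.0)
nn.init.zeros_(layer.bias)
eye = layer.weight @ layer.weight.T
print(torch.allclose(eye, torch.eye(64), atol=1e-5))  # True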
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument(
        "--env",
        type=str,
        default="Hopper-v2",
        help="OpenAI Gym MuJoCo env to perform algorithm on.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument(
        "--load", type=str, default="", help="Directory to load agent from."
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=10**6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=10,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=5000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--replay-start-size",
        type=int,
        default=10000,
        help="Minimum replay buffer size before performing gradient updates.",
    )
    parser.add_argument("--batch-size", type=int, default=100, help="Minibatch size")
    parser.add_argument(
        "--render", action="store_true", help="Render env states in a GUI window."
    )
    parser.add_argument(
        "--demo", action="store_true", help="Just run evaluation, not training."
    )
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument(
        "--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
    )
    parser.add_argument(
        "--log-level", type=int, default=logging.INFO, help="Level of the root logger."
    )
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))

    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)

    def make_env(test):
        env = gym.make(args.env)
        # Unwrap TimeLimit wrapper
        assert isinstance(env, gym.wrappers.TimeLimit)
        env = env.env
        # Use different random seeds for train and test envs
        env_seed = 2**32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if args.render and not test:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    timestep_limit = env.spec.max_episode_steps
    obs_space = env.observation_space
    action_space = env.action_space
    print("Observation space:", obs_space)
    print("Action space:", action_space)

    obs_size = obs_space.low.size
    action_size = action_space.low.size

    policy = nn.Sequential(
        nn.Linear(obs_size, 400),
        nn.ReLU(),
        nn.Linear(400, 300),
        nn.ReLU(),
        nn.Linear(300, action_size),
        nn.Tanh(),
        pfrl.policies.DeterministicHead(),
    )
    policy_optimizer = torch.optim.Adam(policy.parameters())

    def make_q_func_with_optimizer():
        q_func = nn.Sequential(
            pfrl.nn.ConcatObsAndAction(),
            nn.Linear(obs_size + action_size, 400),
            nn.ReLU(),
            nn.Linear(400, 300),
            nn.ReLU(),
            nn.Linear(300, 1),
        )
        q_func_optimizer = torch.optim.Adam(q_func.parameters())
        return q_func, q_func_optimizer

    q_func1, q_func1_optimizer = make_q_func_with_optimizer()
    q_func2, q_func2_optimizer = make_q_func_with_optimizer()

    rbuf = replay_buffers.ReplayBuffer(10**6)

    explorer = explorers.AdditiveGaussian(
        scale=0.1, low=action_space.low, high=action_space.high
    )

    def burnin_action_func():
        """Select random actions until the model is updated one or more times."""
        return np.random.uniform(action_space.low, action_space.high).astype(
            np.float32
        )

    # Hyperparameters in http://arxiv.org/abs/1802.09477
    agent = pfrl.agents.TD3(
        policy,
        q_func1,
        q_func2,
        policy_optimizer,
        q_func1_optimizer,
        q_func2_optimizer,
        rbuf,
        gamma=0.99,
        soft_update_tau=5e-3,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        gpu=args.gpu,
        minibatch_size=args.batch_size,
        burnin_action_func=burnin_action_func,
    )

    if len(args.load) > 0 or args.load_pretrained:
        # either load or load_pretrained must be false
        assert not len(args.load) > 0 or not args.load_pretrained
        if len(args.load) > 0:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model("TD3", args.env, model_type=args.pretrained_type)[
                    0
                ]
            )

    eval_env = make_env(test=True)
    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
        import json
        import os

        with open(os.path.join(args.outdir, "demo_scores.json"), "w") as f:
            json.dump(eval_stats, f)
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_env=eval_env,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            train_max_episode_len=timestep_limit,
        )
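# For intuition about the TD3 exploration above: AdditiveGaussian perturbs the
# deterministic policy's action with Gaussian noise and clips the result to
# the action bounds. A dependency-free sketch of that behaviour (illustrative
# only, not PFRL's implementation):
import numpy as np

def noisy_action(a, low, high, scale=0.1, rng=np.random):
    """Add clipped Gaussian exploration noise to a deterministic action."""
    return np.clip(a + rng.normal(0, scale, size=np.shape(a)), low, high)

a = np.array([0.9, -0.2], dtype=np.float32)
print(noisy_action(a, low=-1.0, high=1.0))  # e.g. [0.93, -0.27], always in [-1, 1]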
def main():
    import logging

    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="CartPole-v0")
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--beta", type=float, default=1e-4)
    parser.add_argument("--batchsize", type=int, default=10)
    parser.add_argument("--steps", type=int, default=10**5)
    parser.add_argument("--eval-interval", type=int, default=10**4)
    parser.add_argument("--eval-n-runs", type=int, default=100)
    parser.add_argument("--reward-scale-factor", type=float, default=1e-2)
    parser.add_argument("--render", action="store_true", default=False)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default="")
    parser.add_argument("--log-level", type=int, default=logging.INFO)
    parser.add_argument("--monitor", action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    args.outdir = experiments.prepare_output_dir(args, args.outdir)

    def make_env(test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        env_seed = 2**32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = pfrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if args.render and not test:
            env = pfrl.wrappers.Render(env)
        return env

    train_env = make_env(test=False)
    timestep_limit = train_env.spec.max_episode_steps
    obs_space = train_env.observation_space
    action_space = train_env.action_space

    obs_size = obs_space.low.size
    hidden_size = 200
    # Switch policy types according to the action space type
    if isinstance(action_space, gym.spaces.Box):
        model = nn.Sequential(
            nn.Linear(obs_size, hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, action_space.low.size),
            GaussianHeadWithFixedCovariance(0.3),
        )
    else:
        model = nn.Sequential(
            nn.Linear(obs_size, hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, action_space.n),
            SoftmaxCategoricalHead(),
        )

    opt = torch.optim.Adam(model.parameters(), lr=args.lr)

    agent = pfrl.agents.REINFORCE(
        model,
        opt,
        gpu=args.gpu,
        beta=args.beta,
        batchsize=args.batchsize,
        max_grad_norm=1.0,
    )
    if args.load:
        agent.load(args.load)

    eval_env = make_env(test=True)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=train_env,
            eval_env=eval_env,
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            train_max_episode_len=timestep_limit,
        )
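# For reference, the estimator REINFORCE optimizes: maximize E[log pi(a|s) G]
# over the returns G of sampled episodes. A self-contained torch sketch of the
# per-episode loss (values are made up; this is not PFRL's internal code):
import torch

logits = torch.randn(3, 2, requires_grad=True)   # per-step action logits
actions = torch.tensor([0, 1, 0])                # actions actually taken
returns = torch.tensor([1.0, 0.9, 0.8])          # discounted returns G_t
log_probs = torch.distributions.Categorical(logits=logits).log_prob(actions)
loss = -(log_probs * returns).mean()             # ascend the policy gradient
loss.backward()                                  # gradients flow into logits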
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env",
        type=str,
        default="BreakoutNoFrameskip-v4",
        help="OpenAI Atari domain to perform algorithm on.",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=(
            "Monitor env. Videos and additional information are saved as output files."
        ),
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=5 * 10**7,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--replay-start-size",
        type=int,
        default=5 * 10**4,
        help="Minimum replay buffer size before performing gradient updates.",
    )
    parser.add_argument("--eval-n-steps", type=int, default=125000)
    parser.add_argument("--eval-interval", type=int, default=250000)
    parser.add_argument("--n-best-episodes", type=int, default=30)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=None),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, 0.05)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)

    n_actions = env.action_space.n
    q_func = nn.Sequential(
        pnn.LargeAtariCNN(),
        init_chainer_default(nn.Linear(512, n_actions)),
        DiscreteActionValueHead(),
    )

    # Use the same hyperparameters as the Nature paper
    opt = pfrl.optimizers.RMSpropEpsInsideSqrt(
        q_func.parameters(),
        lr=2.5e-4,
        alpha=0.95,
        momentum=0.0,
        eps=1e-2,
        centered=True,
    )

    rbuf = replay_buffers.ReplayBuffer(10**6)

    explorer = explorers.LinearDecayEpsilonGreedy(
        start_epsilon=1.0,
        end_epsilon=0.1,
        decay_steps=10**6,
        random_action_func=lambda: np.random.randint(n_actions),
    )

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    Agent = agents.DQN
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=10**4,
        clip_delta=True,
        update_interval=4,
        batch_accumulator="sum",
        phi=phi,
    )

    if args.load or args.load_pretrained:
        # either load or load_pretrained must be false
        assert not args.load or not args.load_pretrained
        if args.load:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model("DQN", args.env, model_type=args.pretrained_type)[
                    0
                ]
            )

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env, agent=agent, n_steps=args.eval_n_steps, n_episodes=None
        )
        print(
            "n_episodes: {} mean: {} median: {} stdev {}".format(
                eval_stats["episodes"],
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=args.eval_n_steps,
            eval_n_episodes=None,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=True,
            eval_env=eval_env,
        )

        dir_of_best_network = os.path.join(args.outdir, "best")
        agent.load(dir_of_best_network)

        # run 30 evaluation episodes, each capped at 5 mins of play
        stats = experiments.evaluator.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.n_best_episodes,
            max_episode_len=4500,
            logger=None,
        )
        with open(os.path.join(args.outdir, "bestscores.json"), "w") as f:
            json.dump(stats, f)
        print("The results of the best scoring network:")
        for stat in stats:
            print(str(stat) + ":" + str(stats[stat]))
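# A note on the optimizer above: standard RMSprop divides by sqrt(v) + eps,
# while the Nature DQN variant puts eps inside the square root and divides by
# sqrt(v + eps), which is much more conservative when v is tiny. A
# dependency-free sketch of the difference in the denominator (illustrative
# only, not PFRL's implementation):
import numpy as np

v = 1e-8    # running average of squared gradients
eps = 1e-2
print(np.sqrt(v) + eps)  # standard RMSprop denominator: ~0.0101
print(np.sqrt(v + eps))  # eps-inside-sqrt denominator:  ~0.1000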
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--final-exploration-frames", type=int, default=10**6)
    parser.add_argument("--final-epsilon", type=float, default=0.01)
    parser.add_argument("--eval-epsilon", type=float, default=0.001)
    parser.add_argument("--steps", type=int, default=5 * 10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--replay-start-size", type=int, default=5 * 10**4)
    parser.add_argument("--target-update-interval", type=int, default=10**4)
    parser.add_argument("--eval-interval", type=int, default=250000)
    parser.add_argument("--eval-n-steps", type=int, default=125000)
    parser.add_argument("--update-interval", type=int, default=4)
    parser.add_argument("--batch-size", type=int, default=32)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=(
            "Monitor env. Videos and additional information are saved as output files."
        ),
    )
    parser.add_argument(
        "--batch-accumulator", type=str, default="mean", choices=["mean", "sum"]
    )
    parser.add_argument("--quantile-thresholds-N", type=int, default=64)
    parser.add_argument("--quantile-thresholds-N-prime", type=int, default=64)
    parser.add_argument("--quantile-thresholds-K", type=int, default=32)
    parser.add_argument("--n-best-episodes", type=int, default=200)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, args.eval_epsilon)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)
    n_actions = env.action_space.n

    q_func = pfrl.agents.iqn.ImplicitQuantileQFunction(
        psi=nn.Sequential(
            nn.Conv2d(4, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
        ),
        phi=nn.Sequential(
            pfrl.agents.iqn.CosineBasisLinear(64, 3136),
            nn.ReLU(),
        ),
        f=nn.Sequential(
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        ),
    )

    # Use the same hyper parameters as https://arxiv.org/abs/1710.10044
    opt = torch.optim.Adam(q_func.parameters(), lr=5e-5, eps=1e-2 / args.batch_size)

    rbuf = replay_buffers.ReplayBuffer(10**6)

    explorer = explorers.LinearDecayEpsilonGreedy(
        1.0,
        args.final_epsilon,
        args.final_exploration_frames,
        lambda: np.random.randint(n_actions),
    )

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    agent = pfrl.agents.IQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        batch_accumulator=args.batch_accumulator,
        phi=phi,
        quantile_thresholds_N=args.quantile_thresholds_N,
        quantile_thresholds_N_prime=args.quantile_thresholds_N_prime,
        quantile_thresholds_K=args.quantile_thresholds_K,
    )

    if args.load or args.load_pretrained:
        # either load or load_pretrained must be false
        assert not args.load or not args.load_pretrained
        if args.load:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model("IQN", args.env, model_type=args.pretrained_type)[
                    0
                ]
            )

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=args.eval_n_steps,
            n_episodes=None,
        )
        print(
            "n_steps: {} mean: {} median: {} stdev {}".format(
                args.eval_n_steps,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=args.eval_n_steps,
            eval_n_episodes=None,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=True,
            eval_env=eval_env,
        )

        dir_of_best_network = os.path.join(args.outdir, "best")
        agent.load(dir_of_best_network)

        # run 200 evaluation episodes, each capped at 30 mins of play
        stats = experiments.evaluator.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.n_best_episodes,
            max_episode_len=args.max_frames // 4,  # env steps: frames / frameskip
            logger=None,
        )
        with open(os.path.join(args.outdir, "bestscores.json"), "w") as f:
            json.dump(stats, f)
        print("The results of the best scoring network:")
        for stat in stats:
            print(str(stat) + ":" + str(stats[stat]))
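# For intuition about CosineBasisLinear above: IQN embeds each sampled
# quantile fraction tau in [0, 1] with cosine features cos(pi * i * tau),
# i = 0..n_basis-1, before the learned linear layer. A dependency-free sketch
# of the feature computation (illustrative only, not PFRL's implementation):
import numpy as np

def cosine_basis_features(taus, n_basis=64):
    """Embed quantile fractions taus with cosine basis functions."""
    i = np.arange(n_basis)  # basis indices 0..n_basis-1
    return np.cos(np.pi * i[None, :] * np.asarray(taus)[:, None])

feats = cosine_basis_features([0.25, 0.5, 0.75])
print(feats.shape)  # (3, 64): one 64-dim embedding per quantile fraction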
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--eval-epsilon", type=float, default=0.0)
    parser.add_argument("--noisy-net-sigma", type=float, default=0.2)
    parser.add_argument("--steps", type=int, default=5 * 10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--replay-start-size", type=int, default=2 * 10**3)
    parser.add_argument("--eval-n-steps", type=int, default=125000)
    parser.add_argument("--eval-interval", type=int, default=250000)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=(
            "Monitor env. Videos and additional information are saved as output files."
        ),
    )
    parser.add_argument("--n-best-episodes", type=int, default=200)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    test_ID = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
    args.outdir = experiments.prepare_output_dir(args, args.outdir, test_ID)
    print("Output files are saved in {}".format(args.outdir))

    env = MapRootEnv()
    eval_env = MapRootEnv()
    n_actions = env.action_space.n
    input_shape = env.input_shape

    n_atoms = 51
    v_max = 10
    v_min = -10
    q_func = MyDistributionalDuelingDQN(
        n_actions, n_atoms, v_min, v_max, input_shape[2]
    )

    # Noisy nets
    pnn.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
    # Turn off explorer
    explorer = explorers.Greedy()

    # Use the same hyper parameters as https://arxiv.org/abs/1710.02298
    opt = torch.optim.Adam(q_func.parameters(), 6.25e-5, eps=1.5 * 10**-4)

    # Prioritized Replay
    # Anneal beta from beta0 to 1 throughout training
    update_interval = 4
    betasteps = args.steps / update_interval
    rbuf = replay_buffers.PrioritizedReplayBuffer(
        5 * 10**4,
        alpha=0.5,
        beta0=0.4,
        betasteps=betasteps,
        num_steps=3,
        normalize_by_max="batch",
    )

    Agent = agents.CategoricalDoubleDQN
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.80,
        explorer=explorer,
        minibatch_size=32,
        replay_start_size=args.replay_start_size,
        target_update_interval=32000,
        update_interval=update_interval,
        batch_accumulator="mean",
    )

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env, agent=agent, n_steps=args.eval_n_steps, n_episodes=None
        )
        print(
            "n_episodes: {} mean: {} median: {} stdev {}".format(
                eval_stats["episodes"],
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=args.eval_n_steps,
            eval_n_episodes=None,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=True,
            eval_env=eval_env,
            logger=TBLogger(args.outdir),
        )

        # dir_of_best_network = os.path.join(args.outdir, "best")
        # agent.load(dir_of_best_network)

        # run 200 evaluation episodes, each capped at 30 mins of play
        stats = experiments.evaluator.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.n_best_episodes,
            max_episode_len=args.max_frames // 4,  # env steps: frames / frameskip
            logger=None,
        )
        with open(os.path.join(args.outdir, "bestscores.json"), "w") as f:
            json.dump(stats, f)
        print("The results of the best scoring network:")
        for stat in stats:
            print(str(stat) + ":" + str(stats[stat]))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="BreakoutNoFrameskip-v4")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--eval-epsilon", type=float, default=0.0)
    parser.add_argument("--noisy-net-sigma", type=float, default=0.5)
    parser.add_argument("--steps", type=int, default=5 * 10**7)
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument("--replay-start-size", type=int, default=2 * 10**4)
    parser.add_argument("--eval-n-steps", type=int, default=125000)
    parser.add_argument("--eval-interval", type=int, default=250000)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=(
            "Monitor env. Videos and additional information are saved as output files."
        ),
    )
    parser.add_argument("--n-best-episodes", type=int, default=200)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, args.eval_epsilon)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)
    n_actions = env.action_space.n

    n_atoms = 51
    v_max = 10
    v_min = -10
    q_func = DistributionalDuelingDQN(
        n_actions,
        n_atoms,
        v_min,
        v_max,
    )

    # Noisy nets
    pnn.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
    # Turn off explorer
    explorer = explorers.Greedy()

    # Use the same hyper parameters as https://arxiv.org/abs/1710.02298
    opt = torch.optim.Adam(q_func.parameters(), 6.25e-5, eps=1.5 * 10**-4)

    # Prioritized Replay
    # Anneal beta from beta0 to 1 throughout training
    update_interval = 4
    betasteps = args.steps / update_interval
    rbuf = replay_buffers.PrioritizedReplayBuffer(
        10**6,
        alpha=0.5,
        beta0=0.4,
        betasteps=betasteps,
        num_steps=3,
        normalize_by_max="memory",
    )

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    Agent = agents.CategoricalDoubleDQN
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        minibatch_size=32,
        replay_start_size=args.replay_start_size,
        target_update_interval=32000,
        update_interval=update_interval,
        batch_accumulator="mean",
        phi=phi,
    )

    if args.load or args.load_pretrained:
        # either load or load_pretrained must be false
        assert not args.load or not args.load_pretrained
        if args.load:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model(
                    "Rainbow", args.env, model_type=args.pretrained_type
                )[0]
            )

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env, agent=agent, n_steps=args.eval_n_steps, n_episodes=None
        )
        print(
            "n_episodes: {} mean: {} median: {} stdev {}".format(
                eval_stats["episodes"],
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=args.eval_n_steps,
            eval_n_episodes=None,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=True,
            eval_env=eval_env,
        )

        dir_of_best_network = os.path.join(args.outdir, "best")
        agent.load(dir_of_best_network)

        # run 200 evaluation episodes, each capped at 30 mins of play
        stats = experiments.evaluator.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.n_best_episodes,
            max_episode_len=args.max_frames // 4,  # env steps: frames / frameskip
            logger=None,
        )
        with open(os.path.join(args.outdir, "bestscores.json"), "w") as f:
            json.dump(stats, f)
        print("The results of the best scoring network:")
        for stat in stats:
            print(str(stat) + ":" + str(stats[stat]))
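# The PrioritizedReplayBuffer above anneals the importance-sampling exponent
# beta linearly from beta0 to 1 over betasteps updates, so the bias of
# prioritized sampling vanishes by the end of training. A dependency-free
# sketch of that schedule (illustrative only, not PFRL's implementation):
def beta_at(update_step, beta0=0.4, betasteps=5 * 10**7 / 4):
    """Importance-sampling exponent after update_step updates."""
    return min(1.0, beta0 + (1.0 - beta0) * (update_step / betasteps))


assert beta_at(0) == 0.4
assert beta_at(5 * 10**7 / 8) == 0.7  # halfway through annealing
assert beta_at(10**9) == 1.0          # clamped at 1 afterwards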
def main(): parser = argparse.ArgumentParser() parser.add_argument( "--env", type=str, default="BreakoutNoFrameskip-v4", help="OpenAI Atari domain to perform algorithm on.", ) parser.add_argument( "--outdir", type=str, default="results", help=("Directory path to save output files." " If it does not exist, it will be created."), ) parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 31)") parser.add_argument("--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU.") parser.add_argument("--demo", action="store_true", default=False) parser.add_argument("--load", type=str, default=None) parser.add_argument( "--final-exploration-frames", type=int, default=10**6, help="Timesteps after which we stop " + "annealing exploration rate", ) parser.add_argument( "--final-epsilon", type=float, default=0.01, help="Final value of epsilon during training.", ) parser.add_argument( "--eval-epsilon", type=float, default=0.001, help="Exploration epsilon used during eval episodes.", ) parser.add_argument("--noisy-net-sigma", type=float, default=None) parser.add_argument( "--arch", type=str, default="doubledqn", choices=["nature", "nips", "dueling", "doubledqn"], help="Network architecture to use.", ) parser.add_argument( "--steps", type=int, default=5 * 10**7, help="Total number of timesteps to train the agent.", ) parser.add_argument( "--max-frames", type=int, default=30 * 60 * 60, # 30 minutes with 60 fps help="Maximum number of frames for each episode.", ) parser.add_argument( "--replay-start-size", type=int, default=5 * 10**4, help="Minimum replay buffer size before " + "performing gradient updates.", ) parser.add_argument( "--target-update-interval", type=int, default=3 * 10**4, help="Frequency (in timesteps) at which " + "the target network is updated.", ) parser.add_argument( "--eval-interval", type=int, default=10**5, help="Frequency (in timesteps) of evaluation phase.", ) parser.add_argument( "--update-interval", type=int, default=4, help="Frequency (in timesteps) of network updates.", ) parser.add_argument("--eval-n-runs", type=int, default=10) parser.add_argument("--no-clip-delta", dest="clip_delta", action="store_false") parser.add_argument("--num-step-return", type=int, default=1) parser.set_defaults(clip_delta=True) parser.add_argument("--agent", type=str, default="DoubleDQN", choices=["DQN", "DoubleDQN", "PAL"]) parser.add_argument( "--log-level", type=int, default=20, help="Logging level. 10:DEBUG, 20:INFO etc.", ) parser.add_argument( "--render", action="store_true", default=False, help="Render env states in a GUI window.", ) parser.add_argument( "--monitor", action="store_true", default=False, help= ("Monitor env. Videos and additional information are saved as output files." ), ) parser.add_argument("--lr", type=float, default=2.5e-4, help="Learning rate.") parser.add_argument( "--prioritized", action="store_true", default=False, help="Use prioritized experience replay.", ) parser.add_argument( "--checkpoint-frequency", type=int, default=None, help="Frequency at which agents are stored.", ) args = parser.parse_args() import logging logging.basicConfig(level=args.log_level) # Set a random seed used in PFRL. utils.set_random_seed(args.seed) # Set different random seeds for train and test envs. 
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
        )
        env.seed(int(env_seed))
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, args.eval_epsilon)
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training")
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)

    n_actions = env.action_space.n
    q_func = parse_arch(args.arch, n_actions)

    if args.noisy_net_sigma is not None:
        pnn.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
        # Turn off explorer
        explorer = explorers.Greedy()
    else:
        explorer = explorers.LinearDecayEpsilonGreedy(
            1.0,
            args.final_epsilon,
            args.final_exploration_frames,
            lambda: np.random.randint(n_actions),
        )

    # Use the Nature paper's hyperparameters
    opt = pfrl.optimizers.RMSpropEpsInsideSqrt(
        q_func.parameters(),
        lr=args.lr,
        alpha=0.95,
        momentum=0.0,
        eps=1e-2,
        centered=True,
    )

    # Select a replay buffer to use
    if args.prioritized:
        # Anneal beta from beta0 to 1 throughout training
        betasteps = args.steps / args.update_interval
        rbuf = replay_buffers.PrioritizedReplayBuffer(
            10**6,
            alpha=0.6,
            beta0=0.4,
            betasteps=betasteps,
            num_steps=args.num_step_return,
        )
    else:
        rbuf = replay_buffers.ReplayBuffer(10**6, args.num_step_return)

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    Agent = parse_agent(args.agent)
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        clip_delta=args.clip_delta,
        update_interval=args.update_interval,
        batch_accumulator="sum",
        phi=phi,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs)
        print("n_runs: {} mean: {} median: {} stdev {}".format(
            args.eval_n_runs,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=None,
            checkpoint_freq=args.checkpoint_frequency,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=False,
            eval_env=eval_env,
        )
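# A minimal sketch (not part of the example) of the exploration schedule
# implemented by explorers.LinearDecayEpsilonGreedy above: epsilon decays
# linearly from 1.0 to args.final_epsilon over the first
# args.final_exploration_frames steps and is constant afterwards. This is
# an illustrative reimplementation, not PFRL's own code.
def epsilon_at_step(t, start=1.0, end=0.01, decay_steps=10**6):
    fraction = min(t / decay_steps, 1.0)
    return start + fraction * (end - start)

# epsilon_at_step(0) -> 1.0; epsilon_at_step(5 * 10**5) -> 0.505;
# epsilon_at_step(10**6) and beyond -> 0.01.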
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="SlimeVolley-v0")
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument("--noisy-net-sigma", type=float, default=0.1)
    parser.add_argument("--steps", type=int, default=2 * 10 ** 6)
    parser.add_argument("--replay-start-size", type=int, default=1600)
    parser.add_argument("--eval-n-episodes", type=int, default=1000)
    parser.add_argument("--eval-interval", type=int, default=250000)
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=(
            "Monitor env. Videos and additional information are saved as output files."
        ),
    )
    parser.add_argument("--gamma", type=float, default=0.98)
    parser.add_argument("--v-max", type=float, default=1)
    parser.add_argument("--n-step-return", type=int, default=3)
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2 ** 31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        if "SlimeVolley" in args.env:
            # You need to install slimevolleygym
            import slimevolleygym  # NOQA
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env.seed(int(env_seed))
        if args.monitor:
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training"
            )
        if args.render:
            env = pfrl.wrappers.Render(env)
        if isinstance(env.action_space, gym.spaces.MultiBinary):
            env = MultiBinaryAsDiscreteAction(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)

    obs_size = env.observation_space.low.size
    n_actions = env.action_space.n

    n_atoms = 51
    v_max = args.v_max
    v_min = -args.v_max
    hidden_size = 512
    q_func = nn.Sequential(
        nn.Linear(obs_size, hidden_size),
        nn.ReLU(),
        nn.Linear(hidden_size, hidden_size),
        nn.ReLU(),
        DistributionalDuelingHead(hidden_size, n_actions, n_atoms, v_min, v_max),
    )

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32)

    # Noisy nets
    pnn.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
    # Turn off explorer
    explorer = explorers.Greedy()

    # Use the same eps as https://arxiv.org/abs/1710.02298
    opt = torch.optim.Adam(q_func.parameters(), 1e-4, eps=1.5e-4)

    # Prioritized replay: anneal beta from beta0 to 1 throughout training
    update_interval = 1
    betasteps = args.steps / update_interval
    rbuf = replay_buffers.PrioritizedReplayBuffer(
        10 ** 6,
        alpha=0.5,
        beta0=0.4,
        betasteps=betasteps,
        num_steps=args.n_step_return,
        normalize_by_max="memory",
    )

    agent = agents.CategoricalDoubleDQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=args.gamma,
        explorer=explorer,
        minibatch_size=32,
        replay_start_size=args.replay_start_size,
        target_update_interval=2000,
        update_interval=update_interval,
        batch_accumulator="mean",
        phi=phi,
        max_grad_norm=10,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_episodes,
        )
        print(
            "n_episodes: {} mean: {} median: {} stdev {}".format(
                eval_stats["episodes"],
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_episodes,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            save_best_so_far_agent=True,
            eval_env=eval_env,
        )
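# MultiBinaryAsDiscreteAction is used in make_env above but defined
# elsewhere in the example. Below is a minimal sketch of one plausible
# implementation, assuming each Discrete action index is decoded into the
# bits of a MultiBinary action vector; the real helper may differ.
import gym
import gym.spaces


class MultiBinaryAsDiscreteAction(gym.ActionWrapper):
    """Expose a MultiBinary(n) action space as Discrete(2 ** n)."""

    def __init__(self, env):
        super().__init__(env)
        self._n_bits = env.action_space.n
        self.action_space = gym.spaces.Discrete(2 ** self._n_bits)

    def action(self, action):
        # Decode the integer index into a binary vector, LSB first.
        return [(action >> i) & 1 for i in range(self._n_bits)]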
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env",
        type=str,
        default="BreakoutNoFrameskip-v4",
        help="OpenAI Atari domain to perform algorithm on.",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=("Directory path to save output files."
              " If it does not exist, it will be created."),
    )
    parser.add_argument("--seed",
                        type=int,
                        default=0,
                        help="Random seed [0, 2 ** 31)")
    parser.add_argument("--gpu",
                        type=int,
                        default=0,
                        help="GPU to use, set to -1 if no GPU.")
    parser.add_argument("--demo", action="store_true", default=False)
    parser.add_argument("--load", type=str, default=None)
    parser.add_argument(
        "--final-exploration-frames",
        type=int,
        default=10**6,
        help="Timesteps after which we stop annealing exploration rate",
    )
    parser.add_argument(
        "--final-epsilon",
        type=float,
        default=0.01,
        help="Final value of epsilon during training.",
    )
    parser.add_argument(
        "--eval-epsilon",
        type=float,
        default=0.001,
        help="Exploration epsilon used during eval episodes.",
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=5 * 10**7,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--max-frames",
        type=int,
        default=30 * 60 * 60,  # 30 minutes with 60 fps
        help="Maximum number of frames for each episode.",
    )
    parser.add_argument(
        "--replay-start-size",
        type=int,
        default=5 * 10**4,
        help="Minimum replay buffer size before performing gradient updates.",
    )
    parser.add_argument(
        "--target-update-interval",
        type=int,
        default=3 * 10**4,
        help="Frequency (in timesteps) at which the target network is updated.",
    )
    parser.add_argument("--demo-n-episodes", type=int, default=30)
    parser.add_argument("--eval-n-steps", type=int, default=125000)
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=250000,
        help="Frequency (in timesteps) of evaluation phase.",
    )
    parser.add_argument(
        "--update-interval",
        type=int,
        default=4,
        help="Frequency (in timesteps) of network updates.",
    )
    parser.add_argument(
        "--log-level",
        type=int,
        default=20,
        help="Logging level. 10:DEBUG, 20:INFO etc.",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        default=False,
        help="Render env states in a GUI window.",
    )
    parser.add_argument(
        "--monitor",
        action="store_true",
        default=False,
        help=("Monitor env. Videos and additional information are saved"
              " as output files."),
    )
    parser.add_argument("--lr",
                        type=float,
                        default=2.5e-4,
                        help="Learning rate.")
    parser.add_argument(
        "--recurrent",
        action="store_true",
        default=False,
        help="Use a recurrent model. See the code for the model definition.",
    )
    parser.add_argument(
        "--flicker",
        action="store_true",
        default=False,
        help=("Use so-called flickering Atari, where each"
              " screen is blacked out with probability 0.5."),
    )
    parser.add_argument(
        "--no-frame-stack",
        action="store_true",
        default=False,
        help=("Disable frame stacking so that the agent can only see"
              " the current screen."),
    )
    parser.add_argument(
        "--episodic-update-len",
        type=int,
        default=10,
        help="Maximum length of sequences for updating recurrent models",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=32,
        help=("Number of transitions (in a non-recurrent case)"
              " or sequences (in a recurrent case) used for an"
              " update."),
    )
    args = parser.parse_args()

    import logging

    logging.basicConfig(level=args.log_level)

    # Set a random seed used in PFRL.
    utils.set_random_seed(args.seed)

    # Set different random seeds for train and test envs.
    train_seed = args.seed
    test_seed = 2**31 - 1 - args.seed

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print("Output files are saved in {}".format(args.outdir))

    def make_env(test):
        # Use different random seeds for train and test envs
        env_seed = test_seed if test else train_seed
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=args.max_frames),
            episode_life=not test,
            clip_rewards=not test,
            flicker=args.flicker,
            frame_stack=not args.no_frame_stack,
        )
        env.seed(int(env_seed))
        if test:
            # Randomize actions like epsilon-greedy in evaluation as well
            env = pfrl.wrappers.RandomizeAction(env, args.eval_epsilon)
        if args.monitor:
            # Use pfrl.wrappers.Monitor, which accepts the `mode` argument,
            # consistent with the other examples (gym.wrappers.Monitor
            # does not take `mode`).
            env = pfrl.wrappers.Monitor(
                env, args.outdir, mode="evaluation" if test else "training")
        if args.render:
            env = pfrl.wrappers.Render(env)
        return env

    env = make_env(test=False)
    eval_env = make_env(test=True)
    print("Observation space", env.observation_space)
    print("Action space", env.action_space)

    n_frames = env.observation_space.shape[0]
    n_actions = env.action_space.n
    if args.recurrent:
        # Q-network with LSTM
        q_func = pfrl.nn.RecurrentSequential(
            nn.Conv2d(n_frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.Flatten(),
            nn.ReLU(),
            nn.LSTM(input_size=3136, hidden_size=512),
            nn.Linear(512, n_actions),
            DiscreteActionValueHead(),
        )
        # Replay buffer that stores whole episodes
        rbuf = replay_buffers.EpisodicReplayBuffer(10**6)
    else:
        # Q-network without LSTM
        q_func = nn.Sequential(
            nn.Conv2d(n_frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
            DiscreteActionValueHead(),
        )
        # Replay buffer that stores transitions separately
        rbuf = replay_buffers.ReplayBuffer(10**6)

    explorer = explorers.LinearDecayEpsilonGreedy(
        1.0,
        args.final_epsilon,
        args.final_exploration_frames,
        lambda: np.random.randint(n_actions),
    )

    opt = torch.optim.Adam(q_func.parameters(), lr=1e-4, eps=1e-4)

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    agent = pfrl.agents.DoubleDQN(
        q_func,
        opt,
        rbuf,
        gpu=args.gpu,
        gamma=0.99,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        batch_accumulator="mean",
        phi=phi,
        minibatch_size=args.batch_size,
        episodic_update_len=args.episodic_update_len,
        recurrent=args.recurrent,
    )

    if args.load:
        agent.load(args.load)

    if args.demo:
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.demo_n_episodes,
        )
        print("n_runs: {} mean: {} median: {} stdev {}".format(
            args.demo_n_episodes,
            eval_stats["mean"],
            eval_stats["median"],
            eval_stats["stdev"],
        ))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=args.eval_n_steps,
            eval_n_episodes=None,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            eval_env=eval_env,
        )
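# A minimal sketch (not part of the example) of rolling out the trained
# recurrent agent by hand, assuming `agent` and `eval_env` are built as in
# main() above. Inside agent.eval_mode(), act()/observe() follow the
# test-time policy, and PFRL's recurrent agents carry the LSTM hidden
# state across act() calls, resetting it at episode boundaries.
with agent.eval_mode():
    obs = eval_env.reset()
    done = False
    episode_return = 0.0
    while not done:
        action = agent.act(obs)
        obs, reward, done, _ = eval_env.step(action)
        episode_return += reward
        agent.observe(obs, reward, done, reset=False)
    print("Evaluation return:", episode_return)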