def evaluate(environment_config, agent_config, options):
    """
    Evaluate an agent interacting with an environment.

    :param environment_config: the path of the environment configuration file
    :param agent_config: the path of the agent configuration file
    :param options: the evaluation options
    """
    gym.logger.set_level(gym.logger.DEBUG if options['--verbose'] else gym.logger.INFO)
    env = load_environment(environment_config)
    agent = load_agent(agent_config, env)
    run_directory = Path(agent_config).with_suffix('').name if options['--name-from-config'] else None
    options['--seed'] = int(options['--seed']) if options['--seed'] is not None else None
    evaluation = Evaluation(env, agent,
                            run_directory=run_directory,
                            num_episodes=int(options['--episodes']),
                            sim_seed=options['--seed'],
                            recover=options['--recover'],
                            display_env=not options['--no-display'],
                            display_agent=not options['--no-display'],
                            display_rewards=not options['--no-display'])
    if options['--train']:
        evaluation.train()
    elif options['--test']:
        evaluation.test()
    else:
        evaluation.close()
    if options['--analyze'] and not options['<benchmark>']:
        RunAnalyzer([evaluation.monitor.directory])
    return os.path.relpath(evaluation.monitor.directory)
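A minimal driving sketch, not part of the original script: it builds the docopt-style options dict that this evaluate() reads and launches a short training run. The configuration paths are borrowed from the HighwayEnv snippet further below and stand in for whatever configs you actually use.

# Assumed usage sketch: only the flags read by evaluate() above are set, and the
# configuration paths are placeholders taken from the HighwayEnv example below.
options = {
    '--verbose': False,
    '--name-from-config': True,
    '--seed': '42',        # evaluate() converts this to int
    '--episodes': '10',
    '--recover': False,
    '--no-display': True,
    '--train': True,
    '--test': False,
    '--analyze': False,
    '<benchmark>': None,
}
run_dir = evaluate('configs/HighwayEnv/env.json',
                   'configs/HighwayEnv/agents/DQNAgent/ddqn.json',
                   options)
print(run_dir)  # relative path of the run directory returned by evaluate()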
def evaluate(environment_config, agent_config, options):
    """
    Evaluate an agent interacting with an environment.

    :param environment_config: the path of the environment configuration file
    :param agent_config: the path of the agent configuration file
    :param options: the evaluation options
    """
    logger.configure(LOGGING_CONFIG)
    if options['--verbose']:
        logger.configure(VERBOSE_CONFIG)
    env = load_environment(environment_config)
    agent = load_agent(agent_config, env)
    run_directory = Path(agent_config).with_suffix('').name if options['--name-from-config'] else None
    options['--seed'] = int(options['--seed']) if options['--seed'] is not None else None
    evaluation = Evaluation(env, agent,
                            run_directory=run_directory,
                            num_episodes=int(options['--episodes']),
                            sim_seed=options['--seed'],
                            recover=options['--recover'] or options['--recover-from'],
                            display_env=not options['--no-display'],
                            display_agent=not options['--no-display'],
                            display_rewards=not options['--no-display'])
    if options['--train']:
        evaluation.train()
    elif options['--test']:
        evaluation.test()
    else:
        evaluation.close()
    return os.path.relpath(evaluation.monitor.directory)
def test_evaluation(tmpdir):
    env = gym.make('CartPole-v0')
    agent = RandomAgent(env)
    evaluation = Evaluation(env, agent,
                            directory=tmpdir.strpath,
                            num_episodes=3,
                            display_env=False,
                            display_agent=False,
                            display_rewards=False)
    evaluation.train()
    artifacts = tmpdir.listdir()
    assert any(['manifest' in file.basename for file in artifacts])
    assert any(['metadata' in file.basename for file in artifacts])
    assert any(['stats' in file.basename for file in artifacts])
def evaluate(experiment):
    # Prepare workspace
    seed, budget, agent_config, env_config, path = experiment
    gym.logger.set_level(gym.logger.DISABLED)
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)

    # Make environment
    env = load_environment(env_config)

    # Make agent
    agent_name, agent_config = agent_config
    agent_config["budget"] = int(budget)
    agent = agent_factory(env, agent_config)

    # Evaluate
    print("Evaluating agent {} with budget {} on seed {}".format(agent_name, budget, seed))
    evaluation = Evaluation(env, agent,
                            directory=Path("out") / "planners" / agent_name,
                            num_episodes=1,
                            sim_seed=seed,
                            display_env=False,
                            display_agent=False,
                            display_rewards=False)
    evaluation.test()
    rewards = evaluation.monitor.stats_recorder.episode_rewards_[0]
    length = evaluation.monitor.stats_recorder.episode_lengths[0]
    total_reward = np.sum(rewards)
    return_ = np.sum([gamma**t * rewards[t] for t in range(len(rewards))])

    # Save results
    result = {
        "agent": agent_name,
        "budget": budget,
        "seed": seed,
        "total_reward": total_reward,
        "return": return_,
        "length": length
    }
    df = pd.DataFrame.from_records([result])
    with open(path, 'a') as f:
        df.to_csv(f, sep=',', encoding='utf-8', header=(f.tell() == 0), index=False)
def test_evaluation(tmpdir):
    env = gym.make('CartPole-v0')
    agent = RandomAgent(env)
    evaluation = Evaluation(env, agent,
                            directory=tmpdir.strpath,
                            num_episodes=3,
                            display_env=False,
                            display_agent=False,
                            display_rewards=False)
    evaluation.monitor._monitor = True  # TODO: dirty fix until merge of https://github.com/openai/gym/pull/1362
    evaluation.train()
    artifacts = tmpdir.listdir()
    assert any(['manifest' in file.basename for file in artifacts])
    assert any(['metadata' in file.basename for file in artifacts])
    assert any(['stats' in file.basename for file in artifacts])
def test_evaluation(tmpdir):
    env = gym.make('CartPole-v0')
    agent = RandomUniformAgent(env)
    evaluation = Evaluation(env, agent,
                            directory=tmpdir,
                            num_episodes=3,
                            display_env=False,
                            display_agent=False,
                            display_rewards=False)
    evaluation.train()
    assert any(['manifest' in file.name for file in evaluation.run_directory.iterdir()])
    assert any(['metadata' in file.name for file in evaluation.run_directory.iterdir()])
    assert any(['stats' in file.name for file in evaluation.run_directory.iterdir()])
def evaluate(env, model):
    agent_test = None
    run_directory = None
    options = {
        "--episodes_test": 400,
        "--seed": None,
        "--recover": False,
        "--recover-from": False,
        "--no-display": True,
        "--name-from-envconfig": True,
        "--model_save_freq": 50,
        "--video_save_freq": 1,
        "--create_episode_log": True,
        "--individual_episode_log_level": 2,
        "--individual_reward_tensorboard": False,
        "--create_timestep_log": False,
        "--timestep_log_freq": False,
        "--episodes": 1000,
        "--environment": "stablebaselines_highway_attention_ppo"
    }
    evaluation_test = Evaluation(env, agent_test,
                                 run_directory=run_directory,
                                 num_episodes=int(options['--episodes_test']),
                                 sim_seed=options['--seed'],
                                 recover=options['--recover'] or options['--recover-from'],
                                 display_env=not options['--no-display'],
                                 display_agent=not options['--no-display'],
                                 display_rewards=not options['--no-display'],
                                 training=False,
                                 model=model,
                                 test_stable_baseline=True,
                                 options=options)
    evaluation_test.test()
def evaluate(agent_config):
    environment_config = 'configs/FiniteMDPEnv/haystack/env3.json'
    gym.logger.set_level(gym.logger.INFO)
    env = load_environment(environment_config)
    agent = agent_factory(env, agent_config)
    evaluation = Evaluation(env, agent,
                            directory=None,
                            num_episodes=1,
                            display_env=False,
                            display_agent=False,
                            display_rewards=False)
    evaluation.test()
    evaluation.close()
    return evaluation.monitor.stats_recorder.episode_rewards[0]
def evaluate(environment_config, agent_config, options):
    """
    Evaluate an agent interacting with an environment.

    :param environment_config: the path of the environment configuration file
    :param agent_config: the path of the agent configuration file
    :param options: the evaluation options
    """
    gym.logger.set_level(gym.logger.INFO)
    env = load_environment(environment_config)
    agent = load_agent(agent_config, env)
    if options['--name-from-config']:
        directory = os.path.join(Evaluation.OUTPUT_FOLDER,
                                 os.path.basename(environment_config).split('.')[0],
                                 os.path.basename(agent_config).split('.')[0])
    else:
        directory = None
    options['--seed'] = int(options['--seed']) if options['--seed'] is not None else None
    evaluation = Evaluation(env, agent,
                            directory=directory,
                            num_episodes=int(options['--episodes']),
                            sim_seed=options['--seed'],
                            display_env=not options['--no-display'],
                            display_agent=not options['--no-display'],
                            display_rewards=not options['--no-display'])
    if options['--train']:
        evaluation.train()
    elif options['--test']:
        evaluation.test()
    else:
        evaluation.close()
    if options['--analyze'] and not options['<benchmark>']:
        RunAnalyzer([evaluation.monitor.directory])
    return os.path.relpath(evaluation.monitor.directory)
We use a policy architecture based on social attention, see [[Leurent and Mercat, 2019]](https://arxiv.org/abs/1911.12250).
"""

# Commented out IPython magic to ensure Python compatibility.
from rl_agents.trainer.evaluation import Evaluation
from rl_agents.agents.common.factory import load_agent, load_environment

# Get the environment and agent configurations from the rl-agents repository
# %cd /content/rl-agents/scripts/
env_config = 'configs/IntersectionEnv/env.json'
agent_config = 'configs/IntersectionEnv/agents/DQNAgent/ego_attention_2h.json'

env = load_environment(env_config)
agent = load_agent(agent_config, env)
evaluation = Evaluation(env, agent, num_episodes=3000, display_env=False)
print(f"Ready to train {agent} on {env}")

"""Run tensorboard locally to visualize training."""

# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir "{evaluation.directory}"

"""Start training. This should take about an hour."""

evaluation.train()

"""Progress can be visualised in the tensorboard cell above, which should update every 30s
(or manually). You may need to click the *Fit domain to data* buttons below each graph.

## Testing

Run the learned policy for a few episodes.
"""
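A minimal testing sketch to go with the notebook cell above, assuming the same pattern as the HighwayEnv example further below: recover=True reloads the last saved model from the run directory, and show_videos is that example's helper for displaying the recorded episodes.

# Assumed testing sketch: reload the trained agent and roll out a few evaluation episodes.
env = load_environment(env_config)
env.configure({"offscreen_rendering": True})
agent = load_agent(agent_config, env)
evaluation = Evaluation(env, agent, num_episodes=3, recover=True)
evaluation.test()
show_videos(evaluation.run_directory)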
def evaluate(experiment):
    # Prepare workspace
    seed, budget, agent_config, env_config, path = experiment
    gym.logger.set_level(gym.logger.DISABLED)
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)

    # Make environment
    env = load_environment(env_config)

    # Make agent
    agent_name, agent_config = agent_config
    agent_config["budget"] = int(budget)
    agent = agent_factory(env, agent_config)
    logger.debug("Evaluating agent {} with budget {} on seed {}".format(agent_name, budget, seed))

    # Compute true value
    compute_regret = True
    compute_return = False
    if compute_regret:
        env.seed(seed)
        observation = env.reset()
        vi = agent_factory(env, agent_configs()["value_iteration"])
        best_action = vi.act(observation)
        action = agent.act(observation)
        q = vi.state_action_value
        simple_regret = q[vi.mdp.state, best_action] - q[vi.mdp.state, action]
        gap = q[vi.mdp.state, best_action] - np.sort(q[vi.mdp.state, :])[-2]
    else:
        simple_regret = 0
        gap = 0

    if compute_return:
        # Evaluate
        evaluation = Evaluation(env, agent,
                                directory=Path("out") / "planners" / agent_name,
                                num_episodes=1,
                                sim_seed=seed,
                                display_env=False,
                                display_agent=False,
                                display_rewards=False)
        evaluation.test()
        rewards = evaluation.monitor.stats_recorder.episode_rewards_[0]
        length = evaluation.monitor.stats_recorder.episode_lengths[0]
        total_reward = np.sum(rewards)
        cum_discount = lambda signal: np.sum([gamma**t * signal[t] for t in range(len(signal))])
        return_ = cum_discount(rewards)
        mean_return = np.mean([cum_discount(rewards[t:]) for t in range(len(rewards))])
    else:
        length = 0
        total_reward = 0
        return_ = 0
        mean_return = 0

    # Save results
    result = {
        "agent": agent_name,
        "budget": budget,
        "seed": seed,
        "total_reward": total_reward,
        "return": return_,
        "mean_return": mean_return,
        "length": length,
        "simple_regret": simple_regret,
        "gap": gap
    }
    df = pd.DataFrame.from_records([result])
    with open(path, 'a') as f:
        df.to_csv(f, sep=',', encoding='utf-8', header=(f.tell() == 0), index=False)
""" # Commented out IPython magic to ensure Python compatibility. from rl_agents.trainer.evaluation import Evaluation from rl_agents.agents.common.factory import load_agent, load_environment # Get the environment and agent configurations from the rl-agents repository # !git clone https://github.com/eleurent/rl-agents.git # %cd /content/rl-agents/scripts/ env_config = 'configs/IntersectionEnv/env.json' agent_config = 'configs/IntersectionEnv/agents/DQNAgent/ego_attention_2h.json' env = load_environment(env_config) agent = load_agent(agent_config, env) # evaluation = Evaluation(env, agent, num_episodes=3000, display_env=False) evaluation = Evaluation(env, agent, num_episodes=5, display_env=False) print(f"Ready to train {agent} on {env}") """Run tensorboard locally to visualize training.""" # Commented out IPython magic to ensure Python compatibility. # %tensorboard --logdir "{evaluation.directory}" """Start training. This should take about an hour.""" # evaluation.train() """Progress can be visualised in the tensorboard cell above, which should update every 30s (or manually). You may need to click the *Fit domain to data* buttons below each graph. ## Testing Run the learned policy for a few episodes. """
import os

os.chdir(rl_agents_dir + "/scripts/")
env_config = 'configs/HighwayEnv/env.json'
agent_config = 'configs/HighwayEnv/agents/DQNAgent/ddqn.json'

print("test 3000")
# env = load_environment(env_config)
# agent = load_agent(agent_config, env)
# evaluation = Evaluation(env, agent, num_episodes=2000, display_env=False)
print("No train")
# print(f"Ready to train {agent} on {env}")

"""Run tensorboard locally to visualize training."""

# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir "{evaluation.directory}"

"""Start training. This should take about an hour."""

# evaluation.train()

"""Progress can be visualised in the tensorboard cell above, which should update every 30s
(or manually). You may need to click the *Fit domain to data* buttons below each graph.

## Testing

Run the learned policy for a few episodes.
"""

env = load_environment(env_config)
env.configure({"offscreen_rendering": True})
agent = load_agent(agent_config, env)
evaluation = Evaluation(env, agent, num_episodes=150, recover=True)
evaluation.test()

show_videos(evaluation.run_directory)
"gamma": 0.75, #0.8 "n_steps": 1, "batch_size": 32, #32 "memory_capacity": 15000, "target_update": 50, "exploration": { "method": "EpsilonGreedy", "tau": 6000, "temperature": 1.0, "final_temperature": 0.05 }, "loss_function": "l2" } agent = agent_factory(env, agent_config) obs, done = env.reset(), False evaluation = Evaluation(env, agent, num_episodes=3000, display_env=False) # Run episode for step in trange(env.unwrapped.config["duration"], desc="Running..."): action = agent.act(obs) print(action) obs, reward, done, info = env.step(action) env.render() if done: env.reset()
def evaluate(experiment):
    # Prepare workspace
    seed, agent_config, env_config, path = experiment
    gym.logger.set_level(gym.logger.DISABLED)
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)

    # Make environment
    env = load_environment(env_config)

    # Make agent
    agent_name, agent_config = agent_config
    agent = load_agent(agent_config, env)

    # Evaluate
    print("Evaluating agent {} on seed {}".format(agent_name, seed))
    evaluation = Evaluation(env, agent,
                            directory=path.parent / agent_name,
                            num_episodes=1,
                            sim_seed=seed,
                            display_env=True,
                            display_agent=True,
                            display_rewards=False)
    estimate_value = False
    if estimate_value:
        rewards, values, terminal = [], [], False
        evaluation.seed(episode=0)
        evaluation.reset()
        evaluation.training = False
        gamma = 0.99 or agent.config["gamma"]
        while not terminal:
            # Estimate state value
            oracle_env = safe_deepcopy_env(agent.env)
            oracle = load_agent(agent_configs()["oracle"], oracle_env)
            oracle_done, oracle_rewards = False, []
            while not oracle_done:
                action = oracle.act(None)
                _, oracle_reward, oracle_done, _ = oracle_env.step(action)
                oracle_rewards.append(oracle_reward)
            value = np.sum([gamma**t * oracle_rewards[t] for t in range(len(oracle_rewards))])
            values.append(value)
            reward, terminal = evaluation.step()
            rewards.append(reward)
        evaluation.close()
        returns = [np.sum([gamma**t * rewards[k + t] for t in range(len(rewards[k:]))])
                   for k in range(len(rewards))]

        # Save intermediate results
        df = pd.DataFrame({
            "agent": agent_name,
            "time": range(len(rewards)),
            "seed": [seed] * len(rewards),
            "reward": rewards,
            "return": returns,
            "value": values
        })
    else:
        evaluation.test()
        rewards = evaluation.monitor.stats_recorder.episode_rewards_[0]
        length = evaluation.monitor.stats_recorder.episode_lengths[0]
        total_reward = np.sum(rewards)
        cum_discount = lambda signal, gamma: np.sum([gamma**t * signal[t] for t in range(len(signal))])
        return_ = cum_discount(rewards, 0.9)
        return_undisc = cum_discount(rewards, 0.99)
        result = {
            "agent": agent_name,
            "seed": seed,
            "total_reward": total_reward,
            "return": return_,
            "return_undisc": return_undisc,
            "length": length,
        }
        df = pd.DataFrame.from_records([result])
    with open(path, 'a') as f:
        df.to_csv(f, sep=',', encoding='utf-8', header=(f.tell() == 0), index=False)
def evaluate(environment_config, agent_config, options):
    """
    Evaluate an agent interacting with an environment.

    :param environment_config: the path of the environment configuration file
    :param agent_config: the path of the agent configuration file
    :param options: the evaluation options
    """
    logger.configure(LOGGING_CONFIG)
    if options['--verbose']:
        logger.configure(VERBOSE_CONFIG)
    run_directory = None
    if options['--name-from-config']:
        run_directory = "{}_{}_{}".format(Path(agent_config).with_suffix('').name,
                                          datetime.datetime.now().strftime('%Y%m%d-%H%M%S'),
                                          os.getpid())
    options['--seed'] = int(options['--seed']) if options['--seed'] is not None else None

    env = load_environment(environment_config)
    if agent_config == "None":
        agent_config = env.config["agent_config"]
    if "auto_tau" in agent_config["exploration"] and agent_config["exploration"]["auto_tau"]:
        agent_config["exploration"]["tau"] = env.config["policy_frequency"] * env.config["duration"] \
            * int(options['--episodes']) * env.config["controlled_vehicles"] / 50
    agent = load_agent(agent_config, env)

    # TODO: different display options for agent, env, rewards
    if options['--offscreen_rendering']:
        env.config['offscreen_rendering'] = True

    evaluation_train = Evaluation(env, agent,
                                  run_directory=run_directory,
                                  num_episodes=int(options['--episodes']),
                                  sim_seed=options['--seed'],
                                  recover=options['--recover'] or options['--recover-from'],
                                  display_env=not options['--no-display'],
                                  display_agent=not options['--no-display'],
                                  display_rewards=not options['--no-display'],
                                  training=options['--train'],
                                  options=options)
    if options['--train']:
        evaluation_train.train()
    else:
        evaluation_train.close()

    if options['--test']:
        agent_test = load_agent(agent_config, env)
        if options['--train']:
            agent_test = evaluation_train.agent
        evaluation_test = Evaluation(env, agent_test,
                                     run_directory=run_directory,
                                     num_episodes=int(options['--episodes_test']),
                                     sim_seed=options['--seed'],
                                     recover=options['--recover'] or options['--recover-from'],
                                     display_env=not options['--no-display'],
                                     display_agent=not options['--no-display'],
                                     display_rewards=not options['--no-display'],
                                     training=False,
                                     options=options)
        evaluation_test.test()