def test_multi_topo(self):
    """Check aggregated action statistics for the multiTopology-baseline agent."""
    self.agent_name = "multiTopology-baseline"
    self.scenario_name = "000"
    self.episode_data = EpisodeData.from_disk(
        os.path.join(self.agents_path, self.agent_name), self.scenario_name
    )
    self.episode_analytics = EpisodeAnalytics(
        self.episode_data, self.scenario_name, self.agent_name
    )

    # Total number of line actions and substation actions over the episode.
    totals = self.episode_analytics.action_data_table[
        ["action_line", "action_subs"]
    ].sum()
    self.assertEqual(totals.action_line, 25.0)
    self.assertEqual(totals.action_subs, 38.0)

    per_line = get_action_per_line(self.episode_analytics)
    per_sub = get_action_per_sub(self.episode_analytics)

    # We need to sort the labels for which values are equal.
    # Otherwise, the output is random.
    self.assertListEqual(sorted(per_sub[0].x.tolist()), ["sub_3", "sub_5"])
    self.assertListEqual(per_sub[0].y.tolist(), [19, 19])
    self.assertListEqual(per_line[0].x.tolist(), ["3_6_15", "9_10_12"])
    self.assertListEqual(per_line[0].y.tolist(), [13, 12])

    # Spot-check the first rows of the per-step action table.
    self.assertListEqual(
        self.episode_analytics.action_data_table.action_id[:5].tolist(),
        [0, 1, 1, 2, 3],
    )
    self.assertListEqual(
        self.episode_analytics.action_data_table.distance[:5].tolist(),
        [1, 2, 2, 0, 3],
    )
class TestGenerateAgent(unittest.TestCase):
    """Generate a short redispatching episode with a runner, then read it back."""

    def setUp(self):
        self.case = "rte_case14_realistic"
        self.backend = PandaPowerBackend()
        self.param = Parameters()
        self.agents_path = agents_path
        self.agent_name = "redispatching-baseline"
        self.scenario_name = "000"

    def test_generate_and_read_agent_redisp(self):
        save_path = os.path.join(self.agents_path, self.agent_name)
        with make(self.case, param=self.param, backend=self.backend) as env:
            baseline_agent = RandomRedispatchAgent(env.action_space, env)
            episode_runner = Runner(
                **env.get_params_for_runner(),
                agentClass=None,
                agentInstance=baseline_agent,
            )
            # need to be seeded for reproducibility as this takes random redispatching actions
            episode_runner.run(
                nb_episode=1,
                path_save=save_path,
                nb_process=1,
                max_iter=10,
                env_seeds=[0],
                agent_seeds=[0],
                pbar=True,
            )
            env.close()
        self.episode_data = EpisodeData.from_disk(save_path, self.scenario_name)
        self.episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )
def setUp(self):
    """Load the do-nothing episode, preferring the dill cache when readable.

    Builds the cache on first run (via TestMakeCache), then loads the raw
    EpisodeData from disk and either unpickles the cached analytics or
    recomputes them from the raw data.
    """
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    self.agents_path = parser.get("DEFAULT", "agents_dir")
    self.cache_dir = os.path.join(self.agents_path, "_cache")
    if not os.path.isdir(self.cache_dir):
        # Build the cache on first run so the dill file below can exist.
        from tests.test_make_cache import TestMakeCache

        test_make_cache = TestMakeCache()
        test_make_cache.setUp()
        test_make_cache.test_make_cache()
    self.agent_name = "do-nothing-baseline"
    self.scenario_name = "000"
    self.env_path = parser.get("DEFAULT", "env_dir")
    p = Parameters()
    p.NO_OVERFLOW_DISCONNECTION = False
    self.env = make(
        self.env_path,
        backend=PandaPowerBackend(),
        test=True,
        param=p,
    )
    self.env.seed(0)
    params_for_runner = self.env.get_params_for_runner()
    params_to_fetch = ["init_grid_path"]
    self.params_for_reboot = {
        key: value
        for key, value in params_for_runner.items()
        if key in params_to_fetch
    }
    self.params_for_reboot["parameters"] = p

    # Load the raw episode BEFORE the cache lookup: the fallback branch below
    # needs it (the original code referenced self.episode_data in the except
    # branch before it was ever assigned, crashing on any cache miss).
    self.episode_data = EpisodeData.from_disk(
        os.path.join(self.agents_path, self.agent_name), self.scenario_name
    )
    cache_file = os.path.join(
        self.cache_dir, self.scenario_name, self.agent_name + ".dill"
    )
    try:
        with open(cache_file, "rb") as f:
            episode_analytics = dill.load(f)
    except Exception:
        # Cache missing or unreadable: recompute the analytics from raw data.
        episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )
    episode_analytics.decorate(self.episode_data)
    self.episode = episode_analytics
    self.act = self.env.action_space()
    self.expert_config = {
        "totalnumberofsimulatedtopos": 25,
        "numberofsimulatedtopospernode": 5,
        "maxUnusedLines": 2,
        "ratioToReconsiderFlowDirection": 0.75,
        "ratioToKeepLoop": 0.25,
        "ThersholdMinPowerOfLoop": 0.1,
        "ThresholdReportOfLine": 0.2,
    }
    self.obs_reboot = None
    self.reward_type = "MinMargin_reward"
def load(self, backend, agent_path=None, name=None, data=None, env_kwargs=None):
    """Rebuild a grid2op environment that replays a logged episode.

    Parameters
    ----------
    backend:
        Grid2op backend instance used to build the environment.
    agent_path: ``str``
        Directory holding the agent logs. Always needed to read
        ``episode_meta.json`` (even when ``data`` is provided).
    name: ``str``
        Name of the episode (sub-directory of ``agent_path``).
    data:
        An already-loaded EpisodeData; it is deep-copied so the caller's
        object is left untouched. When ``None``, the episode is read from
        ``agent_path``/``name``.
    env_kwargs: ``dict``
        Extra keyword arguments forwarded to ``Environment``. Copied before
        keys are removed so the caller's dict is never mutated (the previous
        mutable ``{}`` default was shared across calls and mutated in place).

    Returns
    -------
    The first observation of the replayed scenario.
    """
    # Copy defensively: the original signature used a mutable default ({})
    # and then del'ed keys from it, mutating both the shared default and any
    # caller-supplied dict.
    env_kwargs = {} if env_kwargs is None else dict(env_kwargs)
    if data is None:
        if agent_path is not None and name is not None:
            self.episode_data = EpisodeData.from_disk(agent_path, name)
        else:
            raise Grid2OpException(
                "To replay an episode you need at least to provide an EpisodeData "
                "(using the keyword argument \"data=...\") or provide the path and name where "
                "the "
                "episode is stored (keyword arguments \"agent_path\" and \"name\")."
            )
    else:
        self.episode_data = copy.deepcopy(data)
        self.episode_data.reboot()
    self.chronics_handler = ChronicsHandler(
        chronicsClass=_GridFromLog, episode_data=self.episode_data
    )
    # These keys are set explicitly when building the Environment below, so
    # caller-provided duplicates must be dropped.
    if "chronics_handler" in env_kwargs:
        del env_kwargs["chronics_handler"]
    if "backend" in env_kwargs:
        del env_kwargs["backend"]
    if "opponent_class" in env_kwargs:
        del env_kwargs["opponent_class"]
    if "name" in env_kwargs:
        del env_kwargs["name"]

    nm = "unknonwn"
    seed = None
    # NOTE(review): the metadata is always read from agent_path/name, even in
    # the data=... branch -- callers must still pass both. TODO confirm.
    with open(os.path.join(agent_path, name, "episode_meta.json")) as f:
        dict_ = json.load(f)
        nm = re.sub("Environment_", "", dict_["env_type"])
        if dict_["env_seed"] is not None:
            seed = int(dict_["env_seed"])
    self.env = Environment(
        **env_kwargs,
        backend=backend,
        chronics_handler=self.chronics_handler,
        opponent_class=OpponentFromLog,
        name=nm,
    )
    if seed is not None:
        self.env.seed(seed)
    self.env.reset()

    # always have the two bellow synch ! otherwise it messes up the "chronics"
    # in the env, when calling "env.step"
    self.current_time_step = 0
    self.env.chronics_handler.real_data.curr_iter = 0

    # first observation of the scenario
    current_obs = self.episode_data.observations[self.current_time_step]
    self._assign_state(current_obs)
    return self.env.get_obs()
def setUp(self):
    """Load the do-nothing episode (cached when possible) and reboot it to step 1895."""
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    self.agents_path = parser.get("DEFAULT", "agents_dir")
    self.cache_dir = os.path.join(self.agents_path, "_cache")
    if not os.path.isdir(self.cache_dir):
        # Build the cache on first run so the dill file below can exist.
        from tests.test_make_cache import TestMakeCache

        test_make_cache = TestMakeCache()
        test_make_cache.setUp()
        test_make_cache.test_make_cache()
    self.agent_name = "do-nothing-baseline"
    self.scenario_name = "000"
    self.env_path = parser.get("DEFAULT", "env_dir")
    p = Parameters()
    p.NO_OVERFLOW_DISCONNECTION = False
    self.env = make(
        self.env_path,
        backend=PandaPowerBackend(),
        test=True,
        param=p,
    )
    self.env.seed(0)
    params_for_runner = self.env.get_params_for_runner()
    params_to_fetch = ["init_grid_path"]
    self.params_for_reboot = {
        key: value
        for key, value in params_for_runner.items()
        if key in params_to_fetch
    }
    self.params_for_reboot["parameters"] = p

    # Load the raw episode BEFORE the cache lookup: the fallback branch below
    # needs it (the original code referenced self.episode_data in the except
    # branch before it was ever assigned, crashing on any cache miss).
    self.episode_data = EpisodeData.from_disk(
        os.path.join(self.agents_path, self.agent_name), self.scenario_name
    )
    cache_file = os.path.join(
        self.cache_dir, self.scenario_name, self.agent_name + ".dill"
    )
    try:
        with open(cache_file, "rb") as f:
            episode_analytics = dill.load(f)
    except Exception:
        # Cache missing or unreadable: recompute the analytics from raw data.
        episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )
    episode_analytics.decorate(self.episode_data)
    self.episode = episode_analytics
    self.episode_reboot = EpisodeReboot.EpisodeReboot()
    self.episode_reboot.load(
        self.env.backend,
        data=self.episode,
        agent_path=os.path.join(self.agents_path, self.agent_name),
        name=self.episode.episode_name,
        env_kwargs=self.params_for_reboot,
    )
    self.obs, *_ = self.episode_reboot.go_to(1895)
    self.act = self.env.action_space()
def setUp(self):
    """Load the greedy-baseline episode and its analytics for scenario 000."""
    self.agents_path = agents_path
    self.agent_name = "greedy-baseline"
    self.scenario_name = "000"
    episode_dir = os.path.join(self.agents_path, self.agent_name)
    self.episode_data = EpisodeData.from_disk(episode_dir, self.scenario_name)
    self.episode_analytics = EpisodeAnalytics(
        self.episode_data, self.scenario_name, self.agent_name
    )
def test_plot(self):
    """Plotting the first observation through make_network should not raise."""
    episode_dir = os.path.join(self.agents_path, self.agent_name)
    self.episode_data = EpisodeData.from_disk(episode_dir, self.scenario_name)
    self.episode_analytics = EpisodeAnalytics(
        self.episode_data, self.scenario_name, self.agent_name
    )
    self.episode_analytics.decorate(self.episode_data)
    network = make_network(self.episode_analytics)
    network.plot_obs(self.episode_analytics.observations[0])
def replay_episode(self, episode_id, fps=2.0, gif_name=None, display=True,
                   start_step=0, end_step=-1,
                   line_info="rho", load_info="p", gen_info="p",
                   resolution=(1280, 720)):
    """
    When called, this function will start the display of the episode in a
    "mini movie" format.

    Parameters
    ----------
    episode_id: ``str``
        ID of the episode to replay

    fps: ``float``
        Frames per second. When it's low, you will have more time to look at
        each frame, but the episode will last longer. When it's high, episode
        will be faster, but frames will stay less time on the screen.

    gif_name: ``str``
        If provided, a .gif file is saved in the episode folder with the name
        :gif_name:. The .gif extension is appened by this function

    start_step: ``int``
        Default to 0. The step at which to start generating the gif

    end_step: ``int``
        Default to -1. The step at which to stop generating the gif.
        Set to -1 to specify no limit

    load_info: ``str``
        Defaults to "p". What kind of values to show on loads.
        Can be oneof `["p", "v", None]`

    gen_info: ``str``
        Defaults to "p". What kind of values to show on generators.
        Can be oneof `["p", "v", None]`

    line_info: ``str``
        Defaults to "rho". What kind of values to show on lines.
        Can be oneof `["rho", "a", "p", "v", None]`

    resolution: ``tuple``
        Defaults to (1280, 720). The resolution to use for the gif.

    Raises
    ------
    Grid2OpException
        If no episode folder is found for ``episode_id``.
    """
    # Check args
    path_ep = os.path.join(self.agent_path, episode_id)
    if not os.path.exists(path_ep):
        raise Grid2OpException(
            "No episode is found at \"{}\".".format(path_ep))

    # Load episode observations
    self.episode_data = EpisodeData.from_disk(agent_path=self.agent_path,
                                              name=episode_id)
    all_obs = [el for el in self.episode_data.observations]

    # Create a plotter
    width, height = resolution
    plot_runner = PlotMatplot(self.episode_data.observation_space,
                              width=width, height=height,
                              load_name=False, gen_name=False)

    # Some vars for gif export if enabled
    frames = []
    gif_path = None
    if gif_name is not None:
        gif_path = os.path.join(path_ep, gif_name + ".gif")

    # Render loop
    figure = None
    time_per_frame = 1.0 / fps
    for step, obs in enumerate(all_obs):
        # Skip up to start_step
        if step < start_step:
            continue
        # Terminate if reached end_step
        if end_step > 0 and step >= end_step:
            break
        # Get a timestamp for current frame
        start_time = time.time()

        # Render the observation
        fig = plot_runner.plot_obs(observation=obs,
                                   line_info=line_info,
                                   gen_info=gen_info,
                                   load_info=load_info,
                                   figure=figure,
                                   redraw=True)
        if figure is None and display:
            fig.show()
        elif display:
            fig.canvas.draw()

        # Store figure for re-use
        figure = fig
        # Save pixel array if needed
        if gif_name is not None:
            frames.append(plot_runner.convert_figure_to_numpy_HWC(figure))

        # Get the timestamp after frame is rendered
        end_time = time.time()
        delta_time = end_time - start_time

        # Cap fps for display mode
        if display:
            wait_time = time_per_frame - delta_time
            if wait_time > 0.0:
                time.sleep(wait_time)

    # Export all frames as gif if enabled
    if gif_name is not None and len(frames) > 0:
        try:
            imageio.mimwrite(gif_path, frames, fps=fps)
            # Try to compress
            try:
                from pygifsicle import optimize
                optimize(gif_path, options=["-w", "--no-conserve-memory"])
            except Exception:
                # Bare "except:" here used to swallow KeyboardInterrupt /
                # SystemExit as well; gif compression is strictly best-effort.
                warn_msg = "Failed to optimize .GIF size, but gif is still saved:\n" \
                           "Install dependencies to reduce size by ~3 folds\n" \
                           "apt-get install gifsicle && pip3 install pygifsicle"
                warnings.warn(warn_msg)
        except Exception as e:
            warnings.warn(
                "Impossible to save gif with error :\n{}".format(e))
def replay_episode(self, episode_id, fps=2.0, gif_name=None, display=True):
    """
    When called, this function will start the display of the episode in a
    "mini movie" format.

    Parameters
    ----------
    episode_id: ``str``
        ID of the episode to replay

    fps: ``float``
        Frames per second. When it's low, you will have more time to look at
        each frame, but the episode will last longer. When it's high, episode
        will be faster, but frames will stay less time on the screen.

    gif_name: ``str``
        If provided, a .gif file is saved in the episode folder with the name
        :gif_name:. The .gif extension is appened by this function

    Raises
    ------
    Grid2OpException
        If no episode folder is found for ``episode_id``.
    """
    # Check args
    path_ep = os.path.join(self.agent_path, episode_id)
    if not os.path.exists(path_ep):
        raise Grid2OpException(
            "No episode is found at \"{}\".".format(path_ep))

    # Load episode observations
    self.episode_data = EpisodeData.from_disk(agent_path=self.agent_path,
                                              name=episode_id)
    all_obs = [el for el in self.episode_data.observations]

    # Create a plotter
    plot_runner = PlotMatplot(self.episode_data.observation_space)

    # Some vars for gif export if enabled
    frames = []
    gif_path = None
    if gif_name is not None:
        gif_path = os.path.join(path_ep, gif_name + ".gif")

    # Render loop
    figure = None
    time_per_frame = 1.0 / fps
    for obs in all_obs:
        # Get a timestamp for current frame
        start_time = time.time()

        # Render the observation
        fig = plot_runner.plot_obs(observation=obs,
                                   figure=figure,
                                   redraw=True)
        if figure is None and display:
            fig.show()
        elif display:
            fig.canvas.draw()

        # Store figure for re-use
        figure = fig
        # Save pixel array if needed
        if gif_name is not None:
            frames.append(plot_runner.convert_figure_to_numpy_HWC(figure))

        # Get the timestamp after frame is rendered
        end_time = time.time()
        delta_time = end_time - start_time

        # Cap fps for display mode
        if display:
            wait_time = time_per_frame - delta_time
            if wait_time > 0.0:
                time.sleep(wait_time)

    # Export all frames as gif if enabled.  Guard against an empty episode:
    # imageio rejects an empty frame list (the extended variant of this
    # method already performs the same check).
    if gif_name is not None and len(frames) > 0:
        imageio.mimwrite(gif_path, frames, fps=fps)
        # Try to compress
        try:
            from pygifsicle import optimize
            optimize(gif_path, options=["-w", "--no-conserve-memory"])
        except Exception:
            # Bare "except:" here used to swallow KeyboardInterrupt /
            # SystemExit as well; gif compression is strictly best-effort.
            warn_msg = "Failed to optimize .GIF size, but gif is still saved:\n" \
                       "Install dependencies to reduce size by ~3 folds\n" \
                       "apt-get install gifsicle && pip3 install pygifsicle"
            warnings.warn(warn_msg)