    def setUp(self):
        parser = configparser.ConfigParser()
        parser.read(config_file_path)

        self.agents_path = parser.get("DEFAULT", "agents_dir")
        self.cache_dir = os.path.join(self.agents_path, "_cache")
        if not os.path.isdir(self.cache_dir):
            from tests.test_make_cache import TestMakeCache

            test_make_cache = TestMakeCache()
            test_make_cache.setUp()
            test_make_cache.test_make_cache()
        self.agent_name = "do-nothing-baseline"
        self.scenario_name = "000"
        self.env_path = parser.get("DEFAULT", "env_dir")
        p = Parameters()
        p.NO_OVERFLOW_DISCONNECTION = False
        self.env = make(
            self.env_path,
            backend=PandaPowerBackend(),
            test=True,
            param=p,
        )
        self.env.seed(0)
        params_for_runner = self.env.get_params_for_runner()
        params_to_fetch = ["init_grid_path"]
        self.params_for_reboot = {
            key: value
            for key, value in params_for_runner.items()
            if key in params_to_fetch
        }
        self.params_for_reboot["parameters"] = p

        # Load the raw episode data first: the cache-miss fallback below needs
        # it, and so does the decoration step afterwards.
        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name),
            self.scenario_name)

        cache_file = os.path.join(self.cache_dir, self.scenario_name,
                                  self.agent_name + ".dill")
        try:
            with open(cache_file, "rb") as f:
                episode_analytics = dill.load(f)
        except Exception:
            # Cache miss or unreadable cache: recompute the analytics from the
            # raw episode data.
            episode_analytics = EpisodeAnalytics(self.episode_data,
                                                 self.scenario_name,
                                                 self.agent_name)
        episode_analytics.decorate(self.episode_data)
        self.episode = episode_analytics
        self.act = self.env.action_space()
        self.expert_config = {
            "totalnumberofsimulatedtopos": 25,
            "numberofsimulatedtopospernode": 5,
            "maxUnusedLines": 2,
            "ratioToReconsiderFlowDirection": 0.75,
            "ratioToKeepLoop": 0.25,
            "ThersholdMinPowerOfLoop": 0.1,
            "ThresholdReportOfLine": 0.2,
        }
        self.obs_reboot = None
        self.reward_type = "MinMargin_reward"
Example #2
    def setUp(self):
        parser = configparser.ConfigParser()
        parser.read(config_file_path)

        self.agents_path = parser.get("DEFAULT", "agents_dir")
        self.cache_dir = os.path.join(self.agents_path, "_cache")
        if not os.path.isdir(self.cache_dir):
            from tests.test_make_cache import TestMakeCache

            test_make_cache = TestMakeCache()
            test_make_cache.setUp()
            test_make_cache.test_make_cache()
        self.agent_name = "do-nothing-baseline"
        self.scenario_name = "000"
        self.env_path = parser.get("DEFAULT", "env_dir")
        p = Parameters()
        p.NO_OVERFLOW_DISCONNECTION = False
        self.env = make(
            self.env_path,
            backend=PandaPowerBackend(),
            test=True,
            param=p,
        )
        self.env.seed(0)
        params_for_runner = self.env.get_params_for_runner()
        params_to_fetch = ["init_grid_path"]
        self.params_for_reboot = {
            key: value
            for key, value in params_for_runner.items()
            if key in params_to_fetch
        }
        self.params_for_reboot["parameters"] = p

        # Load the raw episode data first: the cache-miss fallback below needs
        # it, and so does the decoration step afterwards.
        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name),
            self.scenario_name)

        cache_file = os.path.join(self.cache_dir, self.scenario_name,
                                  self.agent_name + ".dill")
        try:
            with open(cache_file, "rb") as f:
                episode_analytics = dill.load(f)
        except Exception:
            # Cache miss or unreadable cache: recompute the analytics from the
            # raw episode data.
            episode_analytics = EpisodeAnalytics(self.episode_data,
                                                 self.scenario_name,
                                                 self.agent_name)
        episode_analytics.decorate(self.episode_data)
        self.episode = episode_analytics
        self.episode_reboot = EpisodeReboot.EpisodeReboot()
        self.episode_reboot.load(
            self.env.backend,
            data=self.episode,
            agent_path=os.path.join(self.agents_path, self.agent_name),
            name=self.episode.episode_name,
            env_kwargs=self.params_for_reboot,
        )
        self.obs, *_ = self.episode_reboot.go_to(1895)
        self.act = self.env.action_space()
Example #3
    def test_plot(self):
        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name), self.scenario_name
        )
        self.episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )
        self.episode_analytics.decorate(self.episode_data)

        make_network(self.episode_analytics).plot_obs(self.episode_analytics.observations[0])
Example #4
def compute_episode(episode_name, agent):
    print(f"Loading from logs agent {agent} on scenario {episode_name}...")
    beg = time.time()
    episode_data = retrieve_episode_from_disk(episode_name, agent)
    episode_analytics = EpisodeAnalytics(episode_data, episode_name, agent)
    save_in_fs_cache(episode_name, agent, episode_analytics)
    episode_analytics.decorate(episode_data)
    end = time.time()
    print(
        f"Agent {agent} on scenario {episode_name} loaded from logs in: {(end - beg):.1f} s"
    )
    return episode_analytics
Example #5
    def test_multi_topo(self):
        self.agent_name = "multiTopology-baseline"
        self.scenario_name = "000"
        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name), self.scenario_name
        )
        self.episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )

        nb_actions = self.episode_analytics.action_data_table[
            ["action_line", "action_subs"]
        ].sum()
        self.assertEqual(nb_actions.action_line, 25.0)
        self.assertEqual(nb_actions.action_subs, 38.0)

        action_per_line = get_action_per_line(self.episode_analytics)
        action_per_sub = get_action_per_sub(self.episode_analytics)

        # We need to sort the labels for which values are equal.
        # Otherwise, the output is random.
        self.assertListEqual(sorted(action_per_sub[0].x.tolist()), ["sub_3", "sub_5"])
        self.assertListEqual(action_per_sub[0].y.tolist(), [19, 19])
        self.assertListEqual(action_per_line[0].x.tolist(), ["3_6_15", "9_10_12"])
        self.assertListEqual(action_per_line[0].y.tolist(), [13, 12])

        self.assertListEqual(
            self.episode_analytics.action_data_table.action_id[:5].tolist(),
            [0, 1, 1, 2, 3],
        )
        self.assertListEqual(
            self.episode_analytics.action_data_table.distance[:5].tolist(),
            [1, 2, 2, 0, 3],
        )
Example #6
def make_episode_without_decorate(agent, episode_name):
    """
    Load episode from cache without decorating with the EpisodeData attributes
    This is needed to use multiprocessing which pickles/unpickles the results.

    :param agent: Agent Name
    :param episode_name: Name of the studied episode
    :return: Episode with computed data (without EpisodeData attributes), EpisodeData instance
    """
    if is_in_ram_cache(episode_name, agent):
        return get_from_ram_cache(episode_name, agent)
    elif is_in_fs_cache(episode_name, agent):
        beg = time.time()
        path = get_fs_cached_file(episode_name, agent)
        print(
            f"Loading from filesystem cache agent {agent} on scenario {episode_name}..."
        )
        with open(path, "rb") as f:
            episode_analytics = dill.load(f)
        end = time.time()
        print(
            f"Agent {agent} on scenario {episode_name} loaded from filesystem cache in: {(end - beg):.1f} s"
        )
        return episode_analytics
    else:
        episode_data = retrieve_episode_from_disk(episode_name, agent)
        if episode_data is not None:
            episode_analytics = EpisodeAnalytics(episode_data, episode_name,
                                                 agent)
            save_in_fs_cache(episode_name, agent, episode_analytics)
            return episode_analytics
        else:
            return None
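A minimal usage sketch of the multiprocessing pattern the docstring above alludes to; the agent and scenario names are placeholders and the pool layout is an illustrative assumption, not part of the original module.
# Hedged sketch: warm the filesystem cache for several (agent, episode) pairs in
# parallel. Workers return undecorated EpisodeAnalytics objects, which can be
# pickled back to the parent process (the reason decoration is deferred).
from multiprocessing import Pool

agents = ["do-nothing-baseline", "greedy-baseline"]  # placeholder agent names
episode_names = ["000"]                               # placeholder scenarios

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        analytics_list = pool.starmap(
            make_episode_without_decorate,
            [(agent, name) for agent in agents for name in episode_names],
        )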
Example #7
class TestGenerateAgent(unittest.TestCase):
    def setUp(self):
        self.case = "rte_case14_realistic"
        self.backend = PandaPowerBackend()
        self.param = Parameters()

        self.agents_path = agents_path
        self.agent_name = "redispatching-baseline"
        self.scenario_name = "000"

    def test_generate_and_read_agent_redisp(self):
        with make(self.case, param=self.param, backend=self.backend) as env:
            agent = RandomRedispatchAgent(env.action_space, env)
            runner = Runner(**env.get_params_for_runner(),
                            agentClass=None,
                            agentInstance=agent)
            # need to be seeded for reproducibility as this takes random redispatching actions
            runner.run(
                nb_episode=1,
                path_save=os.path.join(self.agents_path, self.agent_name),
                nb_process=1,
                max_iter=10,
                env_seeds=[0],
                agent_seeds=[0],
                pbar=True,
            )
            env.close()

        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name),
            self.scenario_name)
        self.episode_analytics = EpisodeAnalytics(self.episode_data,
                                                  self.scenario_name,
                                                  self.agent_name)
Example #8
    def setUp(self):
        self.agents_path = agents_path
        self.agent_name = "greedy-baseline"
        self.scenario_name = "000"
        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name), self.scenario_name
        )
        self.episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )
Example #9
class TestGenerateAgent(unittest.TestCase):
    def setUp(self):
        self.case = "rte_case14_realistic"

        self.backend = PandaPowerBackend()
        self.param = Parameters()

        self.agents_path = agents_path
        self.agent_name = 'redispatching-baseline'
        self.scenario_name = '000'

    def test_plot(self):
        self.episode_data = EpisodeData.from_disk(
            os.path.join(self.agents_path, self.agent_name), self.scenario_name
        )
        self.episode_analytics = EpisodeAnalytics(
            self.episode_data, self.scenario_name, self.agent_name
        )
        self.episode_analytics.decorate(self.episode_data)

        make_network(self.episode_analytics).plot_obs(self.episode_analytics.observations[0])
Example #10
def compute_episode(episode_name, agent):
    episode_data = retrieve_episode_from_disk(episode_name, agent)
    episode_analytics = EpisodeAnalytics(episode_data, episode_name, agent)
    save_in_fs_cache(episode_name, agent, episode_analytics)
    episode_analytics.decorate(episode_data)
    return episode_analytics