Example No. 1
    def test_donothing_noov_80(self):
        """test that do nothing has a score of 80.0 if it is run with "no overflow disconnection" """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                # the max step cannot be decreased here: it must stay above the number of steps the do-nothing agent survives
                scores = ScoreL2RPN2020(env,
                                        nb_scenario=2,
                                        verbose=0,
                                        max_step=130)
                assert scores._recomputed_dn
                assert scores._recomputed_no_ov

                # the statistics have been properly computed
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_DN)))
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))

                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert np.max(
                    np.abs(my_scores
                           )) <= self.tol_one, "error for the first do nothing"

            param = Parameters()
            param.NO_OVERFLOW_DISCONNECTION = True
            with make("rte_case5_example", test=True, param=param) as env:
                scores2 = ScoreL2RPN2020(env,
                                         nb_scenario=2,
                                         verbose=0,
                                         max_step=130)
                assert not scores2._recomputed_dn
                assert not scores2._recomputed_no_ov
                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores2.get(my_agent)
                assert np.max(
                    np.abs(np.array(my_scores) - 80.0)) <= self.tol_one

            # delete them
            stats_0 = EpisodeStatistics(env, ScoreL2RPN2020.NAME_DN)
            stats_1 = EpisodeStatistics(env,
                                        ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)
            stats_0.clear_all()
            stats_1.clear_all()
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN)))
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(
                        ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))
Example No. 2
def main(max_ts, ENV_NAME, test=True):
    backend = LightSimBackend()
    param = Parameters()
    param.init_from_dict({"NO_OVERFLOW_DISCONNECTION": True})

    env_klu = make(ENV_NAME,
                   backend=backend,
                   param=param,
                   test=test,
                   data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
    agent = DoNothingAgent(action_space=env_klu.action_space)
    nb_ts_klu, time_klu, aor_klu, gen_p_klu, gen_q_klu = run_env(env_klu,
                                                                 max_ts,
                                                                 agent,
                                                                 chron_id=0)

    env_pp = make(ENV_NAME,
                  param=param,
                  test=test,
                  data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
    agent = DoNothingAgent(action_space=env_pp.action_space)
    nb_ts_pp, time_pp, aor_pp, gen_p_pp, gen_q_pp = run_env(env_pp,
                                                            max_ts,
                                                            agent,
                                                            chron_id=0)

    print_res(env_klu, env_pp, nb_ts_klu, nb_ts_pp, time_klu, time_pp, aor_klu,
              aor_pp, gen_p_klu, gen_p_pp, gen_q_klu, gen_q_pp)
Example No. 3
    def test_modif_max_step_decrease(self):
        """
        test that i can modify the max step by decreaseing it (and in that case it does not trigger a recomputation
        of the statistics)
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env,
                                        nb_scenario=2,
                                        verbose=0,
                                        max_step=15)

                # the statistics have been properly computed
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_DN)))
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))

                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert np.max(
                    np.abs(my_scores
                           )) <= self.tol_one, "error for the first do nothing"

                scores2 = ScoreL2RPN2020(env,
                                         nb_scenario=2,
                                         verbose=0,
                                         max_step=10)
                assert not scores2._recomputed_dn
                assert not scores2._recomputed_no_ov
                my_agent = DoNothingAgent(env.action_space)
                my_scores2, *_ = scores2.get(my_agent)
                assert np.max(
                    np.abs(my_scores2)
                ) <= self.tol_one, "error for the second do nothing"

            # delete them
            stats_0 = EpisodeStatistics(env, ScoreL2RPN2020.NAME_DN)
            stats_1 = EpisodeStatistics(env,
                                        ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)
            stats_0.clear_all()
            stats_1.clear_all()
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN)))
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(
                        ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))
Example No. 4
    def test_0_donothing(self):
        agent = DoNothingAgent(self.env.helper_action_player)
        i, cum_reward = self._aux_test_agent(agent)
        assert i == 31, "The powerflow diverged before step 30 for do nothing"
        assert np.abs(
            cum_reward - 35140.02895
        ) <= self.tol_one, "The reward has not been properly computed"
Example No. 5
    def test_donothing_0(self):
        """test that do nothing has a score of 0.00"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env, nb_scenario=4, verbose=0, max_step=20)

                # the statistics have been properly computed
                assert os.path.exists(os.path.join(env.get_path_env(),
                                                   EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN)))
                assert os.path.exists(os.path.join(env.get_path_env(),
                                                   EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))

                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert np.max(np.abs(my_scores)) <= self.tol_one

            # delete them
            stats_0 = EpisodeStatistics(env, ScoreL2RPN2020.NAME_DN)
            stats_1 = EpisodeStatistics(env, ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)
            stats_2 = EpisodeStatistics(env, ScoreL2RPN2020.NAME_RP_NO_OVERWLOW)
            stats_0.clear_all()
            stats_1.clear_all()
            stats_2.clear_all()
            assert not os.path.exists(os.path.join(env.get_path_env(),
                                                   EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN)))
            assert not os.path.exists(os.path.join(env.get_path_env(),
                                                   EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))
            assert not os.path.exists(os.path.join(env.get_path_env(),
                                                   EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_RP_NO_OVERWLOW)))
Example No. 6
    def test_modif_nb_scenario(self):
        """
        test that i can modify the nb_scenario and it properly recomputes it when it increased and not
        when it decreases
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env,
                                        nb_scenario=2,
                                        verbose=0,
                                        max_step=5)

                # the statistics have been properly computed
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_DN)))
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_RP_NO_OVERFLOW)))

                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert np.max(
                    np.abs(my_scores
                           )) <= self.tol_one, "error for the first do nothing"

                scores2 = ScoreL2RPN2020(env,
                                         nb_scenario=4,
                                         verbose=0,
                                         max_step=5)
                assert scores2._recomputed_dn
                assert scores2._recomputed_no_ov_rp

                scores2 = ScoreL2RPN2020(env,
                                         nb_scenario=3,
                                         verbose=0,
                                         max_step=5)
                assert not scores2._recomputed_dn
                assert not scores2._recomputed_no_ov_rp

            # delete them
            scores.clear_all()
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN)))
            # assert not os.path.exists(os.path.join(env.get_path_env(),
            #                                        EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(
                        ScoreL2RPN2020.NAME_RP_NO_OVERFLOW)))
Example No. 7
    def test_0_donothing(self):
        agent = DoNothingAgent(self.env.action_space)
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            i, cum_reward, all_acts = self._aux_test_agent(agent)
        assert i == 31, "The powerflow diverged before step 30 for do nothing"
        expected_reward = dt_float(35140.027)
        assert np.abs(cum_reward - expected_reward, dtype=dt_float) <= self.tol_one, \
            "The reward has not been properly computed"
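A note on dt_float seen above: grid2op pins its numeric dtypes so results are reproducible across platforms. To the best of my knowledge it aliases numpy's 32-bit float, which is why the comparison is done with an explicit dtype and a tolerance:

import numpy as np
from grid2op.dtypes import dt_float

# dt_float is grid2op's pinned floating-point dtype; it aliases np.float32
# in the releases I know of (verify against your installed version)
assert dt_float is np.float32
expected_reward = dt_float(35140.027)  # stored at float32 precision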
Example No. 8
    def update_action(n_clicks, action_dict, agent, scenario):
        episode = make_episode(agent, scenario)
        if action_dict is None:
            raise PreventUpdate
        action_dict = json.loads(
            action_dict.replace("(", "[").replace(")", "]"))

        # Temporary implementation for testing purposes
        p = Parameters()
        p.NO_OVERFLOW_DISCONNECTION = False
        env = make(
            r"D:\Projects\RTE-Grid2Viz\Grid2Op\grid2op\data\rte_case14_realistic",
            test=True,
            param=p,
        )
        env.seed(0)

        params_for_runner = env.get_params_for_runner()
        params_to_fetch = ["init_grid_path"]
        params_for_reboot = {
            key: value
            for key, value in params_for_runner.items()
            if key in params_to_fetch
        }
        params_for_reboot["parameters"] = p

        episode_reboot = EpisodeReboot.EpisodeReboot()
        agent_path = r"D:/Projects/RTE-Grid2Viz/grid2viz/grid2viz/data/agents/do-nothing-baseline"
        episode_reboot.load(
            env.backend,
            data=episode,
            agent_path=agent_path,
            name=episode.episode_name,
            env_kwargs=params_for_reboot,
        )
        obs, reward, *_ = episode_reboot.go_to(1)
        agent = DoNothingAgent(action_space=episode_reboot.env.action_space)
        act = agent.act(obs, reward)
        act = act.update(action_dict)
        obs, *_ = obs.simulate(action=act, time_step=0)
        network_graph = make_network(episode).plot_obs(observation=obs)
        return json.dumps(action_dict, indent=1), network_graph
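The key call in this callback is obs.simulate: it evaluates an action on the forecasted state and leaves the environment untouched, with the same return signature as env.step. A minimal sketch of the pattern (function and variable names are illustrative):

def preview_action(obs, action):
    # same return signature as env.step, but computed on a forecast;
    # time_step=1 uses the next forecast, time_step=0 the current snapshot
    sim_obs, sim_reward, sim_done, sim_info = obs.simulate(action, time_step=1)
    return sim_obs, sim_reward, sim_done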
Example No. 9
def main(max_ts, ENV_NAME):
    backend = LightSimBackend()
    param = Parameters()
    param.init_from_dict({"NO_OVERFLOW_DISCONNECTION": True})

    env_klu = make(ENV_NAME, backend=backend, param=param, test=True)
    agent = DoNothingAgent(action_space=env_klu.action_space)
    nb_ts_klu, time_klu, aor_klu, gen_p_klu, gen_q_klu = run_env(env_klu, max_ts, agent)

    env_pp = make(ENV_NAME, param=param, test=True)
    agent = DoNothingAgent(action_space=env_pp.action_space)
    nb_ts_pp, time_pp, aor_pp, gen_p_pp, gen_q_pp = run_env(env_pp, max_ts, agent)

    print_res(env_klu, env_pp,
              nb_ts_klu, nb_ts_pp,
              time_klu, time_pp,
              aor_klu, aor_pp,
              gen_p_klu, gen_p_pp,
              gen_q_klu, gen_q_pp
              )
Example No. 10
def get_agent(env, agent_name, **kwargsagent):
    if agent_name == "do_nothing":
        res = DoNothingAgent(env.action_space, **kwargsagent)
    elif agent_name == "random_n_n1":
        res = RandomNN1(env.action_space, **kwargsagent)
    elif agent_name == "random_n1":
        res = RandomN1(env.action_space, **kwargsagent)
    elif agent_name == "random_n2":
        res = RandomN2(env.action_space, **kwargsagent)
    else:
        raise NotImplementedError()
    return res
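The if/elif chain above is a plain factory; the same dispatch can be written with a dict, which turns adding an agent into a one-line change. A sketch under the same imports as the original:

AGENT_CLASSES = {
    "do_nothing": DoNothingAgent,
    "random_n_n1": RandomNN1,
    "random_n1": RandomN1,
    "random_n2": RandomN2,
}

def get_agent(env, agent_name, **kwargsagent):
    # look up the class, then instantiate it with the action space
    try:
        agent_cls = AGENT_CLASSES[agent_name]
    except KeyError:
        raise NotImplementedError(f"unknown agent_name: {agent_name!r}")
    return agent_cls(env.action_space, **kwargsagent)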
Example No. 11
    def test_can_compute(self):
        """test that i can initialize the score and then delete the statistics"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make(os.path.join(PATH_DATA_TEST,
                                   "l2rpn_neurips_2020_track1_with_alert"),
                      test=True) as env:
                scores = ScoreICAPS2021(
                    env,
                    nb_scenario=2,
                    verbose=0,
                    max_step=50,
                    env_seeds=[1, 2],  # with these seeds, do nothing survives until the end
                    agent_seeds=[3, 4])
                my_agent = DoNothingAgent(env.action_space)
                scores_this, n_played, total_ts = scores.get(my_agent)
                for (ep_score, op_score, alarm_score) in scores_this:
                    assert np.abs(
                        ep_score - 30.
                    ) <= self.tol_one, f"wrong score for the episode: {ep_score} vs 30."
                    assert np.abs(op_score - 0.) <= self.tol_one, f"wrong score for the operational cost: " \
                                                                  f"{op_score} vs 0."
                    assert np.abs(alarm_score - 100.) <= self.tol_one, f"wrong score for the alarm: " \
                                                                       f"{alarm_score} vs 100."

            # the statistics have been properly computed
            assert os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(ScoreICAPS2021.NAME_DN)))
            assert os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(
                        ScoreICAPS2021.NAME_RP_NO_OVERFLOW)))

            # delete them
            scores.clear_all()
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN)))
            # assert not os.path.exists(os.path.join(env.get_path_env(),
            #                                        EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))
            assert not os.path.exists(
                os.path.join(
                    env.get_path_env(),
                    EpisodeStatistics.get_name_dir(
                        ScoreL2RPN2020.NAME_RP_NO_OVERFLOW)))
Example No. 12
                "is a positive integer.")

        for remote in self._remotes:
            remote.send(('z', new_chunk_size))


if __name__ == "__main__":
    import numpy as np  # needed for np.sum on the rewards below
    from tqdm import tqdm
    from grid2op import make
    from grid2op.Agent import DoNothingAgent
    env = make()

    nb_env = 8  # change that to adapt to your system
    NB_STEP = 1000  # number of step for each environment

    agent = DoNothingAgent(env.action_space)
    multi_envs = MultiEnvironment(env=env, nb_env=nb_env)

    obs = multi_envs.reset()
    rews = [env.reward_range[0] for i in range(nb_env)]
    dones = [False for i in range(nb_env)]

    total_reward = 0.
    for i in tqdm(range(NB_STEP)):
        acts = [None for _ in range(nb_env)]
        for env_act_id in range(nb_env):
            acts[env_act_id] = agent.act(obs[env_act_id], rews[env_act_id],
                                         dones[env_act_id])
        obs, rews, dones, infos = multi_envs.step(acts)
        total_reward += np.sum(rews)
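One thing the script omits is cleanup: the worker processes spawned by MultiEnvironment should be shut down explicitly when the loop is done. Assuming the class exposes a close method like grid2op's other multi-process environments (an assumption worth checking in your version), the script would end with:

# shut down the worker processes and the template environment
multi_envs.close()   # assumed API, mirroring grid2op's multi-process environments
env.close()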
Example No. 13
def main(max_ts, ENV_NAME, test=True):
    param = Parameters()
    param.init_from_dict({"NO_OVERFLOW_DISCONNECTION": True})

    env_pp = make(ENV_NAME,
                  param=param,
                  test=test,
                  data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
    agent = DoNothingAgent(action_space=env_pp.action_space)
    nb_ts_pp, time_pp, aor_pp, gen_p_pp, gen_q_pp = run_env(env_pp,
                                                            max_ts,
                                                            agent,
                                                            chron_id=0,
                                                            env_seed=0)
    pp_time_pf = env_pp._time_powerflow
    wst = True  # print extra info in the run_env function
    env_lightsim = make(
        ENV_NAME,
        backend=LightSimBackend(),
        param=param,
        test=test,
        data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
    solver_types = env_lightsim.backend.available_solvers
    if lightsim2grid.SolverType.KLU in solver_types:
        env_lightsim.backend.set_solver_type(lightsim2grid.SolverType.KLU)
        env_lightsim.backend.set_solver_max_iter(10)
        nb_ts_klu, time_klu, aor_klu, gen_p_klu, gen_q_klu = run_env(
            env_lightsim,
            max_ts,
            agent,
            chron_id=0,
            with_type_solver=wst,
            env_seed=0)
        klu_comp_time = env_lightsim.backend.comp_time
        klu_time_pf = env_lightsim._time_powerflow
    if lightsim2grid.SolverType.SparseLU in solver_types:
        env_lightsim.backend.set_solver_type(lightsim2grid.SolverType.SparseLU)
        env_lightsim.backend.set_solver_max_iter(10)
        nb_ts_slu, time_slu, aor_slu, gen_p_slu, gen_q_slu = run_env(
            env_lightsim,
            max_ts,
            agent,
            chron_id=0,
            with_type_solver=wst,
            env_seed=0)
        slu_comp_time = env_lightsim.backend.comp_time
        slu_time_pf = env_lightsim._time_powerflow
    if lightsim2grid.SolverType.GaussSeidel in solver_types:
        env_lightsim.backend.set_solver_type(
            lightsim2grid.SolverType.GaussSeidel)
        env_lightsim.backend.set_solver_max_iter(10000)
        nb_ts_gs, time_gs, aor_gs, gen_p_gs, gen_q_gs = run_env(
            env_lightsim,
            max_ts,
            agent,
            chron_id=0,
            with_type_solver=wst,
            env_seed=0)
        gs_comp_time = env_lightsim.backend.comp_time
        gs_time_pf = env_lightsim._time_powerflow

    # NOW PRINT THE RESULTS
    env_name = get_env_name_displayed(ENV_NAME)
    hds = [
        f"{env_name}", f"grid2op speed (it/s)", f"grid2op powerflow time (ms)",
        f"solver powerflow time (ms)"
    ]
    tab = [[
        "PP",
        int(nb_ts_pp / time_pp), f"{1000.*pp_time_pf/nb_ts_pp:.2e}",
        f"{1000.*pp_time_pf/nb_ts_pp:.2e}"
    ]]
    # guard on availability: the bare enum member is always truthy, so the
    # original "if lightsim2grid.SolverType.GaussSeidel:" never skipped anything
    if lightsim2grid.SolverType.GaussSeidel in solver_types:
        tab.append([
            "LS+GS",
            int(nb_ts_gs / time_gs), f"{1000.*gs_time_pf/nb_ts_gs:.2e}",
            f"{1000.*gs_comp_time/nb_ts_gs:.2e}"
        ])
    if lightsim2grid.SolverType.SparseLU in solver_types:
        tab.append([
            "LS+SLU",
            int(nb_ts_slu / time_slu), f"{1000.*slu_time_pf/nb_ts_slu:.2e}",
            f"{1000.*slu_comp_time/nb_ts_slu:.2e}"
        ])
    if lightsim2grid.SolverType.KLU in solver_types:
        tab.append([
            "LS+KLU",
            int(nb_ts_klu / time_klu), f"{1000.*klu_time_pf/nb_ts_klu:.2e}",
            f"{1000.*klu_comp_time/nb_ts_klu:.2e}"
        ])
    res_use_with_grid2op_1 = tabulate(tab, headers=hds, tablefmt="rst")
    print(res_use_with_grid2op_1)
    print()

    res_github_readme = tabulate(tab, headers=hds, tablefmt="github")
    print(res_github_readme)
    print()

    hds = [
        f"{env_name} ({nb_ts_pp} iter)", f"Δ aor (amps)", f"Δ gen_p (MW)",
        f"Δ gen_q (MVAr)"
    ]
    tab = [["PP", "0.00", "0.00", "0.00"]]
    if lightsim2grid.SolverType.GaussSeidel in solver_types:
        tab.append([
            "LS+GS", f"{np.max(np.abs(aor_gs - aor_pp)):.2e}",
            f"{np.max(np.abs(gen_p_gs - gen_p_pp)):.2e}",
            f"{np.max(np.abs(gen_q_gs - gen_q_pp)):.2e}"
        ])
    if lightsim2grid.SolverType.SparseLU in solver_types:
        tab.append([
            "LS+SLU", f"{np.max(np.abs(aor_slu - aor_pp)):.2e}",
            f"{np.max(np.abs(gen_p_slu - gen_p_pp)):.2e}",
            f"{np.max(np.abs(gen_q_slu - gen_q_pp)):.2e}"
        ])
    if lightsim2grid.SolverType.KLU in solver_types:
        tab.append([
            "LS+KLU", f"{np.max(np.abs(aor_klu - aor_pp)):.2e}",
            f"{np.max(np.abs(gen_p_klu - gen_p_pp)):.2e}",
            f"{np.max(np.abs(gen_q_klu - gen_q_pp)):.2e}"
        ])

    res_use_with_grid2op_2 = tabulate(tab, headers=hds, tablefmt="rst")
    print(res_use_with_grid2op_2)
Example No. 14
    def compute(self,
                agent=None,
                parameters=None,
                nb_scenario=1,
                scores_func=None,
                max_step=-1,
                env_seeds=None,
                agent_seeds=None,
                nb_process=1,
                pbar=False):
        """
        This function will save (to be later used with :func:`EpisodeStatistics.get_statistics`) all the observation
        at all time steps, for a given number of scenario (see attributes nb_scenario).

        This is useful when you want to store at a given place some information to use later on on your agent.

        Notes
        -----
        Depending on its parameters (mainly the environment, the agent and the number of scenarios computed)
        this function might take a really long time to compute.

        However you only need to compute it once (unless you delete its results with
        :func:`EpisodeStatistics.clear_all` or :func:`EpisodeStatistics.clear_episode_data`

        Results might also take a lot of space on the hard drive (possibly few GB as all information of all
        observations encountered are stored)

        Parameters
        ----------

        agent: :class:`grid2op.Agent.BaseAgent`
            The agent you want to use to generate the statistics. Note that the statistics are highly dependant on
            the agent. For now only one set of statistics are computed. If you want to run a different agent previous
            results will be erased.

        parameters: :class:`grid2op.Parameters.Parameters`
            The parameters you want to use when computing this statistics

        nb_scenario: ``int``
            Number of scenarios that will be evaluated

        scores_func: :class:`grid2op.Reward.BaseReward`
            A reward used to compute the score of an Agent (it can now be a dictionary of BaseReward)

        nb_scenario: ``int``
            On how many scenarios you want the statistics to be computed

        max_step: ``int``
            Maximum number of steps you want to compute (see :func:`grid2op.Runner.Runner.run`)

        env_seeds: ``list``
            List of seeds used for the environment (for reproducible results) (see :func:`grid2op.Runner.Runner.run`)

        agent_seeds: ``list``
            List of seeds used for the agent (for reproducible results) (see :func:`grid2op.Runner.Runner.run`).

        nb_process: ``int``
            Number of process to use (see :func:`grid2op.Runner.Runner.run`)

        pbar: ``bool``
            Whether a progress bar is displayed (see :func:`grid2op.Runner.Runner.run`)

        """
        if agent is None:
            agent = DoNothingAgent(self.env.action_space)
        if parameters is None:
            parameters = copy.deepcopy(self.env.parameters)
        if not isinstance(agent, BaseAgent):
            raise RuntimeError(
                "\"agent\" should be either \"None\" to use DoNothingAgent or an agent that inherits "
                "grid2op.Agent.BaseAgent")
        if not isinstance(parameters, Parameters):
            raise RuntimeError(
                "\"parameters\" should be either \"None\" to use the default parameters passed in the "
                "environment or inherits grid2op.Parameters.Parameters")

        score_names = None
        dict_metadata = self._fill_metadata(agent, parameters, max_step,
                                            agent_seeds, env_seeds)

        if scores_func is not None:
            if EpisodeStatistics._check_if_base_reward(scores_func):
                dict_metadata["score_class"] = f"{scores_func}"
                score_names = [self.SCORES]
            elif isinstance(scores_func, dict):
                score_names = []
                for nm, score_fun in scores_func.items():
                    if not EpisodeStatistics._check_if_base_reward(score_fun):
                        raise Grid2OpException(
                            "if using \"scores_func\" as a dictionary, each value needs to be a "
                            "BaseReward")
                    dict_metadata[f"score_class_{nm}"] = f"{score_fun}"
                    score_names.append(f"{nm}_{self.SCORES}")
            else:
                raise Grid2OpException(
                    "scores_func should be either a dictionary or an instance of BaseReward"
                )

        self.run_env(env=self.env,
                     path_save=self.path_save_stats,
                     parameters=parameters,
                     scores_func=scores_func,
                     agent=agent,
                     max_step=max_step,
                     env_seeds=env_seeds,
                     agent_seeds=agent_seeds,
                     pbar=pbar,
                     nb_process=nb_process,
                     nb_scenario=nb_scenario)

        # inform grid2op this is a statistics directory
        self._tell_is_stats()
        if scores_func is not None:
            self._tell_has_score()

        # now clean a bit the output directory
        os.remove(os.path.join(self.path_save_stats, EpisodeData.ACTION_SPACE))
        os.remove(os.path.join(self.path_save_stats, EpisodeData.ATTACK_SPACE))
        os.remove(
            os.path.join(self.path_save_stats, EpisodeData.ENV_MODIF_SPACE))
        os.remove(os.path.join(self.path_save_stats, EpisodeData.OBS_SPACE))

        li_episodes = EpisodeData.list_episode(self.path_save_stats)
        for path_tmp, episode_name in li_episodes:
            # remove the useless information (saved but not used)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.ACTIONS)
            self._delete_if_exists(path_tmp, episode_name,
                                   EpisodeData.AG_EXEC_TIMES)
            self._delete_if_exists(path_tmp, episode_name,
                                   EpisodeData.LINES_FAILURES)
            self._delete_if_exists(path_tmp, episode_name,
                                   EpisodeData.ENV_ACTIONS)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.ATTACK)
            if scores_func is not None:
                self._retrieve_scores(path_tmp, episode_name)
            else:
                self._delete_if_exists(path_tmp, episode_name,
                                       EpisodeData.OTHER_REWARDS)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.REWARDS)

            # reformat the observation into a proper "human readable" format
            self._clean_observations(path_tmp, episode_name)

        # and now gather the information at the top level
        self._gather_all(li_episodes, dict_metadata, score_names=score_names)
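For reference, a typical end-to-end use of this method, based on the signature documented above (the environment name and the retrieved attribute are illustrative, and the exact return type of get may vary between grid2op versions):

import grid2op
from grid2op.utils import EpisodeStatistics

env = grid2op.make("l2rpn_case14_sandbox")
stats = EpisodeStatistics(env)
# computed once; results are cached on disk next to the environment data
stats.compute(nb_scenario=2, max_step=100)
# retrieve a stored observation attribute, e.g. the line loading "rho"
rho_stats = stats.get("rho")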
Example No. 15
                this_epi_scores = json.load(f)
            score_this_ep, nb_ts_survived, total_ts_tmp = \
                self._compute_episode_score(ep_id,
                                            meta=this_epi_meta,
                                            other_rewards=this_epi_scores,
                                            dn_metadata=meta_data_dn,
                                            no_ov_metadata=no_ov_metadata)
            all_scores.append(score_this_ep)
            ts_survived.append(nb_ts_survived)
            total_ts.append(total_ts_tmp)

        if need_delete:
            dir_tmp.cleanup()
        return all_scores, ts_survived, total_ts


if __name__ == "__main__":
    import grid2op
    from lightsim2grid import LightSimBackend
    from grid2op.Agent import RandomAgent, DoNothingAgent
    env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
    nb_scenario = 16
    my_score = ScoreL2RPN2020(env,
                              nb_scenario=nb_scenario,
                              env_seeds=[0 for _ in range(nb_scenario)],
                              agent_seeds=[0 for _ in range(nb_scenario)])

    # my_agent = RandomAgent(env.action_space)  # swap in to score a random agent instead
    my_agent = DoNothingAgent(env.action_space)
    print(my_score.get(my_agent))
Example No. 16
    def __init__(self, action_space, observation_space, name, **kwargs):
        DoNothingAgent.__init__(self, action_space)
        self.do_nothing = self.action_space()
        self.name = name
Example No. 17
def main(max_ts, ENV_NAME, test=True):
    param = Parameters()
    param.init_from_dict({"NO_OVERFLOW_DISCONNECTION": True})

    env_pp = make(ENV_NAME,
                  param=param,
                  test=test,
                  data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
    agent = DoNothingAgent(action_space=env_pp.action_space)
    nb_ts_pp, time_pp, aor_pp, gen_p_pp, gen_q_pp = run_env(env_pp,
                                                            max_ts,
                                                            agent,
                                                            chron_id=0,
                                                            env_seed=0)
    pp_time_pf = env_pp._time_powerflow
    wst = False  # print extra info in the run_env function

    env_lightsim = make(
        ENV_NAME,
        backend=LightSimBackend(),
        param=param,
        test=test,
        data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
    li_tols = [10., 1., 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]

    nb_ts = []
    time = []
    aor = []
    gen_p = []
    gen_q = []
    comp_time = []
    time_pf = []
    for tol in li_tols:
        env_lightsim.backend.set_tol(tol)
        nb_ts_, time_, aor_, gen_p_, gen_q_ = run_env(env_lightsim,
                                                      max_ts,
                                                      agent,
                                                      chron_id=0,
                                                      with_type_solver=wst,
                                                      env_seed=0)
        comp_time_ = env_lightsim.backend.comp_time
        time_pf_ = env_lightsim._time_powerflow
        nb_ts.append(nb_ts_)
        time.append(time_)
        aor.append(aor_)
        gen_p.append(gen_p_)
        gen_q.append(gen_q_)
        comp_time.append(comp_time_)
        time_pf.append(time_pf_)

    # NOW PRINT THE RESULTS
    env_name = get_env_name_displayed(ENV_NAME)
    hds = [
        f"{env_name} ({nb_ts_pp} iter)", f"speed (it/s)", f"Δ aor (amps)",
        f"Δ gen_p (MW)", f"Δ gen_q (MVAr)"
    ]
    tab = [["PP", int(nb_ts_pp / time_pp), "0.00", "0.00", "0.00"]]
    for i, tol in enumerate(li_tols):
        # the always-true "if lightsim2grid.SolverType.GaussSeidel:" that used to
        # guard this block was vestigial: a row is appended for every tolerance
        tab.append([
            f"{tol:.2e}", f"{int(nb_ts[i] / time[i])}",
            f"{np.max(np.abs(aor[i] - aor_pp)):.2e}",
            f"{np.max(np.abs(gen_p[i] - gen_p_pp)):.2e}",
            f"{np.max(np.abs(gen_q[i] - gen_q_pp)):.2e}"
        ])

    res_tol = tabulate(tab, headers=hds, tablefmt="rst")
    print(res_tol)
Example No. 18
def study(env,
          name=DEFAULT_NAME,
          load_path=None,
          logs_path=DEFAULT_LOGS_DIR,
          nb_episode=DEFAULT_NB_EPISODE,
          nb_process=DEFAULT_NB_PROCESS,
          max_steps=DEFAULT_MAX_STEPS,
          verbose=False,
          save_gif=False):
    """study the prediction of the grid_model"""

    # Limit gpu usage
    physical_devices = tf.config.list_physical_devices('GPU')
    if len(physical_devices):
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    runner_params = env.get_params_for_runner()
    runner_params["verbose"] = verbose

    if load_path is None:
        raise RuntimeError(
            "Cannot evaluate a model if there is nothing to be loaded.")
    path_model, path_target_model = LeapNetEncoded_NN.get_path_model(
        load_path, name)
    nn_archi = LeapNetEncoded_NNParam.from_json(
        os.path.join(path_model, "nn_architecture.json"))

    # Run
    # Create agent
    agent = LeapNetEncoded(action_space=env.action_space,
                           name=name,
                           store_action=nb_process == 1,
                           nn_archi=nn_archi,
                           observation_space=env.observation_space)

    # Load weights from file
    agent.load(load_path)

    # Print model summary
    stringlist = []
    agent.deep_q._model.summary(print_fn=lambda x: stringlist.append(x))
    short_model_summary = "\n".join(stringlist)
    if verbose:
        print(short_model_summary)

    from grid2op.Agent import RandomAgent
    from grid2op.Agent import DoNothingAgent
    policy_agent = DoNothingAgent(env.action_space)
    policy_agent.seed(0)

    env.set_id(0)
    res = {k: ([], []) for k in nn_archi.list_attr_obs_gm_out}
    with tqdm(desc="step") as pbar:
        for _ in range(nb_episode):  # episode index unused; avoids shadowing by the inner "i" loops
            obs = env.reset()
            reward = env.reward_range[0]
            done = False
            while not done:
                obs_converted = agent.convert_obs(obs)
                data_nn, true_output_grid = agent.deep_q._make_x_tau(
                    obs_converted)

                for i, (var_n, add, mult) in enumerate(
                        zip(nn_archi.list_attr_obs_gm_out,
                            nn_archi.gm_out_adds, nn_archi.gm_out_mults)):
                    tmp = true_output_grid[i]
                    tmp = tmp / mult - add
                    true_output_grid[i] = tmp

                pred = agent.deep_q.grid_model.predict(data_nn, batch_size=1)
                real_pred = []
                for i, (var_n, add, mult) in enumerate(
                        zip(nn_archi.list_attr_obs_gm_out,
                            nn_archi.gm_out_adds, nn_archi.gm_out_mults)):
                    tmp = pred[i]
                    tmp = tmp / mult - add
                    real_pred.append(tmp)

                for i, var_n in enumerate(nn_archi.list_attr_obs_gm_out):
                    res[var_n][0].append(real_pred[i].reshape(-1))
                    res[var_n][1].append(true_output_grid[i].reshape(-1))

                obs, reward, done, info = env.step(
                    policy_agent.act(obs, reward, done))
                pbar.update(1)

    print("Results")
    from sklearn.metrics import mean_squared_error
    for var_n, (pred, true) in res.items():
        true = np.array(true)
        pred = np.array(pred)
        RMSE = mean_squared_error(y_true=true,
                                  y_pred=pred,
                                  multioutput="raw_values",
                                  squared=False)
        print("RMSE for {}: {:.2f} % variance".format(
            var_n, 100. * np.mean(RMSE / np.std(true))))
    return agent
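A portability note on the metric call above: the squared=False flag of mean_squared_error is deprecated in recent scikit-learn releases and removed in the newest ones. An equivalent that sidesteps the flag:

import numpy as np
from sklearn.metrics import mean_squared_error

def rmse_per_output(y_true, y_pred):
    # per-output RMSE: square root of the per-output MSE, no `squared` flag needed
    return np.sqrt(mean_squared_error(y_true, y_pred, multioutput="raw_values"))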