Example #1
def evaluate(env,
             load_path=None,
             logs_path=DEFAULT_LOGS_DIR,
             nb_episode=DEFAULT_NB_EPISODE,
             nb_process=DEFAULT_NB_PROCESS,
             max_steps=DEFAULT_MAX_STEPS,
             verbose=False,
             save_gif=False):

    runner_params = env.get_params_for_runner()
    runner_params["verbose"] = args.verbose

    # Build runner
    runner = Runner(**runner_params,
                    agentClass=DoNothingAgent)

    # Run
    os.makedirs(logs_path, exist_ok=True)
    res = runner.run(path_save=logs_path,
                     nb_episode=nb_episode,
                     nb_process=nb_process,
                     max_iter=max_steps,
                     pbar=True)

    # Print summary
    print("Evaluation summary:")
    for _, chron_name, cum_reward, nb_time_step, max_ts in res:
        msg_tmp = "chronics at: {}".format(chron_name)
        msg_tmp += "\ttotal reward: {:.6f}".format(cum_reward)
        msg_tmp += "\ttime steps: {:.0f}/{:.0f}".format(nb_time_step, max_ts)
        print(msg_tmp)

    if save_gif:
        save_log_gif(logs_path, res)
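
A minimal usage sketch for the evaluate() helper above (a DoNothingAgent baseline), assuming grid2op is installed and the DEFAULT_* constants are defined next to it; the environment name and log directory below are placeholders:

import grid2op

if __name__ == "__main__":
    env = grid2op.make("rte_case14_realistic")  # any grid2op environment works here
    evaluate(env,
             logs_path="./logs-eval/do_nothing",  # hypothetical output directory
             nb_episode=2,
             nb_process=1,
             max_steps=-1,  # the usual grid2op convention for "no step limit"
             verbose=True,
             save_gif=False)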
Example #2
    def test_issue_126(self):
        # run redispatch agent on one scenario for 100 timesteps
        dataset = "rte_case14_realistic"
        nb_episode = 1
        nb_timesteps = 100

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = make(dataset, test=True)

        agent = DeltaRedispatchRandomAgent(env.action_space)
        runner = Runner(**env.get_params_for_runner(),
                        agentClass=None,
                        agentInstance=agent)

        with tempfile.TemporaryDirectory() as tmpdirname:
            res = runner.run(nb_episode=nb_episode,
                             path_save=tmpdirname,
                             nb_process=1,
                             max_iter=nb_timesteps,
                             env_seeds=[0],
                             agent_seeds=[0],
                             pbar=False)
            episode_data = EpisodeData.from_disk(tmpdirname, res[0][1])

        assert len(episode_data.actions.objects) == nb_timesteps, "wrong number of actions"
        assert len(episode_data.actions) == nb_timesteps, "wrong number of actions"
        assert len(episode_data.observations.objects) == nb_timesteps + 1, "wrong number of observations"
        assert len(episode_data.observations) == nb_timesteps + 1, "wrong number of observations"
Example #3
 def test_multiprocess_windows_no_fail(self):
     """test that i can run multiple times parallel run of the same env (breaks on windows)"""
     nb_episode = 2
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case5_example", test=True) as env:
             f = tempfile.mkdtemp()
             runner_params = env.get_params_for_runner()
             runner = Runner(**runner_params)
             res1 = runner.run(path_save=f,
                               nb_episode=nb_episode,
                               nb_process=2,
                               max_iter=self.max_iter)
             res2 = runner.run(path_save=f,
                               nb_episode=nb_episode,
                               nb_process=1,
                               max_iter=self.max_iter)
             res3 = runner.run(path_save=f,
                               nb_episode=nb_episode,
                               nb_process=2,
                               max_iter=self.max_iter)
     test_ = set()
     for id_chron, name_chron, cum_reward, nb_time_step, max_ts in res1:
         test_.add(name_chron)
     assert len(test_) == nb_episode
     test_ = set()
     for id_chron, name_chron, cum_reward, nb_time_step, max_ts in res2:
         test_.add(name_chron)
     assert len(test_) == nb_episode
     test_ = set()
     for id_chron, name_chron, cum_reward, nb_time_step, max_ts in res3:
         test_.add(name_chron)
     assert len(test_) == nb_episode
Example #4
    def test_load_ambiguous(self):
        f = tempfile.mkdtemp()

        class TestSuitAgent(BaseAgent):
            def __init__(self, *args, **kwargs):
                BaseAgent.__init__(self, *args, **kwargs)

            def act(self, observation, reward, done=False):
                # take an ambiguous action: set and change the same line status at once
                return self.action_space({
                    "set_line_status": [(0, 1)],
                    "change_line_status": [0]
                })

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with grid2op.make("rte_case14_test", test=True) as env:
                my_agent = TestSuitAgent(env.action_space)
                runner = Runner(**env.get_params_for_runner(),
                                agentClass=None,
                                agentInstance=my_agent)

                # run one episode with this ambiguous agent and save it to disk
                res = runner.run(nb_episode=1,
                                 max_iter=self.max_iter,
                                 path_save=f)
            episode_data = EpisodeData.from_disk(agent_path=f, name=res[0][1])
        assert int(episode_data.meta["chronics_max_timestep"]) == self.max_iter
        assert len(episode_data.actions) == self.max_iter
        assert len(episode_data.observations) == self.max_iter + 1
Example #5
    def run_env(env, path_save, parameters, scores_func, agent, nb_scenario,
                max_step, env_seeds, agent_seeds, pbar, nb_process):

        if scores_func is not None:
            if not (EpisodeStatistics._check_if_base_reward(scores_func)
                    or isinstance(scores_func, dict)):
                raise Grid2OpException(
                    "score_func should be either a dictionary or an instance of BaseReward"
                )

        dict_kwg = env.get_params_for_runner()
        dict_kwg["parameters_path"] = parameters.to_dict()
        if "other_rewards" not in dict_kwg:
            dict_kwg["other_rewards"] = {}
        if scores_func is not None:
            if EpisodeStatistics._check_if_base_reward(scores_func):
                dict_kwg["other_rewards"][
                    EpisodeStatistics.KEY_SCORE] = scores_func
            elif isinstance(scores_func, dict):
                for nm, score_fun in scores_func.items():
                    dict_kwg["other_rewards"][
                        f"{EpisodeStatistics.KEY_SCORE}_{nm}"] = score_fun
            else:
                raise RuntimeError(
                    "\"scores_func\" should inherit from \"grid2op.Reward.BaseReward\" or "
                    "be a dictionary")
        runner = Runner(**dict_kwg, agentClass=None, agentInstance=agent)
        runner.run(path_save=path_save,
                   nb_episode=nb_scenario,
                   max_iter=max_step,
                   env_seeds=env_seeds,
                   agent_seeds=agent_seeds,
                   pbar=pbar,
                   nb_process=nb_process)
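
A hedged sketch of how the run_env helper above might be called (in grid2op it is a static helper of EpisodeStatistics); the agent, output path, and seeds are illustrative assumptions, and scores_func is left at None to keep the sketch minimal:

import grid2op
from grid2op.Agent import DoNothingAgent
from grid2op.Parameters import Parameters

env = grid2op.make("rte_case14_realistic", test=True)
run_env(env,
        path_save="./episode_stats",  # hypothetical output directory
        parameters=Parameters(),      # default simulation parameters
        scores_func=None,             # or a BaseReward / dict of rewards, per the check above
        agent=DoNothingAgent(env.action_space),
        nb_scenario=1,
        max_step=10,
        env_seeds=[0],
        agent_seeds=[0],
        pbar=False,
        nb_process=1)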
Example #6
    def test_seed_properly_set(self):
        class TestSuitAgent(RandomAgent):
            def __init__(self, *args, **kwargs):
                RandomAgent.__init__(self, *args, **kwargs)
                self.seeds = []

            def seed(self, seed):
                super().seed(seed)
                self.seeds.append(seed)

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case14_test", test=True) as env:
                my_agent = TestSuitAgent(env.action_space)
                runner = Runner(**env.get_params_for_runner(),
                                agentClass=None,
                                agentInstance=my_agent)

        # test that the right seeds are assigned to the agent
        res = runner.run(nb_episode=3,
                         max_iter=self.max_iter,
                         env_seeds=[1, 2, 3],
                         agent_seeds=[5, 6, 7])
        assert np.all(my_agent.seeds == [5, 6, 7])

        # test that if no seeds are set, the "seed" function of the agent is not called
        my_agent.seeds = []
        res = runner.run(nb_episode=3,
                         max_iter=self.max_iter,
                         env_seeds=[1, 2, 3])
        assert my_agent.seeds == []
Example #7
 def test_init_from_env_with_other_reward(self):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("case14_test", other_rewards={"test":
                                                 L2RPNReward}) as env:
             runner = Runner(**env.get_params_for_runner())
     runner.run(nb_episode=1, max_iter=self.max_iter)
Example #8
def evaluate(env,
             load_path=None,
             logs_path=DEFAULT_LOGS_DIR,
             nb_episode=DEFAULT_NB_EPISODE,
             nb_process=DEFAULT_NB_PROCESS,
             max_steps=DEFAULT_MAX_STEPS,
             verbose=DEFAULT_VERBOSE,
             save_gif=False):

    # Limit gpu usage
    physical_devices = tf.config.list_physical_devices('GPU')
    if len(physical_devices):
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    runner_params = env.get_params_for_runner()
    runner_params["verbose"] = verbose

    # Create agent
    agent = RDQNAgent(env.observation_space,
                      env.action_space,
                      is_training=False)

    # Load weights from file
    agent.load(load_path)

    # Build runner
    runner = Runner(**runner_params,
                    agentClass=None,
                    agentInstance=agent)

    # Print model summary
    if verbose:
        stringlist = []
        agent.Qmain.model.summary(print_fn=lambda x: stringlist.append(x))
        short_model_summary = "\n".join(stringlist)
        print(short_model_summary)

    # Run
    os.makedirs(logs_path, exist_ok=True)
    res = runner.run(path_save=logs_path,
                     nb_episode=nb_episode,
                     nb_process=nb_process,
                     max_iter=max_steps,
                     pbar=verbose)

    # Print summary
    if verbose:
        print("Evaluation summary:")
        for _, chron_name, cum_reward, nb_time_step, max_ts in res:
            msg_tmp = "chronics at: {}".format(chron_name)
            msg_tmp += "\ttotal reward: {:.6f}".format(cum_reward)
            msg_tmp += "\ttime steps: {:.0f}/{:.0f}".format(nb_time_step,
                                                            max_ts)
            print(msg_tmp)

    if save_gif:
        save_log_gif(logs_path, res)

    return res
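
A hedged usage sketch for this evaluate() variant, assuming the RDQNAgent baseline and a matching set of saved weights are available; the environment name and file paths are placeholders:

import grid2op

env = grid2op.make("l2rpn_case14_sandbox")
res = evaluate(env,
               load_path="./models/rdqn_weights.h5",  # hypothetical weights file
               logs_path="./logs-eval/rdqn",
               nb_episode=1,
               nb_process=1,
               max_steps=100,
               verbose=True,
               save_gif=False)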
Example #9
 def test_seed_par(self):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case14_test", test=True) as env:
             runner = Runner(**env.get_params_for_runner())
     res = runner.run(nb_episode=2, nb_process=2, max_iter=self.max_iter, env_seeds=[1, 2], agent_seeds=[3, 4])
     for i, _, cum_reward, timestep, total_ts in res:
         assert int(timestep) == self.max_iter
Example #10
 def test_init_from_env(self):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case14_test", test=True) as env:
             runner = Runner(**env.get_params_for_runner())
     res = runner.run(nb_episode=1, max_iter=self.max_iter)
     for i, _, cum_reward, timestep, total_ts in res:
         assert int(timestep) == self.max_iter
Example #11
 def test_env_alarmtime_default(self):
     """test default values are correct"""
     assert self.env.parameters.ALARM_WINDOW_SIZE == 12
     assert self.env.parameters.ALARM_BEST_TIME == 12
     runner = Runner(**self.env.get_params_for_runner())
     env_runner = runner.init_env()
     assert env_runner.parameters.ALARM_WINDOW_SIZE == 12
     assert env_runner.parameters.ALARM_BEST_TIME == 12
Example #12
 def test_nomaxiter_par(self):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case14_test", test=True) as env:
             runner = Runner(**env.get_params_for_runner())
     runner.gridStateclass_kwargs["max_iter"] = 2 * self.max_iter
     res = runner.run(nb_episode=2, nb_process=2)
     for i, _, cum_reward, timestep, total_ts in res:
         assert int(timestep) == 2 * self.max_iter
Example #13
 def test_nomaxiter(self):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case14_test", test=True) as env:
             runner = Runner(**env.get_params_for_runner())
     runner.chronics_handler.set_max_iter(2 * self.max_iter)
     res = runner.run(nb_episode=1)
     for i, _, cum_reward, timestep, total_ts in res:
         assert int(timestep) == 2 * self.max_iter
Example #14
def run(runner_params_from_env, NB_EPISODE, agent_path, max_steps_in_episode,
        agent, gif_save, agent_name):
    # Build runner
    runner = Runner(**runner_params_from_env,
                    agentClass=None,
                    agentInstance=agent)

    PATH_SAVE = os.path.join(agent_path, agent_name)
    # grid2viz keeps its cache in a "_cache" folder next to the saved agent results
    PATH_SAVE2 = os.path.join(agent_path, "_cache")

    # If results from a previous run exist, delete them so the Runner can
    # re-evaluate the scenarios with the current agent.
    if os.path.exists(PATH_SAVE):
        shutil.rmtree(PATH_SAVE)
    # Also remove the "_cache" folder created by grid2viz. Whenever the agent's
    # performance data or the scenario data is overwritten, grid2viz needs its cache
    # reset; always resetting it here ensures grid2viz displays the most recently
    # generated agent behaviour (at the cost of a slower first grid2viz load).
    if os.path.exists(PATH_SAVE2):
        shutil.rmtree(PATH_SAVE2)

    os.makedirs(agent_path, exist_ok=True)
    # NOTE: this code does not support running "runner.run" on multiple cores.
    res = runner.run(nb_episode=NB_EPISODE,
                     path_save=PATH_SAVE,
                     max_iter=max_steps_in_episode)
    # print(agent.action_list)

    if gif_save:
        # Parameters to save the gif image of the performance of the agent.
        gif_name = "episode"
        ep_replay = EpisodeReplay(agent_path=PATH_SAVE)

    print("The results for the trained agent are:")
    for _, chron_name, cum_reward, nb_time_step, max_ts in res:
        msg_tmp = "\tFor chronics located at {}\n".format(chron_name)
        msg_tmp += "\t\t - cumulative reward: {:.6f}\n".format(cum_reward)
        msg_tmp += "\t\t - number of time steps completed: {:.0f} / {:.0f}".format(
            nb_time_step, max_ts)
        print(msg_tmp)

        if gif_save:
            # save a gif replay of this episode under "PATH_SAVE"
            ep_replay.replay_episode(
                chron_name,  # which chronic was run
                gif_name=gif_name,  # name of the gif file
                display=False,  # don't wait before rendering each frame
                fps=3.0)  # limit to 3 frames per second
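
A hedged sketch of a call to the run() function above; the agent, folder names, and episode counts are illustrative assumptions:

import grid2op
from grid2op.Agent import DoNothingAgent

env = grid2op.make("rte_case14_realistic")
run(runner_params_from_env=env.get_params_for_runner(),
    NB_EPISODE=1,
    agent_path="./saved_agents",  # hypothetical root folder for the results
    max_steps_in_episode=100,
    agent=DoNothingAgent(env.action_space),
    gif_save=False,  # set to True to also render a gif replay per episode
    agent_name="do_nothing")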
Example #15
 def test_generate_and_read_agent_redisp(self):
     with make(self.case, param=self.param, backend=self.backend) as env:
         agent = RandomRedispatchAgent(env.action_space, env)
         runner = Runner(**env.get_params_for_runner(),
                         agentClass=None,
                         agentInstance=agent)
         # needs to be seeded for reproducibility, as the agent takes random redispatching actions
         runner.run(nb_episode=1,
                    path_save=os.path.join(self.agents_path, self.agent_name),
                    nb_process=1,
                    max_iter=10,
                    env_seeds=[0],
                    agent_seeds=[0],
                    pbar=True)
         env.close()
Example #16
 def run_env(env, path_save, parameters, scores_func, agent, nb_scenario,
             max_step, env_seeds, agent_seeds, pbar, nb_process):
     dict_kwg = env.get_params_for_runner()
     dict_kwg["parameters_path"] = parameters.to_dict()
     if scores_func is not None:
         if not issubclass(scores_func, BaseReward):
             raise RuntimeError("\"scores_func\" should inherit from \"grid2op.Reward.BaseReward\"")
         dict_kwg["other_rewards"] = {EpisodeStatistics.KEY_SCORE: scores_func}
     runner = Runner(**dict_kwg, agentClass=None, agentInstance=agent)
     runner.run(path_save=path_save,
                nb_episode=nb_scenario,
                max_iter=max_step,
                env_seeds=env_seeds,
                agent_seeds=agent_seeds,
                pbar=pbar,
                nb_process=nb_process)
Example #17
 def test_complex_agent(self):
     nb_episode = 4
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case5_example", test=True) as env:
             f = tempfile.mkdtemp()
             runner_params = env.get_params_for_runner()
             runner = Runner(**runner_params)
             res = runner.run(path_save=f,
                              nb_episode=nb_episode,
                              nb_process=2,
                              max_iter=self.max_iter)
     test_ = set()
     for id_chron, name_chron, cum_reward, nb_time_step, max_ts in res:
         test_.add(name_chron)
     assert len(test_) == nb_episode
Example #18
 def test_custom_reward_runner(self):
     """test i can generate the reward and use it in the envs"""
     reward_cls = RedispReward.generate_class_custom_params(
         alpha_redisph=2,
         min_load_ratio=0.15,
         worst_losses_ratio=0.05,
         min_reward=-10.,
         reward_illegal_ambiguous=0.,
         least_losses_ratio=0.015)
     env_name = "l2rpn_case14_sandbox"
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with grid2op.make(env_name, test=True,
                           reward_class=reward_cls) as env:
             obs = env.reset()
             runner = Runner(**env.get_params_for_runner())
             res = runner.run(nb_episode=2, nb_process=2)
Example #19
def get_runner(path_chronics=PATH_DATA, submission_dir="."):
    submitted_controler = get_submitted_controller(submission_dir)
    runner = Runner(init_grid_path=L2RPN2019_CASEFILE,
                    path_chron=path_chronics,
                    names_chronics_to_backend=L2RPN2019_DICT_NAMES,
                    gridStateclass_kwargs={"gridvalueClass": ReadPypowNetData},
                    rewardClass=L2RPNReward,
                    agentClass=submitted_controler)
    return runner
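
A hedged sketch of how get_runner() might be used, assuming the L2RPN 2019 starting-kit constants it relies on (PATH_DATA, L2RPN2019_CASEFILE, ...) are importable; the submission folder and output path are placeholders:

runner = get_runner(submission_dir="./example_submission")  # hypothetical submission folder
res = runner.run(nb_episode=1, path_save="./l2rpn2019_logs")
print(res)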
Example #20
    def setUp(self):
        super().setUp()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox",
                                    test=True,
                                    backend=LightSimBackend())

        self.runner = Runner(**self.env.get_params_for_runner())
Example #21
 def test_one_process_par(self):
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         res = Runner._one_process_parrallel(self.runner, [0], 0, None, None, self.max_iter)
     assert len(res) == 1
     _, el1, el2, el3, el4 = res[0]
     assert el1 == "1"
     assert np.abs(el2 - self.real_reward) <= self.tol_one
     assert el3 == 10
     assert el4 == 10
Example #22
    def setUp(self):
        self.init_grid_path = os.path.join(PATH_DATA_TEST_PP,
                                           "test_case14.json")
        self.path_chron = PATH_ADN_CHRONICS_FOLDER
        self.parameters_path = None
        self.max_iter = 10
        self.real_reward = dt_float(7748.425)
        self.real_reward_li = [dt_float(7748.425), dt_float(7786.89599609375)]

        self.all_real_rewards = [
            dt_float(el) for el in [
                761.3295, 768.10144, 770.2673, 767.767, 768.69, 768.71246,
                779.1029, 783.2737, 788.7833, 792.39764
            ]
        ]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.runner = Runner(**self.env.get_params_for_runner())
Example #23
def main(path_casefile=None,
         path_chronics=None,
         path_parameters=None,
         chronics_class=Multifolder,
         backend_class=PandaPowerBackend,
         agent_class=DoNothingAgent,
         reward_class=FlatReward,
         observation_class=CompleteObservation,
         legalAct_class=AlwaysLegal,
         nb_episode=3,
         nb_process=1,
         path_save=None,
         names_chronics_to_backend=None,
         gridStateclass_kwargs=None):
    # avoid a mutable default argument for the chronics kwargs
    if gridStateclass_kwargs is None:
        gridStateclass_kwargs = {}

    if path_casefile is None:
        init_grid_path = DEFAULT_TEST_CASE
    else:
        init_grid_path = os.path.abspath(path_casefile)

    if path_chronics is None:
        path_chron = DEFAULT_CHRONICS_DATA
    else:
        path_chron = os.path.abspath(path_chronics)

    parameters_path = path_parameters

    runner = Runner(init_grid_path=init_grid_path,
                    path_chron=path_chron,
                    parameters_path=parameters_path,
                    names_chronics_to_backend=names_chronics_to_backend,
                    gridStateclass=chronics_class,
                    gridStateclass_kwargs=gridStateclass_kwargs,
                    backendClass=backend_class,
                    rewardClass=reward_class,
                    agentClass=agent_class,
                    observationClass=observation_class,
                    legalActClass=legalAct_class)

    res = runner.run(nb_episode=nb_episode,
                     nb_process=nb_process,
                     path_save=path_save)
    return res
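
A hedged sketch of a direct call to main() above, relying on the module-level defaults (DEFAULT_TEST_CASE, DEFAULT_CHRONICS_DATA) it falls back to; the output directory is a placeholder:

if __name__ == "__main__":
    res = main(nb_episode=2,
               nb_process=1,
               path_save="./runner_logs")  # hypothetical output directory
    for _, chron_name, cum_reward, nb_time_step, max_ts in res:
        print(chron_name, cum_reward, nb_time_step, max_ts)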
Example #24
 def test_collection_wrapper_after_run(self):
     OneChange = OneChangeThenNothing.gen_next(
         {"set_bus": {
             "lines_or_id": [(1, -1)]
         }})
     runner = Runner(
         init_grid_path=self.init_grid_path,
         path_chron=self.path_chron,
         parameters_path=self.parameters_path,
         names_chronics_to_backend=self.names_chronics_to_backend,
         gridStateclass=self.gridStateclass,
         backendClass=self.backendClass,
         rewardClass=L2RPNReward,
         other_rewards={"test": L2RPNReward},
         max_iter=self.max_iter,
         name_env="test_episodedata_env",
         agentClass=OneChange)
     _, cum_reward, timestep, episode_data = runner.run_one_episode(
         max_iter=self.max_iter, detailed_output=True)
     # Check that the type of first action is set bus
     assert episode_data.actions[0].get_types()[2]
Example #25
    def test_backward_compatibility(self):
        backward_comp_version = ["1.0.0", "1.1.0", "1.1.1", "1.2.0", "1.2.1", "1.2.2", "1.2.3", "1.3.0", "1.3.1",
                                 "1.4.0"]
        curr_version = "current_version"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env, \
                 tempfile.TemporaryDirectory() as path:
                runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent)
                runner.run(nb_episode=2,
                           path_save=os.path.join(path, curr_version),
                           pbar=False,
                           max_iter=100,
                           env_seeds=[1, 0],
                           agent_seeds=[42, 69])
                # check that the data generated by this runner can be read back
                self._aux_backward(path, curr_version)

        for grid2op_version in backward_comp_version:
            # check that data stored by previous grid2op versions can still be read
            self._aux_backward(PATH_PREVIOUS_RUNNER, f"res_agent_{grid2op_version}")
Example #26
    def test_runner(self):
        """test i can create properly a runner"""
        runner = Runner(**self.env.get_params_for_runner())

        # normal run
        res = runner.run(nb_episode=1, nb_process=1, max_iter=self.max_iter)
        assert res[0][-1] == 10
        assert res[0][-2] == 10
        assert res[0][-3] == 1.0

        # run + episode data
        with tempfile.TemporaryDirectory() as f:
            res = runner.run(nb_episode=1,
                             nb_process=1,
                             max_iter=self.max_iter,
                             path_save=f)
            ep_dat = EpisodeData.from_disk(agent_path=f, name=res[0][1])
            assert len(ep_dat) == 10
            assert ep_dat.observations[0].attention_budget == 3
            assert ep_dat.observations[1].attention_budget == 3 + 1. / (12. * 8.)
Example #27
    def test_backward_compatibility(self):
        backward_comp_version = [
            "1.0.0", "1.1.0", "1.1.1", "1.2.0", "1.2.1", "1.2.2", "1.2.3",
            "1.3.0", "1.3.1", "1.4.0", "1.5.0", "1.5.1", "1.5.1.post1", "1.5.2"
        ]
        curr_version = "test_version"
        assert 'curtailment' in CompleteObservation.attr_list_vect, "error at the beginning"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env, \
                    tempfile.TemporaryDirectory() as path:
                runner = Runner(**env.get_params_for_runner(),
                                agentClass=RandomAgent)
                runner.run(nb_episode=2,
                           path_save=os.path.join(path, curr_version),
                           pbar=False,
                           max_iter=100,
                           env_seeds=[1, 0],
                           agent_seeds=[42, 69])
                # check that the data generated by this runner can be read back
                self._aux_backward(path, curr_version, curr_version)

        assert 'curtailment' in CompleteObservation.attr_list_vect, "error after the first runner"

        # check that it raises a warning if loaded on the compatibility version
        grid2op_version = backward_comp_version[0]
        with self.assertWarns(UserWarning):
            self._aux_backward(PATH_PREVIOUS_RUNNER,
                               f"res_agent_{grid2op_version}", grid2op_version)

        for grid2op_version in backward_comp_version:
            # check that data stored by previous grid2op versions can still be loaded properly
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self._aux_backward(PATH_PREVIOUS_RUNNER,
                                   f"res_agent_{grid2op_version}",
                                   grid2op_version)
            assert 'curtailment' in CompleteObservation.attr_list_vect, f"error after the legacy version " \
                                                                        f"{grid2op_version}"
Example #28
 def setUp(self):
     """
     The case file is a representation of the case14 as found in the ieee14 powergrid.
     :return:
     """
     self.init_grid_path = os.path.join(PATH_DATA_TEST_PP, "test_case14.json")
     self.path_chron = PATH_ADN_CHRONICS_FOLDER
     self.parameters_path = None
     self.max_iter = 10
     self.real_reward = dt_float(199.99800)
     self.names_chronics_to_backend = {"loads": {"2_C-10.61": 'load_1_0', "3_C151.15": 'load_2_1',
                                                 "14_C63.6": 'load_13_2', "4_C-9.47": 'load_3_3',
                                                 "5_C201.84": 'load_4_4',
                                                 "6_C-6.27": 'load_5_5', "9_C130.49": 'load_8_6',
                                                 "10_C228.66": 'load_9_7',
                                                 "11_C-138.89": 'load_10_8', "12_C-27.88": 'load_11_9',
                                                 "13_C-13.33": 'load_12_10'},
                                       "lines": {'1_2_1': '0_1_0', '1_5_2': '0_4_1', '9_10_16': '8_9_2',
                                                 '9_14_17': '8_13_3',
                                                 '10_11_18': '9_10_4', '12_13_19': '11_12_5', '13_14_20': '12_13_6',
                                                 '2_3_3': '1_2_7', '2_4_4': '1_3_8', '2_5_5': '1_4_9',
                                                 '3_4_6': '2_3_10',
                                                 '4_5_7': '3_4_11', '6_11_11': '5_10_12', '6_12_12': '5_11_13',
                                                 '6_13_13': '5_12_14', '4_7_8': '3_6_15', '4_9_9': '3_8_16',
                                                 '5_6_10': '4_5_17',
                                                 '7_8_14': '6_7_18', '7_9_15': '6_8_19'},
                                       "prods": {"1_G137.1": 'gen_0_4', "3_G36.31": "gen_2_1", "6_G63.29": "gen_5_2",
                                                 "2_G-56.47": "gen_1_0", "8_G40.43": "gen_7_3"},
                                       }
     self.gridStateclass = Multifolder
     self.backendClass = PandaPowerBackend
     self.runner = Runner(init_grid_path=self.init_grid_path,
                          path_chron=self.path_chron,
                          parameters_path=self.parameters_path,
                          names_chronics_to_backend=self.names_chronics_to_backend,
                          gridStateclass=self.gridStateclass,
                          backendClass=self.backendClass,
                          rewardClass=L2RPNReward,
                          max_iter=self.max_iter,
                          name_env="test_runner_env")
Example #29
    def test_env_alarmtime_changed(self):
        """test everything is correct when something is modified"""
        param = Parameters()
        param.ALARM_WINDOW_SIZE = 99
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_nm = os.path.join(PATH_DATA_TEST,
                                  "l2rpn_neurips_2020_track1_with_alert")
            env = grid2op.make(env_nm,
                               test=True,
                               chronics_class=ChangeNothing,
                               param=param)
        assert env.parameters.ALARM_WINDOW_SIZE == 99
        assert env.parameters.ALARM_BEST_TIME == 12
        runner = Runner(**env.get_params_for_runner())
        env_runner = runner.init_env()
        assert env_runner.parameters.ALARM_WINDOW_SIZE == 99
        assert env_runner.parameters.ALARM_BEST_TIME == 12

        param = Parameters()
        param.ALARM_BEST_TIME = 42
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_nm = os.path.join(PATH_DATA_TEST,
                                  "l2rpn_neurips_2020_track1_with_alert")
            env = grid2op.make(env_nm,
                               test=True,
                               chronics_class=ChangeNothing,
                               param=param)
        assert env.parameters.ALARM_WINDOW_SIZE == 12
        assert env.parameters.ALARM_BEST_TIME == 42
        runner = Runner(**env.get_params_for_runner())
        env_runner = runner.init_env()
        assert env_runner.parameters.ALARM_WINDOW_SIZE == 12
        assert env_runner.parameters.ALARM_BEST_TIME == 42
Example #30
 def test_always_same_order(self):
     # test that a call to "run" always uses the same chronics in the same order,
     # regardless of the seeds, the parallelism, or the number of calls to runner.run
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
         with make("rte_case14_test", test=True) as env:
             runner = Runner(**env.get_params_for_runner())
     res = runner.run(nb_episode=2,
                      nb_process=2,
                      max_iter=self.max_iter,
                      env_seeds=[1, 2],
                      agent_seeds=[3, 4])
     first_ = [el[0] for el in res]
     res = runner.run(nb_episode=2,
                      nb_process=1,
                      max_iter=self.max_iter,
                      env_seeds=[1, 2],
                      agent_seeds=[3, 4])
     second_ = [el[0] for el in res]
     res = runner.run(nb_episode=2,
                      nb_process=1,
                      max_iter=self.max_iter,
                      env_seeds=[9, 10])
     third_ = [el[0] for el in res]
     res = runner.run(nb_episode=2,
                      nb_process=2,
                      max_iter=self.max_iter,
                      env_seeds=[1, 2],
                      agent_seeds=[3, 4])
     fourth_ = [el[0] for el in res]
     assert np.all(first_ == second_)
     assert np.all(first_ == third_)
     assert np.all(first_ == fourth_)