Example #1
    def __init__(self,
                 backend_instanciated,
                 completeActionClass,
                 parameters,
                 reward_helper,
                 obsClass,
                 action_helper,
                 thermal_limit_a,
                 legalActClass,
                 donothing_act,
                 helper_action_class,
                 helper_action_env,
                 other_rewards={}):
        BaseEnv.__init__(self,
                         copy.deepcopy(parameters),
                         thermal_limit_a,
                         other_rewards=other_rewards)
        self._helper_action_class = helper_action_class
        self._reward_helper = reward_helper
        self._obsClass = None

        self.gen_activeprod_t_init = np.zeros(self.n_gen, dtype=dt_float)
        self.gen_activeprod_t_redisp_init = np.zeros(self.n_gen,
                                                     dtype=dt_float)
        self.times_before_line_status_actionable_init = np.zeros(self.n_line,
                                                                 dtype=dt_int)
        self.times_before_topology_actionable_init = np.zeros(self.n_sub,
                                                              dtype=dt_int)
        self.time_next_maintenance_init = np.zeros(self.n_line, dtype=dt_int)
        self.duration_next_maintenance_init = np.zeros(self.n_line,
                                                       dtype=dt_int)
        self.target_dispatch_init = np.zeros(self.n_gen, dtype=dt_float)
        self.actual_dispatch_init = np.zeros(self.n_gen, dtype=dt_float)

        self._init_backend(init_grid_path=None,
                           chronics_handler=_ObsCH(),
                           backend=backend_instanciated,
                           names_chronics_to_backend=None,
                           actionClass=action_helper.actionClass,
                           observationClass=obsClass,
                           rewardClass=None,
                           legalActClass=legalActClass)
        self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION

        self._load_p, self._load_q, self._load_v = None, None, None
        self._prod_p, self._prod_q, self._prod_v = None, None, None
        self._topo_vect = None

        # convert line status to -1 / 1 instead of False / True
        self._line_status = None
        self.is_init = False
        self._helper_action_env = helper_action_env
        self.env_modification = self._helper_action_env()
        self._do_nothing_act = self._helper_action_env()
        self._backend_action_set = self._backend_action_class()

        # opponent
        self.opp_space_state = None
        self.opp_state = None
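
This constructor belongs to grid2op's internal observation environment, the object that powers obs.simulate; user code never builds it by hand. A minimal sketch of the feature it backs, assuming a standard grid2op installation and the l2rpn_case14_sandbox dataset:

import grid2op

env = grid2op.make("l2rpn_case14_sandbox")
obs = env.reset()
do_nothing = env.action_space({})  # an empty dict yields a "do nothing" action
# simulate() runs one step on the internal observation environment
# without affecting the real environment
sim_obs, sim_reward, sim_done, sim_info = obs.simulate(do_nothing)
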
Example #2
    def __init__(self,
                 init_grid_path: str,
                 chronics_handler,
                 backend,
                 parameters,
                 name="unknown",
                 names_chronics_to_backend=None,
                 actionClass=TopologyAction,
                 observationClass=CompleteObservation,
                 rewardClass=FlatReward,
                 legalActClass=AlwaysLegal,
                 voltagecontrolerClass=ControlVoltageFromFile,
                 other_rewards={},
                 thermal_limit_a=None,
                 epsilon_poly=1e-2,
                 tol_poly=1e-6,
                 opponent_action_class=DontAct,
                 opponent_class=BaseOpponent,
                 opponent_init_budget=0,
                 _raw_backend_class=None,
                 with_forecast=True):
        BaseEnv.__init__(self,
                         parameters=parameters,
                         thermal_limit_a=thermal_limit_a,
                         epsilon_poly=epsilon_poly,
                         tol_poly=tol_poly,
                         other_rewards=other_rewards,
                         with_forecast=with_forecast)
        if name == "unknown":
            warnings.warn(
                "It is NOT recommended to create an environment without \"make\" and EVEN LESS "
                "to use an environment without a name")
        self.name = name
        # the voltage controler
        self.voltagecontrolerClass = voltagecontrolerClass
        self.voltage_controler = None

        # for gym compatibility (initialized below)
        self.action_space = None
        self.observation_space = None
        self.reward_range = None
        self.viewer = None
        self.metadata = None
        self.spec = None

        # opponent attributes (must be defined here, after the initialization of BaseEnv)
        self.opponent_action_class = opponent_action_class
        self.opponent_class = opponent_class
        self.opponent_init_budget = opponent_init_budget

        if _raw_backend_class is None:
            self._raw_backend_class = type(backend)
        else:
            self._raw_backend_class = _raw_backend_class
        # for plotting
        self.init_backend(init_grid_path, chronics_handler, backend,
                          names_chronics_to_backend, actionClass,
                          observationClass, rewardClass, legalActClass)
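
As the warning above indicates, Environment is not meant to be instantiated directly: grid2op.make assembles the backend, chronics handler, rewards and name for you. A sketch of the recommended path, using the FlatReward default from this signature:

import grid2op
from grid2op.Reward import FlatReward

# make() wires up init_grid_path, chronics_handler, backend, etc. internally
env = grid2op.make("l2rpn_case14_sandbox", reward_class=FlatReward)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space({}))
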
Example #3
    def __init__(self,
                 backend_instanciated,
                 completeActionClass,
                 parameters,
                 reward_helper,
                 obsClass,
                 action_helper,
                 thermal_limit_a,
                 legalActClass,
                 donothing_act,
                 helper_action_class,
                 helper_action_env,
                 other_rewards={}):
        BaseEnv.__init__(self,
                         parameters,
                         thermal_limit_a,
                         other_rewards=other_rewards)
        self.helper_action_class = helper_action_class
        self.reward_helper = reward_helper
        self.obsClass = None
        # self._action = None
        self.CompleteActionClass = completeActionClass
        self.init_backend(init_grid_path=None,
                          chronics_handler=_ObsCH(),
                          backend=backend_instanciated,
                          names_chronics_to_backend=None,
                          actionClass=action_helper.actionClass,
                          observationClass=obsClass,
                          rewardClass=None,
                          legalActClass=legalActClass)
        self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION

        self._load_p, self._load_q, self._load_v = None, None, None
        self._prod_p, self._prod_q, self._prod_v = None, None, None
        self._topo_vect = None

        # convert line status to -1 / 1 instead of False / True
        self._line_status = None
        self.is_init = False
        self.helper_action_env = helper_action_env
        self.env_modification = self.helper_action_env()
        self._do_nothing_act = self.helper_action_env()
        self._backend_action_set = self._backend_action_class()

        # opponent
        self.opp_space_state = None
        self.opp_state = None
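
For reference, snippets like the ones above assume roughly the following imports; this is a sketch, and the internal _ObsCH helper (the do-nothing chronics passed to init_backend) lives at a module path that varies between grid2op versions:

import copy

import numpy as np

from grid2op.dtypes import dt_bool, dt_float, dt_int
from grid2op.Environment import BaseEnv
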
Example #4
    def __init__(self,
                 init_grid_path: str,
                 chronics_handler,
                 backend,
                 parameters,
                 name="unknown",
                 names_chronics_to_backend=None,
                 actionClass=TopologyAction,
                 observationClass=CompleteObservation,
                 rewardClass=FlatReward,
                 legalActClass=AlwaysLegal,
                 voltagecontrolerClass=ControlVoltageFromFile,
                 other_rewards={},
                 thermal_limit_a=None,
                 with_forecast=True,
                 epsilon_poly=1e-4,  # precision of the redispatching algorithm; we don't recommend going above 1e-4
                 tol_poly=1e-2,  # a redispatch is computed if the actual values deviate from their targets by more than tol_poly
                 opponent_action_class=DontAct,
                 opponent_class=BaseOpponent,
                 opponent_init_budget=0.,
                 opponent_budget_per_ts=0.,
                 opponent_budget_class=NeverAttackBudget,
                 opponent_attack_duration=0,
                 opponent_attack_cooldown=99999,
                 kwargs_opponent={},
                 _raw_backend_class=None
                 ):
        BaseEnv.__init__(self,
                         parameters=parameters,
                         thermal_limit_a=thermal_limit_a,
                         epsilon_poly=epsilon_poly,
                         tol_poly=tol_poly,
                         other_rewards=other_rewards,
                         with_forecast=with_forecast,
                         voltagecontrolerClass=voltagecontrolerClass,
                         opponent_action_class=opponent_action_class,
                         opponent_class=opponent_class,
                         opponent_budget_class=opponent_budget_class,
                         opponent_init_budget=opponent_init_budget,
                         opponent_budget_per_ts=opponent_budget_per_ts,
                         opponent_attack_duration=opponent_attack_duration,
                         opponent_attack_cooldown=opponent_attack_cooldown,
                         kwargs_opponent=kwargs_opponent)
        if name == "unknown":
            warnings.warn("It is NOT recommended to create an environment without \"make\" and EVEN LESS "
                          "to use an environment without a name")
        self.name = name

        # for gym compatibility (initialized below)
        self.action_space = None
        self.observation_space = None
        self.reward_range = None
        self.viewer = None
        self.metadata = None
        self.spec = None

        if _raw_backend_class is None:
            self._raw_backend_class = type(backend)
        else:
            self._raw_backend_class = _raw_backend_class

        # for plotting
        self._init_backend(init_grid_path, chronics_handler, backend,
                           names_chronics_to_backend, actionClass, observationClass,
                           rewardClass, legalActClass)
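
The opponent_* arguments forwarded to BaseEnv.__init__ are normally supplied through grid2op.make as well. A sketch under that assumption; the opponent classes below exist in grid2op, but the attacked line names are illustrative and grid-dependent:

import grid2op
from grid2op.Action import PowerlineSetAction
from grid2op.Opponent import RandomLineOpponent, BaseActionBudget

env = grid2op.make(
    "l2rpn_case14_sandbox",
    opponent_attack_cooldown=12 * 24,  # minimum number of steps between two attacks
    opponent_attack_duration=12 * 4,   # number of steps an attack lasts
    opponent_budget_per_ts=0.5,
    opponent_init_budget=0.0,
    opponent_action_class=PowerlineSetAction,
    opponent_class=RandomLineOpponent,
    opponent_budget_class=BaseActionBudget,
    kwargs_opponent={"lines_attacked": ["1_3_3", "1_4_4"]},
)
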
Example #5
    def __init__(
        self,
        init_grid_path,
        backend_instanciated,
        parameters,
        reward_helper,
        obsClass,
        action_helper,
        thermal_limit_a,
        legalActClass,
        helper_action_class,
        helper_action_env,
        epsilon_poly,
        tol_poly,
        max_episode_duration,
        other_rewards={},
        has_attention_budget=False,
        attention_budget_cls=LinearAttentionBudget,
        kwargs_attention_budget={},
    ):
        BaseEnv.__init__(self,
                         init_grid_path,
                         copy.deepcopy(parameters),
                         thermal_limit_a,
                         other_rewards=other_rewards,
                         epsilon_poly=epsilon_poly,
                         tol_poly=tol_poly,
                         has_attention_budget=has_attention_budget,
                         attention_budget_cls=attention_budget_cls,
                         kwargs_attention_budget=kwargs_attention_budget)
        self._helper_action_class = helper_action_class
        self._reward_helper = reward_helper
        self._obsClass = None
        self._obsClass = obsClass.init_grid(type(backend_instanciated))

        self.gen_activeprod_t_init = np.zeros(self.n_gen, dtype=dt_float)
        self.gen_activeprod_t_redisp_init = np.zeros(self.n_gen,
                                                     dtype=dt_float)
        self.times_before_line_status_actionable_init = np.zeros(self.n_line,
                                                                 dtype=dt_int)
        self.times_before_topology_actionable_init = np.zeros(self.n_sub,
                                                              dtype=dt_int)
        self.time_next_maintenance_init = np.zeros(self.n_line, dtype=dt_int)
        self.duration_next_maintenance_init = np.zeros(self.n_line,
                                                       dtype=dt_int)
        self.target_dispatch_init = np.zeros(self.n_gen, dtype=dt_float)
        self.actual_dispatch_init = np.zeros(self.n_gen, dtype=dt_float)

        self._init_backend(chronics_handler=_ObsCH(),
                           backend=backend_instanciated,
                           names_chronics_to_backend=None,
                           actionClass=action_helper.actionClass,
                           observationClass=self._obsClass,
                           rewardClass=None,
                           legalActClass=legalActClass)
        self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION

        self._load_p, self._load_q, self._load_v = None, None, None
        self._prod_p, self._prod_q, self._prod_v = None, None, None
        self._topo_vect = None

        # convert line status to -1 / 1 instead of False / True
        self._line_status = None
        self.is_init = False
        self._helper_action_env = helper_action_env
        self.env_modification = self._helper_action_env()
        self._do_nothing_act = self._helper_action_env()
        self._backend_action_set = self._backend_action_class()

        # opponent
        self.opp_space_state = None
        self.opp_state = None

        # storage
        self._storage_current_charge_init = None
        self._storage_previous_charge_init = None
        self._action_storage_init = None
        self._amount_storage_init = None
        self._amount_storage_prev_init = None
        self._storage_power_init = None

        # storage unit
        self._storage_current_charge_init = np.zeros(self.n_storage,
                                                     dtype=dt_float)
        self._storage_previous_charge_init = np.zeros(self.n_storage,
                                                      dtype=dt_float)
        self._action_storage_init = np.zeros(self.n_storage, dtype=dt_float)
        self._storage_power_init = np.zeros(self.n_storage, dtype=dt_float)
        self._amount_storage_init = 0.
        self._amount_storage_prev_init = 0.

        # curtailment
        self._limit_curtailment_init = np.zeros(self.n_gen, dtype=dt_float)
        self._gen_before_curtailment_init = np.zeros(self.n_gen,
                                                     dtype=dt_float)
        self._sum_curtailment_mw_init = 0.
        self._sum_curtailment_mw_prev_init = 0.

        # step count
        self._nb_time_step_init = 0

        # alarm / attention budget
        self._attention_budget_state_init = None

        self._disc_lines = np.zeros(shape=self.n_line, dtype=dt_int) - 1
        self._max_episode_duration = max_episode_duration
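
Compared to the earlier variants, this one also pre-allocates state for storage units and curtailment. Storage is driven through the action space; a sketch using grid2op's small educational storage environment (educ_case14_storage, available with test=True):

import grid2op

env = grid2op.make("educ_case14_storage", test=True)
obs = env.reset()
# positive setpoints charge the storage unit (it behaves as a load),
# negative setpoints discharge it (it behaves as a generator)
act = env.action_space({"set_storage": [(0, 2.0), (1, -1.0)]})
obs, reward, done, info = env.step(act)
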
Example #6
    def __init__(
        self,
        init_grid_path,
        backend_instanciated,
        parameters,
        reward_helper,
        obsClass,  # not initialized :-/
        action_helper,
        thermal_limit_a,
        legalActClass,
        helper_action_class,
        helper_action_env,
        epsilon_poly,
        tol_poly,
        max_episode_duration,
        other_rewards={},
        has_attention_budget=False,
        attention_budget_cls=LinearAttentionBudget,
        kwargs_attention_budget={},
        _complete_action_cls=None,
    ):
        BaseEnv.__init__(self,
                         init_grid_path,
                         copy.deepcopy(parameters),
                         thermal_limit_a,
                         other_rewards=other_rewards,
                         epsilon_poly=epsilon_poly,
                         tol_poly=tol_poly,
                         has_attention_budget=has_attention_budget,
                         attention_budget_cls=attention_budget_cls,
                         kwargs_attention_budget=kwargs_attention_budget)
        self._reward_helper = reward_helper
        self._helper_action_class = helper_action_class

        # initialize the observation space
        self._obsClass = None

        self.gen_activeprod_t_init = np.zeros(self.n_gen, dtype=dt_float)
        self.gen_activeprod_t_redisp_init = np.zeros(self.n_gen,
                                                     dtype=dt_float)
        self.times_before_line_status_actionable_init = np.zeros(self.n_line,
                                                                 dtype=dt_int)
        self.times_before_topology_actionable_init = np.zeros(self.n_sub,
                                                              dtype=dt_int)
        self.time_next_maintenance_init = np.zeros(self.n_line, dtype=dt_int)
        self.duration_next_maintenance_init = np.zeros(self.n_line,
                                                       dtype=dt_int)
        self.target_dispatch_init = np.zeros(self.n_gen, dtype=dt_float)
        self.actual_dispatch_init = np.zeros(self.n_gen, dtype=dt_float)

        # line status (inherited from BaseEnv)
        self._line_status = np.full(self.n_line,
                                    dtype=dt_bool,
                                    fill_value=True)
        # line status (for this usage)
        self._line_status_me = np.ones(
            shape=self.n_line,
            dtype=dt_int)  # this is "line status" but encoded as +1 / -1
        self._line_status_orig = np.ones(shape=self.n_line, dtype=dt_int)

        self._init_backend(chronics_handler=_ObsCH(),
                           backend=backend_instanciated,
                           names_chronics_to_backend=None,
                           actionClass=action_helper.actionClass,
                           observationClass=obsClass,
                           rewardClass=None,
                           legalActClass=legalActClass)

        ####
        # to be able to save and import (using env.generate_classes) correctly
        self._actionClass = action_helper.subtype
        self._observationClass = _complete_action_cls  # not used anyway
        self._complete_action_cls = _complete_action_cls
        self._action_space = action_helper  # obs env and env share the same action space
        self._observation_space = action_helper  # not used here, so it's definitely a hack!
        ####

        self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION

        self._load_p, self._load_q, self._load_v = None, None, None
        self._prod_p, self._prod_q, self._prod_v = None, None, None
        self._topo_vect = None

        # other stuff
        self.is_init = False
        self._helper_action_env = helper_action_env
        self.env_modification = self._helper_action_env()
        self._do_nothing_act = self._helper_action_env()
        self._backend_action_set = self._backend_action_class()

        # opponent
        self.opp_space_state = None
        self.opp_state = None

        # storage
        self._storage_current_charge_init = None
        self._storage_previous_charge_init = None
        self._action_storage_init = None
        self._amount_storage_init = None
        self._amount_storage_prev_init = None
        self._storage_power_init = None

        # storage unit
        self._storage_current_charge_init = np.zeros(self.n_storage,
                                                     dtype=dt_float)
        self._storage_previous_charge_init = np.zeros(self.n_storage,
                                                      dtype=dt_float)
        self._action_storage_init = np.zeros(self.n_storage, dtype=dt_float)
        self._storage_power_init = np.zeros(self.n_storage, dtype=dt_float)
        self._amount_storage_init = 0.
        self._amount_storage_prev_init = 0.

        # curtailment
        self._limit_curtailment_init = np.zeros(self.n_gen, dtype=dt_float)
        self._gen_before_curtailment_init = np.zeros(self.n_gen,
                                                     dtype=dt_float)
        self._sum_curtailment_mw_init = 0.
        self._sum_curtailment_mw_prev_init = 0.

        # step count
        self._nb_time_step_init = 0

        # alarm / attention budget
        self._attention_budget_state_init = None

        self._disc_lines = np.zeros(shape=self.n_line, dtype=dt_int) - 1
        self._max_episode_duration = max_episode_duration
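
The attention-budget attributes at the end belong to grid2op's alarm feature. A sketch, assuming an environment whose rules support alarms (for instance the l2rpn_icaps_2021 dataset):

import grid2op

env = grid2op.make("l2rpn_icaps_2021")
obs = env.reset()
# raising an alarm on alarm zone 0 consumes attention budget
act = env.action_space({"raise_alarm": [0]})
obs, reward, done, info = env.step(act)
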