Example #1
    def __init__(self, envs, nb_envs):
        try:
            nb_envs = np.array(nb_envs)
            nb_envs = nb_envs.astype(dt_int)
        except Exception as exc_:
            raise MultiEnvException(
                "\"nb_envs\" argument should be a list of integers. We could not "
                "convert it to such with error \"{}\"".format(exc_))

        if np.any(nb_envs < 0):
            raise MultiEnvException(
                "You asked to perform \"{}\" copies of an environment. This is a negative "
                "integer. I cannot do that. Please make sure the \"nb_envs\" argument "
                "contains only strictly positive integers and not {}."
                "".format(np.min(nb_envs), nb_envs))
        if np.any(nb_envs == 0):
            raise MultiEnvException(
                "You asked to perform 0 copies of an environment. This is not supported at "
                "the moment. Please make sure the \"nb_envs\" argument "
                "contains only strictly positive integers and not {}."
                "".format(nb_envs))

        all_envs = []
        # duplicate each environment: nb_envs[e] copies of envs[e]
        for e, n in enumerate(nb_envs):
            all_envs += [envs[e] for _ in range(n)]
        super().__init__(all_envs)
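
A minimal usage sketch for this constructor, assuming it belongs to a multi-process wrapper in the spirit of grid2op's MultiEnvMultiProcess where nb_envs[e] is the number of copies of envs[e] to run; the class import and environment name below are assumptions, not taken from the snippet:

.. code-block:: python

    import grid2op
    from grid2op.Environment import MultiEnvMultiProcess  # assumed host class of this __init__

    env0 = grid2op.make("l2rpn_case14_sandbox")  # illustrative environment name
    env1 = grid2op.make("l2rpn_case14_sandbox")

    # run 2 copies of env0 and 1 copy of env1, i.e. 3 workers in total
    multi_env = MultiEnvMultiProcess(envs=[env0, env1], nb_envs=[2, 1])
    obss = multi_env.reset()
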
Example #2
    def simulate(self, actions):
        """
        Perform the equivalent of `obs.simulate` in all the underlying environments

        Parameters
        ----------
        actions: ``list``
            List of all the actions to simulate

        Returns
        ---------
        sim_obs:
            The observations resulting from the simulation, one per underlying environment
        sim_rews:
            The rewards resulting from the simulation, one per underlying environment
        sim_dones:
            For each simulation, whether or not the simulated action leads to a game over
        sim_infos:
            Additional information for each simulated action.

        Examples
        --------

        You can use this feature like:

        .. code-block:: python

            import grid2op
            from grid2op.Environment import BaseMultiProcessEnvironment

            env_name = ...  # for example "l2rpn_case14_sandbox"
            env1 = grid2op.make(env_name)
            env2 = grid2op.make(env_name)

            multi_env = BaseMultiProcessEnvironment([env1, env2])
            obss = multi_env.reset()

            # simulate
            actions = [env1.action_space(), env2.action_space()]
            sim_obss, sim_rs, sim_ds, sim_is = multi_env.simulate(actions)

        """
        if len(actions) != self.nb_env:
            raise MultiEnvException(
                "Incorrect number of actions provided. You provided {} actions, but the "
                "MultiEnvironment counts {} different environments."
                "".format(len(actions), self.nb_env))
        for act in actions:
            if not isinstance(act, BaseAction):
                raise MultiEnvException(
                    "All actions sent to MultiEnvironment.simulate should be of type "
                    "\"grid2op.BaseAction\" and not {}".format(type(act)))

        self._send_sim(actions)
        sim_obs, sim_rews, sim_dones, sim_infos = self._wait_for_obs()
        return sim_obs, sim_rews, sim_dones, sim_infos
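
Continuing the docstring example above, a hedged sketch of how the returned tuples can be inspected per environment; it assumes simulate returns one entry per underlying environment, in the same order as the actions provided:

.. code-block:: python

    # one candidate ("do nothing") action per underlying environment
    actions = [env1.action_space(), env2.action_space()]
    sim_obss, sim_rs, sim_ds, sim_is = multi_env.simulate(actions)

    # inspect the simulated outcome of each environment separately
    for i, (sim_r, sim_d) in enumerate(zip(sim_rs, sim_ds)):
        if sim_d:
            print(f"simulated action {i} would lead to a game over")
        else:
            print(f"simulated action {i} gives an estimated reward of {sim_r:.2f}")
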
Example #3
    def __init__(self, envs):
        GridObjects.__init__(self)
        self.envs = envs
        for env in envs:
            if not isinstance(env, Environment):
                raise MultiEnvException(
                    "You provided an environment of type \"{}\", which is not supported. "
                    "Please only provide grid2op.Environment.Environment instances."
                    "".format(type(env)))

        self.nb_env = len(envs)
        max_int = np.iinfo(dt_int).max  # upper bound used to draw the seeds of the subprocesses
        # one Pipe per subprocess: the parent keeps self._remotes, the workers receive self._work_remotes
        self._remotes, self._work_remotes = zip(
            *[Pipe() for _ in range(self.nb_env)])

        env_params = [
            envs[e].get_kwargs(with_backend=False) for e in range(self.nb_env)
        ]
        self._ps = [
            RemoteEnv(env_params=env_,
                      remote=work_remote,
                      parent_remote=remote,
                      name="{}_subprocess_{}".format(envs[i].name, i),
                      seed=envs[i].space_prng.randint(max_int))
            for i, (work_remote, remote, env_) in enumerate(
                zip(self._work_remotes, self._remotes, env_params))
        ]

        for p in self._ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self._work_remotes:
            remote.close()  # the parent only keeps its own end of each pipe

        self._waiting = True
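
A small, self-contained sketch of the multiprocessing.Pipe worker scheme this constructor appears to set up; the command names and dummy payload are illustrative only and do not reflect the actual protocol used by RemoteEnv:

.. code-block:: python

    from multiprocessing import Pipe, Process

    def _worker(work_remote, parent_remote):
        parent_remote.close()  # the child only keeps its own end of the pipe
        while True:
            cmd, data = work_remote.recv()
            if cmd == "step":
                # a real worker would call env.step(data) here
                work_remote.send(("dummy_obs", 0.0, False, {}))
            elif cmd == "close":
                work_remote.close()
                break

    if __name__ == "__main__":
        remotes, work_remotes = zip(*[Pipe() for _ in range(2)])
        processes = [Process(target=_worker, args=(wr, r), daemon=True)
                     for wr, r in zip(work_remotes, remotes)]
        for p in processes:
            p.start()
        for wr in work_remotes:
            wr.close()  # the parent only keeps its own ends

        remotes[0].send(("step", None))
        print(remotes[0].recv())  # ('dummy_obs', 0.0, False, {})
        for r in remotes:
            r.send(("close", None))
        for p in processes:
            p.join()
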
Example #4
    def step(self, actions):
        """
        Perform a step in all the underlying environments.
        If one or more of the underlying environments encounter a game over, they are automatically restarted.

        The observation sent back to the user is the observation after the :func:`grid2op.Environment.Environment.reset`
        has been called.

        It has no impact on the other underlying environments.

        Parameters
        ----------
        actions: ``list``
            List of :attr:`MultiEnvironment.nb_env` :class:`grid2op.Action.BaseAction`. Each action will be executed
            in the corresponding underlying environment.

        Returns
        -------
        obs: ``list``
            List of all the observations returned by each underlying environment.

        rews: ``list``
            List of all the rewards returned by each underlying environment.

        dones: ``list``
            List of all the "done" flags returned by each underlying environment. If one of these values is ``True``,
            it means the corresponding environment encountered a game over.

        infos: ``list``
            List of the info dictionaries returned by each underlying environment.
        """
        if len(actions) != self.nb_env:
            raise MultiEnvException(
                "Incorrect number of actions provided. You provided {} actions, but the "
                "MultiEnvironment counts {} different environments."
                "".format(len(actions), self.nb_env))
        for act in actions:
            if not isinstance(act, BaseAction):
                raise MultiEnvException(
                    "All actions sent to MultiEnvironment.step should be of type \"grid2op.BaseAction\" "
                    "and not {}".format(type(act)))

        self._send_act(actions)
        obs, rews, dones, infos = self._wait_for_obs()
        return obs, rews, dones, infos
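
A short usage sketch for this step method, in the spirit of the other examples in this listing; the environment name is illustrative:

.. code-block:: python

    import grid2op
    from grid2op.Environment import BaseMultiProcessEnvironment

    env1 = grid2op.make("l2rpn_case14_sandbox")  # illustrative environment name
    env2 = grid2op.make("l2rpn_case14_sandbox")
    multi_env = BaseMultiProcessEnvironment([env1, env2])
    obss = multi_env.reset()

    # one "do nothing" action per underlying environment
    actions = [env1.action_space(), env2.action_space()]
    obss, rews, dones, infos = multi_env.step(actions)

    # dones[i] is True when environment i hit a game over; in that case
    # obss[i] is already the first observation of the freshly reset environment
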
Example #5
    def step(self, actions):
        """
        Perform a step in all the underlying environments.
        If one or more of the underlying environments encounter a game over, they are automatically restarted.

        The observation sent back to the user is the observation after the :func:`grid2op.Environment.Environment.reset`
        has been called.

        As opposed to :func:`Environment.step`, a call to this function will automatically reset
        any of the underlying environments in case one of them is "done". This is performed the following way.
        In case one underlying environment is over (due to a game over or due to the end of the chronics), then:

        - the corresponding "done" is returned as ``True``
        - the corresponding observation returned is not the observation of the last time step (corresponding to the
          underlying environment that is game over) but is the first observation after reset.

        At the next call to step, the "done" flag will be set to ``False`` (unless another game over arises) and the
        corresponding observation is the next observation of this underlying environment: everything works
        as usual in this case.

        We did that because handling the restart of a game over environment manually added unnecessary complexity.

        Parameters
        ----------
        actions: ``list``
            List of :attr:`MultiEnvironment.nb_env` :class:`grid2op.Action.BaseAction`. Each action will be executed
            in the corresponding underlying environment.

        Returns
        -------
        obs: ``list``
            List of all the observations returned by each underlying environment.

        rews: ``list``
            List of all the rewards returned by each underlying environment.

        dones: ``list``
            List of all the "done" flags returned by each underlying environment. If one of these values is ``True``,
            it means the corresponding environment encountered a game over.

        infos: ``list``
            List of the info dictionaries returned by each underlying environment.

        Examples
        ---------
        You can use this class as follows:

        .. code-block:: python

            import grid2op
            from grid2op.Environment import BaseMultiProcessEnvironment
            env1 = grid2op.make()  # create an environment of your choosing
            env2 = grid2op.make()  # create another environment of your choosing

            multi_env = BaseMultiProcessEnvironment([env1, env2])
            obss = multi_env.reset()
            obs1, obs2 = obss  # here I extract the observation of the first environment and of the second one
            # note that you cannot do obs1.simulate().
            # this is equivalent to a call to
            # obs1 = env1.reset(); obs2 = env2.reset()

            # then you can do regular steps
            action_env1 = env1.action_space()
            action_env2 = env2.action_space()
            obss, rewards, dones, infos = multi_env.step([action_env1, action_env2])
            # if you define
            # obs1, obs2 = obss
            # r1, r2 = rewards
            # done1, done2 = dones
            # info1, info2 = infos
            # in this case, it is equivalent to calling
            # obs1, r1, done1, info1 = env1.step(action_env1)
            # obs2, r2, done2, info2 = env2.step(action_env2)

        Let us now focus on the "automatic" reset part.

        .. code-block:: python

            # see above for the creation of a multi_env and the proper imports
            multi_env = BaseMultiProcessEnvironment([env1, env2])
            obss = multi_env.reset()
            action_env1 = env1.action_space()
            action_env2 = env2.action_space()
            obss, rewards, dones, infos = multi_env.step([action_env1, action_env2])

            # say dones[0] is ``True``
            # in this case if you define
            # obs1 = obss[0]
            # r1 = rewards[0]
            # done1 = dones[0]
            # info1 = infos[0]
            # in that case it is equivalent to the "single processed" code
            # obs1_tmp, r1_tmp, done1_tmp, info1_tmp = env1.step(action_env1)
            # done1 = done1_tmp
            # r1 = r1_tmp
            # info1 = info1_tmp
            # obs1_aux = env1.reset()
            # obs1 = obs1_aux
            # CAREFUL: in this case, obs1 is NOT obs1_tmp but is really obs1_aux,
            # the first observation of the freshly reset environment

        """
        if len(actions) != self.nb_env:
            raise MultiEnvException(
                "Incorrect number of actions provided. You provided {} actions, but the "
                "MultiEnvironment counts {} different environments."
                "".format(len(actions), self.nb_env))
        for act in actions:
            if not isinstance(act, BaseAction):
                raise MultiEnvException(
                    "All actions sent to MultiEnvironment.step should be of type \"grid2op.BaseAction\" "
                    "and not {}".format(type(act)))

        self._send_act(actions)
        obs, rews, dones, infos = self._wait_for_obs()
        return obs, rews, dones, infos
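
As a complement to the docstring above, a hedged sketch of a rollout loop that relies on the automatic reset behaviour; it reuses the multi_env, env1 and env2 from the docstring example and stands in "do nothing" actions for a real policy:

.. code-block:: python

    n_steps = 100
    episode_rewards = [0.0 for _ in range(multi_env.nb_env)]

    obss = multi_env.reset()
    for _ in range(n_steps):
        # "do nothing" actions stand in for a real policy here
        actions = [env1.action_space(), env2.action_space()]
        obss, rews, dones, infos = multi_env.step(actions)
        for i, (rew, done) in enumerate(zip(rews, dones)):
            episode_rewards[i] += rew
            if done:
                # thanks to the automatic reset, obss[i] already belongs to the new episode
                print(f"environment {i} finished an episode with total reward {episode_rewards[i]:.2f}")
                episode_rewards[i] = 0.0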