Example #1
    def to_base_env(env,
                    make_env=None,
                    num_envs=1,
                    remote_envs=False,
                    async_remote_envs=False):
        """Wraps any env type as needed to expose the async interface."""

        from ray.rllib.env.remote_vector_env import RemoteVectorEnv
        if (remote_envs or async_remote_envs) and num_envs == 1:
            raise ValueError(
                "Remote envs only make sense to use if num_envs > 1 "
                "(i.e. vectorization is enabled).")
        if remote_envs and async_remote_envs:
            raise ValueError("You can only specify one of remote_envs or "
                             "async_remote_envs.")

        if not isinstance(env, BaseEnv):
            if isinstance(env, MultiAgentEnv):
                if remote_envs:
                    env = RemoteVectorEnv(make_env,
                                          num_envs,
                                          multiagent=True,
                                          sync=True)
                elif async_remote_envs:
                    env = RemoteVectorEnv(make_env,
                                          num_envs,
                                          multiagent=True,
                                          sync=False)
                else:
                    env = _MultiAgentEnvToBaseEnv(make_env=make_env,
                                                  existing_envs=[env],
                                                  num_envs=num_envs)
            elif isinstance(env, ExternalEnv):
                if num_envs != 1:
                    raise ValueError(
                        "ExternalEnv does not currently support num_envs > 1.")
                env = _ExternalEnvToBaseEnv(env)
            elif isinstance(env, VectorEnv):
                env = _VectorEnvToBaseEnv(env)
            else:
                if remote_envs:
                    env = RemoteVectorEnv(make_env,
                                          num_envs,
                                          multiagent=False,
                                          sync=True)
                elif async_remote_envs:
                    env = RemoteVectorEnv(make_env,
                                          num_envs,
                                          multiagent=False,
                                          sync=False)
                else:
                    env = VectorEnv.wrap(
                        make_env=make_env,
                        existing_envs=[env],
                        num_envs=num_envs,
                        action_space=env.action_space,
                        observation_space=env.observation_space)
                    env = _VectorEnvToBaseEnv(env)
        assert isinstance(env, BaseEnv), env
        return env
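
A minimal usage sketch for this older signature, assuming the helper is exposed as the `BaseEnv.to_base_env` staticmethod in `ray.rllib.env.base_env` (the exact import path and gym environment id may differ between releases):

    import gym
    from ray.rllib.env.base_env import BaseEnv

    # A plain gym.Env falls through to the final else-branch above:
    # it is wrapped by VectorEnv.wrap() and then by _VectorEnvToBaseEnv.
    env = gym.make("CartPole-v0")
    base_env = BaseEnv.to_base_env(
        env,
        make_env=lambda idx: gym.make("CartPole-v0"),  # idx = sub-env index
        num_envs=1)
    assert isinstance(base_env, BaseEnv)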
Example #2
    def to_base_env(
        env: EnvType,
        make_env: Callable[[int], EnvType] = None,
        num_envs: int = 1,
        remote_envs: bool = False,
        remote_env_batch_wait_ms: int = 0,
        policy_config: PartialTrainerConfigDict = None,
    ) -> "BaseEnv":
        """Wraps any env type as needed to expose the async interface."""

        from ray.rllib.env.remote_vector_env import RemoteVectorEnv
        if remote_envs and num_envs == 1:
            raise ValueError(
                "Remote envs only make sense to use if num_envs > 1 "
                "(i.e. vectorization is enabled).")

        if not isinstance(env, BaseEnv):
            if isinstance(env, MultiAgentEnv):
                if remote_envs:
                    env = RemoteVectorEnv(
                        make_env,
                        num_envs,
                        multiagent=True,
                        remote_env_batch_wait_ms=remote_env_batch_wait_ms)
                else:
                    env = _MultiAgentEnvToBaseEnv(make_env=make_env,
                                                  existing_envs=[env],
                                                  num_envs=num_envs)
            elif isinstance(env, ExternalEnv):
                if num_envs != 1:
                    raise ValueError(
                        "External(MultiAgent)Env does not currently support "
                        "num_envs > 1. One way of solving this would be to "
                        "treat your Env as a MultiAgentEnv hosting only one "
                        "type of agent but with several copies.")
                env = _ExternalEnvToBaseEnv(env)
            elif isinstance(env, VectorEnv):
                env = _VectorEnvToBaseEnv(env)
            else:
                if remote_envs:
                    env = RemoteVectorEnv(
                        make_env,
                        num_envs,
                        multiagent=False,
                        remote_env_batch_wait_ms=remote_env_batch_wait_ms,
                        existing_envs=[env],
                    )
                else:
                    env = VectorEnv.wrap(
                        make_env=make_env,
                        existing_envs=[env],
                        num_envs=num_envs,
                        action_space=env.action_space,
                        observation_space=env.observation_space,
                        policy_config=policy_config,
                    )
                    env = _VectorEnvToBaseEnv(env)
        assert isinstance(env, BaseEnv), env
        return env
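
A hedged sketch of how this variant might be called to vectorize a simple gym.Env into several `@ray.remote` sub-environments; `BaseEnv.to_base_env` as the entry point and the CartPole environment id are assumptions here, and remote sub-envs require a running Ray instance:

    import gym
    import ray
    from ray.rllib.env.base_env import BaseEnv

    ray.init()

    # With remote_envs=True and num_envs > 1, the else-branch above builds a
    # RemoteVectorEnv whose sub-envs are ray.remote actors.
    # remote_env_batch_wait_ms is forwarded to RemoteVectorEnv and tunes how
    # long poll() waits to batch results from the remote sub-envs.
    base_env = BaseEnv.to_base_env(
        gym.make("CartPole-v0"),
        make_env=lambda idx: gym.make("CartPole-v0"),
        num_envs=4,
        remote_envs=True,
        remote_env_batch_wait_ms=10)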
Example #3
    def to_base_env(
            env: EnvType,
            make_env: Callable[[int], EnvType] = None,
            num_envs: int = 1,
            remote_envs: bool = False,
            remote_env_batch_wait_ms: int = 0,
            policy_config: Optional[PartialTrainerConfigDict] = None,
    ) -> "BaseEnv":
        """Converts an RLlib-supported env into a BaseEnv object.

        Supported types for the given `env` arg are gym.Env, BaseEnv,
        VectorEnv, MultiAgentEnv, or ExternalEnv.

        The resulting BaseEnv is always vectorized (contains n
        sub-environments) for batched forward passes, where n may also be 1.
        BaseEnv also supports async execution via the `poll` and `send_actions`
        methods and thus supports external simulators.

        TODO: Support gym3 environments, which are already vectorized.

        Args:
            env: An already existing environment of any supported env type
                to convert/wrap into a BaseEnv. Supported types are gym.Env,
                BaseEnv, VectorEnv, MultiAgentEnv, ExternalEnv, or
                ExternalMultiAgentEnv.
            make_env: A callable taking an int as input (which indicates the
                number of individual sub-environments within the final
                vectorized BaseEnv) and returning one individual
                sub-environment.
            num_envs: The number of sub-environments to create in the
                resulting (vectorized) BaseEnv. The already existing `env`
                will be one of the `num_envs`.
            remote_envs: Whether each sub-env should be a @ray.remote actor.
                You can set this behavior in your config via the
                `remote_worker_envs=True` option.
            remote_env_batch_wait_ms: The wait time (in ms) to poll remote
                sub-environments for, if applicable. Only used if
                `remote_envs` is True.
            policy_config: Optional policy config dict.

        Returns:
            The resulting BaseEnv object.
        """

        from ray.rllib.env.remote_vector_env import RemoteVectorEnv
        if remote_envs and num_envs == 1:
            raise ValueError(
                "Remote envs only make sense to use if num_envs > 1 "
                "(i.e. vectorization is enabled).")

        # Given `env` is already a BaseEnv -> Return as is.
        if isinstance(env, BaseEnv):
            return env

        # `env` is not a BaseEnv yet -> Need to convert/vectorize.

        # MultiAgentEnv (which is a gym.Env).
        if isinstance(env, MultiAgentEnv):
            # Sub-environments are ray.remote actors:
            if remote_envs:
                env = RemoteVectorEnv(
                    make_env,
                    num_envs,
                    multiagent=True,
                    remote_env_batch_wait_ms=remote_env_batch_wait_ms)
            # Sub-environments are not ray.remote actors.
            else:
                env = _MultiAgentEnvToBaseEnv(
                    make_env=make_env, existing_envs=[env], num_envs=num_envs)
        # ExternalEnv.
        elif isinstance(env, ExternalEnv):
            if num_envs != 1:
                raise ValueError(
                    "External(MultiAgent)Env does not currently support "
                    "num_envs > 1. One way of solving this would be to "
                    "treat your Env as a MultiAgentEnv hosting only one "
                    "type of agent but with several copies.")
            env = _ExternalEnvToBaseEnv(env)
        # VectorEnv.
        # Note that all BaseEnvs are also vectorized, but the user may want to
        # define custom vectorization logic and thus implement a custom
        # VectorEnv class.
        elif isinstance(env, VectorEnv):
            env = _VectorEnvToBaseEnv(env)
        # Anything else: This usually implies that env is a gym.Env object.
        else:
            # Sub-environments are ray.remote actors:
            if remote_envs:
                # Determine whether the already existing sub-env (which could
                # be a ray actor) is multi-agent or not.
                multiagent = ray.get(env._is_multi_agent.remote()) if \
                    hasattr(env, "_is_multi_agent") else False
                env = RemoteVectorEnv(
                    make_env,
                    num_envs,
                    multiagent=multiagent,
                    remote_env_batch_wait_ms=remote_env_batch_wait_ms,
                    existing_envs=[env],
                )
            # Sub-environments are not ray.remote actors.
            else:
                env = VectorEnv.vectorize_gym_envs(
                    make_env=make_env,
                    existing_envs=[env],
                    num_envs=num_envs,
                    action_space=env.action_space,
                    observation_space=env.observation_space,
                )
                env = _VectorEnvToBaseEnv(env)

        # Make sure conversion went well.
        assert isinstance(env, BaseEnv), env

        return env
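
The docstring above highlights the async interface of the resulting BaseEnv. A minimal interaction sketch, assuming the helper is reachable as `BaseEnv.to_base_env` and that `poll()`/`send_actions()` follow RLlib's BaseEnv signatures (dicts keyed first by env id, then by agent id):

    import gym
    from ray.rllib.env.base_env import BaseEnv

    env = gym.make("CartPole-v0")
    base_env = BaseEnv.to_base_env(env)

    # poll() returns per-env, per-agent dicts; a plain gym.Env is exposed
    # under a single dummy agent id inside each sub-env.
    obs, rewards, dones, infos, off_policy_actions = base_env.poll()

    # Send one random action back for every (env_id, agent_id) pair observed.
    actions = {
        env_id: {agent_id: env.action_space.sample() for agent_id in agent_obs}
        for env_id, agent_obs in obs.items()
    }
    base_env.send_actions(actions)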