Example #1
    def __init__(self, gym_env: gym.Env,
                       set_state: Callable[[gym.Env, D.T_memory[D.T_state]], None] = None,
                       get_state: Callable[[gym.Env], D.T_memory[D.T_state]] = None,
                       continuous_feature_fidelity: int = 1,
                       discretization_factor: int = 10,
                       branching_factor: int = None,
                       max_depth: int = None) -> None:
        """Initialize GymRIWDomain.

        # Parameters
        gym_env: The deterministic Gym environment (gym.Env) to wrap.
        set_state: Function to call to set the state of the gym environment.
                   If None, the default behavior is to deepcopy the environment when changing state.
        get_state: Function to call to get the state of the gym environment.
                   If None, the default behavior is to deepcopy the environment when changing state.
        continuous_feature_fidelity: Number of integers used to represent a continuous feature
                                     in the interval-based feature abstraction (higher is more precise).
        discretization_factor: Number of discretized action variable values per continuous action variable.
        branching_factor: If not None, sample branching_factor actions from the resulting list of discretized actions.
        max_depth: Maximum depth of states to explore from the initial state.
        """
        GymDomainHashable.__init__(self, gym_env=gym_env)
        GymDiscreteActionDomain.__init__(self,
                                         discretization_factor=discretization_factor,
                                         branching_factor=branching_factor)
        GymWidthDomain.__init__(self, continuous_feature_fidelity=continuous_feature_fidelity)
        gym_env._max_episode_steps = max_depth
        self._max_depth = max_depth
        self._current_depth = 0
        self._cumulated_reward = 0
        self._continuous_feature_fidelity = continuous_feature_fidelity
        self._map = None
        self._path = None
        self._must_reset_features = True
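A minimal usage sketch for the constructor above, assuming the surrounding class is GymRIWDomain (as its docstring states) and that a standard continuous-control Gym environment is available; the chosen environment and parameter values are illustrative only:

import gym

# Hypothetical usage: wrap a continuous-control environment so that
# width-based search can operate on discretized actions and features.
env = gym.make("MountainCarContinuous-v0")
domain = GymRIWDomain(gym_env=env,
                      continuous_feature_fidelity=3,   # finer interval abstraction of continuous features
                      discretization_factor=5,         # 5 candidate values per continuous action variable
                      branching_factor=None,           # keep every discretized action
                      max_depth=200)                   # bound on the explored depth / episode length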
Example #2
    def __init__(
        self,
        gym_env: gym.Env,
        discretization_factor: int = 10,
        branching_factor: int = None,
        max_depth: int = None,
    ) -> None:
        """Initialize GymRIWDomain.

        # Parameters
        gym_env: The deterministic Gym environment (gym.Env) to wrap.
        discretization_factor: Number of discretized action variable values per continuous action variable.
        branching_factor: If not None, sample branching_factor actions from the resulting list of discretized actions.
        max_depth: Maximum depth of states to explore from the initial state.
        """
        GymDomain.__init__(self, gym_env=gym_env)
        GymDiscreteActionDomain.__init__(
            self,
            discretization_factor=discretization_factor,
            branching_factor=branching_factor,
        )
        gym_env._max_episode_steps = max_depth
        self.current_outcome = None
        self._map = None
        self._path = None
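The discretization_factor parameter above controls how each continuous action variable is split into a finite set of candidate values. The sketch below illustrates that idea with a plain NumPy grid; it is a conceptual illustration under assumed Box bounds, not the library's actual implementation:

import numpy as np
from gym import spaces

def discretize_box_actions(box: spaces.Box, discretization_factor: int = 10) -> np.ndarray:
    """Illustrative only: enumerate candidate actions for a bounded Box space."""
    # One evenly spaced grid per continuous action dimension.
    grids = [np.linspace(low, high, discretization_factor)
             for low, high in zip(box.low, box.high)]
    # The Cartesian product of the per-dimension grids gives the candidates.
    return np.array(np.meshgrid(*grids)).T.reshape(-1, box.shape[0])

# A 1-D action in [-1, 1] yields discretization_factor candidate actions.
candidates = discretize_box_actions(spaces.Box(low=-1.0, high=1.0, shape=(1,)), 10)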
Example #3
    def __init__(self,
                 gym_env: gym.Env,
                 set_state: Callable[[gym.Env, D.T_memory[D.T_state]],
                                     None] = None,
                 get_state: Callable[[gym.Env], D.T_memory[D.T_state]] = None,
                 discretization_factor: int = 10,
                 branching_factor: int = None,
                 horizon: int = 1000) -> None:
        """Initialize GymRIWDomain.

        # Parameters
        gym_env: The deterministic Gym environment (gym.Env) to wrap.
        set_state: Function to call to set the state of the gym environment.
                   If None, the default behavior is to deepcopy the environment when changing state.
        get_state: Function to call to get the state of the gym environment.
                   If None, the default behavior is to deepcopy the environment when changing state.
        discretization_factor: Number of discretized action variable values per continuous action variable.
        branching_factor: If not None, sample branching_factor actions from the resulting list of discretized actions.
        horizon: Maximum number of steps allowed for the gym environment.
        """
        DeterministicGymDomain.__init__(self,
                                        gym_env=gym_env,
                                        set_state=set_state,
                                        get_state=get_state)
        GymDiscreteActionDomain.__init__(
            self,
            discretization_factor=discretization_factor,
            branching_factor=branching_factor)
        gym_env._max_episode_steps = horizon
        self._map = None
        self._path = None
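When the wrapped environment exposes its internal state, the set_state/get_state callables let the domain snapshot and restore that state instead of deep-copying the whole environment. A hedged sketch, assuming a hypothetical environment that keeps its state in env.unwrapped.state (as the classic-control tasks do):

import copy
import gym

# Illustrative callables; the `state` attribute is an assumption and any
# picklable snapshot of the simulator state would work equally well.
def get_state(env: gym.Env):
    return copy.deepcopy(env.unwrapped.state)

def set_state(env: gym.Env, state) -> None:
    env.unwrapped.state = copy.deepcopy(state)

# They would then be passed to the constructor above, e.g.:
# domain = GymRIWDomain(gym_env=env, set_state=set_state, get_state=get_state)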
Example #4
    def __init__(self, gym_env: gym.Env,
                       set_state: Callable[[gym.Env, D.T_memory[D.T_state]], None] = None,
                       get_state: Callable[[gym.Env], D.T_memory[D.T_state]] = None,
                       termination_is_goal: bool = True,
                       continuous_feature_fidelity: int = 1,
                       discretization_factor: int = 3,
                       branching_factor: int = None,
                       max_depth: int = 50) -> None:
        GymPlanningDomain.__init__(self,
                                   gym_env=gym_env,
                                   set_state=set_state,
                                   get_state=get_state,
                                   termination_is_goal=termination_is_goal,
                                   max_depth=max_depth)
        GymDiscreteActionDomain.__init__(self,
                                         discretization_factor=discretization_factor,
                                         branching_factor=branching_factor)
        GymWidthDomain.__init__(self, continuous_feature_fidelity=continuous_feature_fidelity)
        gym_env._max_episode_steps = max_depth
Example #5
    def __init__(self,
                 gym_env: gym.Env,
                 set_state: Callable[[gym.Env, D.T_memory[D.T_state]],
                                     None] = None,
                 get_state: Callable[[gym.Env], D.T_memory[D.T_state]] = None,
                 termination_is_goal: bool = True,
                 continuous_feature_fidelity: int = 1,
                 discretization_factor: int = 3,
                 branching_factor: int = None,
                 max_depth: int = 50) -> None:
        """Initialize GymIWDomain.

        # Parameters
        gym_env: The deterministic Gym environment (gym.Env) to wrap.
        set_state: Function to call to set the state of the gym environment.
                   If None, the default behavior is to deepcopy the environment when changing state.
        get_state: Function to call to get the state of the gym environment.
                   If None, the default behavior is to deepcopy the environment when changing state.
        termination_is_goal: True if the termination condition is a goal (and not a dead-end).
        continuous_feature_fidelity: Number of integers used to represent a continuous feature
                                     in the interval-based feature abstraction (higher is more precise).
        discretization_factor: Number of discretized action variable values per continuous action variable.
        branching_factor: If not None, sample branching_factor actions from the resulting list of discretized actions.
        max_depth: Maximum depth of states to explore from the initial state.
        """
        GymPlanningDomain.__init__(self,
                                   gym_env=gym_env,
                                   set_state=set_state,
                                   get_state=get_state,
                                   termination_is_goal=termination_is_goal,
                                   max_depth=max_depth)
        GymDiscreteActionDomain.__init__(
            self,
            discretization_factor=discretization_factor,
            branching_factor=branching_factor)
        GymWidthDomain.__init__(
            self, continuous_feature_fidelity=continuous_feature_fidelity)
        gym_env._max_episode_steps = max_depth
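The continuous_feature_fidelity parameter documented above determines how many integers encode each continuous state feature for the width-based novelty test. The sketch below is one way to picture that interval-based abstraction, assuming known per-feature bounds; it illustrates the idea only and is not the library's actual encoding:

def interval_features(observation, low, high, fidelity: int = 1):
    """Illustrative only: encode each continuous feature as `fidelity` integers
    obtained by repeatedly bisecting its assumed [low, high] interval."""
    codes = []
    for x, lo, hi in zip(observation, low, high):
        for _ in range(fidelity):
            mid = (lo + hi) / 2.0
            if x < mid:            # feature falls in the lower half-interval
                codes.append(0)
                hi = mid
            else:                  # feature falls in the upper half-interval
                codes.append(1)
                lo = mid
    return codes

# Example: 2 features, 3 integers each -> 6 binary interval indicators.
features = interval_features([-0.4, 0.02], low=[-1.2, -0.07], high=[0.6, 0.07], fidelity=3)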